Diffstat (limited to 'ffmpeg/doc')
83 files changed, 0 insertions, 35435 deletions

diff --git a/ffmpeg/doc/APIchanges b/ffmpeg/doc/APIchanges
deleted file mode 100644
index 21a8c4c..0000000
--- a/ffmpeg/doc/APIchanges
+++ /dev/null
@@ -1,1810 +0,0 @@
Never assume the API of libav* to be stable unless at least 1 month has passed
since the last major version increase or the API was added.

The last version increases were:
libavcodec:    2013-03-xx
libavdevice:   2013-03-xx
libavfilter:   2013-12-xx
libavformat:   2013-03-xx
libavresample: 2012-10-05
libpostproc:   2011-04-18
libswresample: 2011-09-19
libswscale:    2011-06-20
libavutil:     2012-10-22


API changes, most recent first:

2013-12-22 - xxxxxxx - lavu 52.59.100 - avstring.h
  Add av_strnlen() function.

2013-12-xx - xxxxxxx - lavu 52.57.100 - opencl.h
  Add av_opencl_benchmark() function.

2013-11-xx - xxxxxxx - lavu 52.56.100 - ffversion.h
  Move version.h to libavutil/ffversion.h.
  Install ffversion.h and make it public.

2013-12-xx - xxxxxxx - lavc 55.28.1 - avcodec.h
  av_frame_alloc(), av_frame_unref() and av_frame_free() now can and should be
  used instead of avcodec_alloc_frame(), avcodec_get_frame_defaults() and
  avcodec_free_frame() respectively. The latter three functions are deprecated.

2013-11-xx - xxxxxxx - lavu 52.20.0 - frame.h
  Add AV_FRAME_DATA_STEREO3D value to the AVFrameSideDataType enum and the
  stereo3d.h API, which identify codec-independent stereo3d information.

2013-11-xx - xxxxxxx - lavu 52.19.0 - frame.h
  Add AV_FRAME_DATA_A53_CC value to the AVFrameSideDataType enum, which
  identifies ATSC A53 Part 4 Closed Captions data.

2013-11-xx - xxxxxxx - lavu 52.54.100 - avstring.h
  Add av_utf8_decode() function.

2013-11-22 - fb7d70c - lavc 55.44.100 - avcodec.h
  Add HEVC profiles.

2013-11-xx - xxxxxxx - lavc 55.44.100 - avcodec.h
  Add av_packet_{un,}pack_dictionary().
  Add AV_PKT_METADATA_UPDATE side data type, used to transmit key/value
  strings between a stream and the application.

2013-11-xx - xxxxxxx - lavu 52.18.0 - mem.h
  Move av_fast_malloc() and av_fast_realloc() from libavcodec to libavutil.

2013-10-xx - xxxxxxx - lavc 55.27.0 - avcodec.h
  Deprecate AVCodecContext.error_rate; it is replaced by the 'error_rate'
  private option of the mpegvideo encoder family.

2013-11-xx - xxxxxxx - lavc 55.26.0 - vdpau.h
  Add av_vdpau_get_profile().
  Add av_vdpau_alloc_context(). This function must from now on be
  used for allocating AVVDPAUContext.

2013-11-xx - xxxxxxx - lavc 55.41.100 / 55.25.0 - avcodec.h
                       lavu 52.51.100 - frame.h
  Add ITU-R BT.2020 and other not yet included values to color primaries,
  transfer characteristics and colorspaces.

2013-11-04 - xxxxxxx - lavu 52.50.100 - avutil.h
  Add av_fopen_utf8().

2013-08-xx - xxxxxxx - lavu 52.17.0 - avframe.h
  Add AVFrame.flags and AV_FRAME_FLAG_CORRUPT.

2013-10-27 - xxxxxxx - lavc 55.39.100 - avcodec.h
  Add CODEC_CAP_DELAY support to avcodec_decode_subtitle2.

2013-10-27 - xxxxxxx - lavu 52.48.100 - parseutils.h
  Add av_get_known_color_name().

2013-10-17 - xxxxxxx - lavu 52.47.100 - opt.h
  Add AV_OPT_TYPE_CHANNEL_LAYOUT and channel layout option handlers
  av_opt_get_channel_layout() and av_opt_set_channel_layout().
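
A minimal sketch of how the av_frame_alloc()/av_frame_unref()/av_frame_free()
calls mentioned above replace the deprecated avcodec_* frame helpers. The
helper name and the reduced error handling are illustrative, not from the
changelog.

    #include <libavutil/frame.h>
    #include <libavutil/error.h>

    static int frame_alloc_example(void)
    {
        AVFrame *frame = av_frame_alloc();   /* replaces avcodec_alloc_frame() */
        if (!frame)
            return AVERROR(ENOMEM);

        /* ... decode into or otherwise fill the frame ... */

        av_frame_unref(frame);   /* replaces avcodec_get_frame_defaults() */
        av_frame_free(&frame);   /* replaces avcodec_free_frame(); sets frame = NULL */
        return 0;
    }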

2013-10-xx - xxxxxxx - libswscale 2.5.101 - options.c
  Change default scaler to bicubic.

2013-10-03 - xxxxxxx - lavc 55.34.100 - avcodec.h
  Add av_codec_get_max_lowres().

2013-10-02 - xxxxxxx - lavf 55.19.100 - avformat.h
  Add audio/video/subtitle AVCodec fields to AVFormatContext to force specific
  decoders.

2013-08-xx - xxxxxxx - lavfi 3.11.0 - avfilter.h
  Add AVFilterGraph.execute and AVFilterGraph.opaque for custom slice threading
  implementations.

2013-09-21 - xxxxxxx - lavu 52.16.0 - pixfmt.h
  Add interleaved 4:2:2 8/10-bit formats AV_PIX_FMT_NV16 and
  AV_PIX_FMT_NV20.

2013-09-16 - c74c3fb / 3feb3d6 - lavu 52.44.100 / 52.15.0 - mem.h
  Add av_reallocp.

2013-09-04 - 3e1f507 - lavc 55.31.101 - avcodec.h
  avcodec_close() argument can be NULL.

2013-09-04 - 36cd017 - lavf 55.16.101 - avformat.h
  avformat_close_input() argument can be NULL and point to NULL.

2013-08-29 - e31db62 - lavf 55.15.100 - avformat.h
  Add av_format_get_probe_score().

2013-08-15 - 1e0e193 - lsws 2.5.100
  Add a sws_dither AVOption, allowing the dither algorithm to be selected.

2013-08-xx - xxxxxxx - lavc 55.27.100 - vdpau.h
  Add a render2 alternative to the render callback function.

2013-08-xx - xxxxxxx - lavc 55.26.100 - vdpau.h
  Add an allocation function for AVVDPAUContext, allowing it to be extended
  in the future without breaking ABI/API.

2013-08-10 - 67a580f / 5a9a9d4 - lavc 55.25.100 / 55.16.0 - avcodec.h
  Extend AVPacket API with av_packet_unref, av_packet_ref,
  av_packet_move_ref, av_packet_copy_props, av_packet_free_side_data.

2013-08-05 - 9547e3e / f824535 - lavc 55.22.100 / 55.13.0 - avcodec.h
  Deprecate the bitstream-related members from struct AVVDPAUContext.
  The bitstream buffers no longer need to be explicitly freed.

2013-08-05 - 3b805dc / 549294f - lavc 55.21.100 / 55.12.0 - avcodec.h
  Deprecate the CODEC_CAP_HWACCEL_VDPAU codec capability. Use CODEC_CAP_HWACCEL
  and select the AV_PIX_FMT_VDPAU format with get_format() instead.

2013-08-05 - 4ee0984 / a0ad5d0 - lavu 52.41.100 / 52.14.0 - pixfmt.h
  Deprecate AV_PIX_FMT_VDPAU_*. Use AV_PIX_FMT_VDPAU instead.

2013-08-02 - 82fdfe8 / a8b1927 - lavc 55.20.100 / 55.11.0 - avcodec.h
  Add output_picture_number to AVCodecParserContext.

2013-07-23 - abc8110 - lavc 55.19.100 - avcodec.h
  Add avcodec_chroma_pos_to_enum().
  Add avcodec_enum_to_chroma_pos().

2013-07-03 - 838bd73 - lavfi 3.78.100 - avfilter.h
  Deprecate avfilter_graph_parse() in favor of the equivalent
  avfilter_graph_parse_ptr().

2013-06-24 - af5f9c0 / 95d5246 - lavc 55.17.100 / 55.10.0 - avcodec.h
  Add MPEG-2 AAC profiles.

2013-06-25 - af5f9c0 / 95d5246 - lavf 55.10.100 - avformat.h
  Add AV_DISPOSITION_* flags to indicate text track kind.

2013-06-15 - 99b8cd0 - lavu 52.36.100
  Add AVRIPEMD:
    av_ripemd_alloc()
    av_ripemd_init()
    av_ripemd_update()
    av_ripemd_final()

2013-06-04 - 30b491f / fc962d4 - lavu 52.35.100 / 52.13.0 - mem.h
  Add av_realloc_array and av_reallocp_array.

2013-05-30 - 682b227 - lavu 52.35.100
  Add AVSHA512:
    av_sha512_alloc()
    av_sha512_init()
    av_sha512_update()
    av_sha512_final()

2013-05-24 - 8d4e969 / 129bb23 - lavfi 3.10.0 / 3.70.100 - avfilter.h
  Add support for slice multithreading to lavfi. Filters supporting threading
  are marked with AVFILTER_FLAG_SLICE_THREADS.
  New fields AVFilterContext.thread_type, AVFilterGraph.thread_type and
  AVFilterGraph.nb_threads (accessible directly or through AVOptions) may be
  used to configure multithreading.
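
A hedged sketch of the extended AVPacket API listed above
(av_packet_ref()/av_packet_unref()); the helper name and the assumption that
`src` was filled elsewhere (e.g. by av_read_frame()) are illustrative.

    #include <libavcodec/avcodec.h>

    static int keep_packet_copy(AVPacket *src, AVPacket *dst)
    {
        int ret;

        av_init_packet(dst);
        dst->data = NULL;
        dst->size = 0;

        /* dst takes its own reference to src's buffer
         * (or copies the data if src is not reference-counted) */
        ret = av_packet_ref(dst, src);
        if (ret < 0)
            return ret;

        /* ... use dst independently of src's lifetime ... */

        av_packet_unref(dst);   /* drop our reference when done */
        return 0;
    }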

2013-05-24 - fe40a9f / 2a6eaea - lavu 52.12.0 / 52.34.100 - cpu.h
  Add av_cpu_count() function for getting the number of logical CPUs.

2013-05-24 - 0c25c39 / b493847 - lavc 55.7.0 / 55.12.100 - avcodec.h
  Add picture_structure to AVCodecParserContext.

2013-05-17 - 3a751ea - lavu 52.33.100 - opt.h
  Add AV_OPT_TYPE_COLOR value to AVOptionType enum.

2013-05-13 - e398416 - lavu 52.31.100 - mem.h
  Add av_dynarray2_add().

2013-05-12 - 1776177 - lavfi 3.65.100
  Add AVFILTER_FLAG_SUPPORT_TIMELINE* filter flags.

2013-04-19 - 380cfce - lavc 55.4.100
  Add AV_CODEC_PROP_TEXT_SUB property for text based subtitles codec.

2013-04-18 - 7c1a002 - lavf 55.3.100
  The matroska demuxer can now output proper verbatim ASS packets. It will
  become the default starting lavf 56.0.100.

2013-04-10 - af0d270 - lavu 52.26.100 - avutil.h,opt.h
  Add av_int_list_length()
  and av_opt_set_int_list().

2013-03-30 - 5c73645 - lavu 52.24.100 - samplefmt.h
  Add av_samples_alloc_array_and_samples().

2013-03-29 - ef7b6b4 - lavf 55.1.100 - avformat.h
  Add av_guess_frame_rate().

2013-03-20 - 8d928a9 - lavu 52.22.100 - opt.h
  Add AV_OPT_TYPE_DURATION value to AVOptionType enum.

2013-03-17 - 7aa9af5 - lavu 52.20.100 - opt.h
  Add AV_OPT_TYPE_VIDEO_RATE value to AVOptionType enum.

2013-03-07 - 9767ec6 - lavu 52.18.100 - avstring.h,bprint.h
  Add av_escape() and av_bprint_escape() API.

2013-02-24 - b59cd08 - lavfi 3.41.100 - buffersink.h
  Add sample_rates field to AVABufferSinkParams.

2013-01-17 - a1a707f - lavf 54.61.100
  Add av_codec_get_tag2().

2013-01-01 - 2eb2e17 - lavfi 3.34.100
  Add avfilter_get_audio_buffer_ref_from_arrays_channels.

2012-12-20 - 34de47aa - lavfi 3.29.100 - avfilter.h
  Add AVFilterLink.channels, avfilter_link_get_channels()
  and avfilter_ref_get_channels().

2012-12-15 - 96d815fc - lavc 54.80.100 - avcodec.h
  Add pkt_size field to AVFrame.

2012-11-25 - c70ec631 - lavu 52.9.100 - opt.h
  Add the following convenience functions to opt.h:
    av_opt_get_image_size
    av_opt_get_pixel_fmt
    av_opt_get_sample_fmt
    av_opt_set_image_size
    av_opt_set_pixel_fmt
    av_opt_set_sample_fmt

2012-11-17 - 4cd74c81 - lavu 52.8.100 - bprint.h
  Add av_bprint_strftime().

2012-11-15 - 92648107 - lavu 52.7.100 - opt.h
  Add av_opt_get_key_value().

2012-11-13 - 79456652 - lavfi 3.23.100 - avfilter.h
  Add channels field to AVFilterBufferRefAudioProps.

2012-11-03 - 481fdeee - lavu 52.3.100 - opt.h
  Add AV_OPT_TYPE_SAMPLE_FMT value to AVOptionType enum.

2012-10-21 - 6fb2fd8 - lavc 54.68.100 - avcodec.h
                       lavfi 3.20.100 - avfilter.h
  Add AV_PKT_DATA_STRINGS_METADATA side data type, used to transmit key/value
  strings between AVPacket and AVFrame, and add metadata field to
  AVCodecContext (which shall not be accessed by users; see AVFrame metadata
  instead).

2012-09-27 - a70b493 - lavd 54.3.100 - version.h
  Add LIBAVDEVICE_IDENT symbol.

2012-09-27 - a70b493 - lavfi 3.18.100 - version.h
  Add LIBAVFILTER_IDENT symbol.

2012-09-27 - a70b493 - libswr 0.16.100 - version.h
  Add LIBSWRESAMPLE_VERSION, LIBSWRESAMPLE_BUILD
  and LIBSWRESAMPLE_IDENT symbols.

2012-09-06 - 29e972f - lavu 51.72.100 - parseutils.h
  Add av_small_strptime() time parsing function.

  Can be used as a stripped-down replacement for strptime(), on
  systems which do not support it.

2012-08-25 - 2626cc4 - lavf 54.28.100
  Matroska demuxer now identifies SRT subtitles as AV_CODEC_ID_SUBRIP instead
  of AV_CODEC_ID_TEXT.
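
A sketch of the av_samples_alloc_array_and_samples() convenience function from
the samplefmt.h entry above: one call allocates both the pointer array and the
sample buffers, which is handy for planar formats. The stereo/1024-sample/FLTP
parameters and the helper name are example values.

    #include <libavutil/samplefmt.h>
    #include <libavutil/mem.h>

    static int alloc_planar_audio(void)
    {
        uint8_t **data = NULL;
        int linesize, ret;

        ret = av_samples_alloc_array_and_samples(&data, &linesize,
                                                 2, 1024, AV_SAMPLE_FMT_FLTP, 0);
        if (ret < 0)
            return ret;

        /* ... fill data[0] / data[1] with samples ... */

        av_freep(&data[0]);   /* frees the sample buffer */
        av_freep(&data);      /* frees the pointer array */
        return 0;
    }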

2012-08-13 - 5c0d8bc - lavfi 3.8.100 - avfilter.h
  Add avfilter_get_class() function, and priv_class field to AVFilter
  struct.

2012-08-12 - a25346e - lavu 51.69.100 - opt.h
  Add AV_OPT_FLAG_FILTERING_PARAM symbol in opt.h.

2012-07-31 - 23fc4dd - lavc 54.46.100
  Add channels field to AVFrame.

2012-07-30 - f893904 - lavu 51.66.100
  Add av_get_channel_description()
  and av_get_standard_channel_layout() functions.

2012-07-21 - 016a472 - lavc 54.43.100
  Add decode_error_flags field to AVFrame.

2012-07-20 - b062936 - lavf 54.18.100
  Add avformat_match_stream_specifier() function.

2012-07-14 - f49ec1b - lavc 54.38.100 - avcodec.h
  Add metadata to AVFrame, and the accessor functions
  av_frame_get_metadata() and av_frame_set_metadata().

2012-07-10 - 0e003d8 - lavc 54.33.100
  Add av_fast_padded_mallocz().

2012-07-10 - 21d5609 - lavfi 3.2.0 - avfilter.h
  Add init_opaque() callback to AVFilter struct.

2012-06-26 - e6674e4 - lavu 51.63.100 - imgutils.h
  Add functions to libavutil/imgutils.h:
    av_image_get_buffer_size()
    av_image_fill_arrays()
    av_image_copy_to_buffer()

2012-06-24 - c41899a - lavu 51.62.100 - version.h
  version moved from avutil.h to version.h

2012-04-11 - 359abb1 - lavu 51.58.100 - error.h
  Add av_make_error_string() and av_err2str() utilities to
  libavutil/error.h.

2012-06-05 - 62b39d4 - lavc 54.24.100
  Add pkt_duration field to AVFrame.

2012-05-24 - f2ee065 - lavu 51.54.100
  Move AVPALETTE_SIZE and AVPALETTE_COUNT macros from
  libavcodec/avcodec.h to libavutil/pixfmt.h.

2012-05-14 - 94a9ac1 - lavf 54.5.100
  Add av_guess_sample_aspect_ratio() function.

2012-04-20 - 65fa7bc - lavfi 2.70.100
  Add avfilter_unref_bufferp() to avfilter.h.

2012-04-13 - 162e400 - lavfi 2.68.100
  Install libavfilter/asrc_abuffer.h public header.

2012-03-26 - a67d9cf - lavfi 2.66.100
  Add avfilter_fill_frame_from_{audio_,}buffer_ref() functions.

2013-05-15 - ff46809 / e6c4ac7 - lavu 52.32.100 / 52.11.0 - pixdesc.h
  Replace PIX_FMT_* flags with AV_PIX_FMT_FLAG_*.

2013-04-03 - 6fc58a8 / 507b1e4 - lavc 55.7.100 / 55.4.0 - avcodec.h
  Add field_order to AVCodecParserContext.

2013-04-19 - f4b05cd / 5e83d9a - lavc 55.5.100 / 55.2.0 - avcodec.h
  Add CODEC_FLAG_UNALIGNED to allow decoders to produce unaligned output.

2013-04-11 - lavfi 3.53.100 / 3.8.0
  231fd44 / 38f0c07 - Move all content from avfiltergraph.h to avfilter.h.
                      Deprecate avfiltergraph.h; user applications should
                      include just avfilter.h.
  86070b8 / bc1a985 - Add avfilter_graph_alloc_filter(), deprecate
                      avfilter_open() and avfilter_graph_add_filter().
  4fde705 / 1113672 - Add AVFilterContext.graph pointing to the AVFilterGraph
                      that contains the filter.
  710b0aa / 48a5ada - Add avfilter_init_str(), deprecate avfilter_init_filter().
  46de9ba / 1ba95a9 - Add avfilter_init_dict().
  16fc24b / 7cdd737 - Add AVFilter.flags field and
                      AVFILTER_FLAG_DYNAMIC_{INPUTS,OUTPUTS} flags.
  f4db6bf / 7e8fe4b - Add avfilter_pad_count() for counting filter
                      inputs/outputs.
  835cc0f / fa2a34c - Add avfilter_next(), deprecate av_filter_next().
                      Deprecate avfilter_uninit().

2013-04-09 - lavfi 3.51.100 / 3.7.0 - avfilter.h
  0594ef0 / b439c99 - Add AVFilter.priv_class for exporting filter options
                      through the AVOptions API in the similar way private
                      options work in lavc and lavf.
  44d4488 / 8114c10 - Add avfilter_get_class().
  Switch all filters to use AVOptions.
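
An illustrative sketch of the avfilter_graph_alloc_filter()/avfilter_init_str()
pattern that replaces avfilter_open()/avfilter_graph_add_filter() and
avfilter_init_filter() above. The filter ("scale"), its option string and the
helper name are example values.

    #include <libavfilter/avfilter.h>
    #include <libavutil/error.h>

    static int add_scale_filter(AVFilterGraph *graph, AVFilterContext **out)
    {
        const AVFilter *scale = avfilter_get_by_name("scale");
        AVFilterContext *ctx;
        int ret;

        if (!scale)
            return AVERROR_FILTER_NOT_FOUND;

        /* allocate the filter inside the graph; AVFilterContext.graph is set */
        ctx = avfilter_graph_alloc_filter(graph, scale, "scale_out");
        if (!ctx)
            return AVERROR(ENOMEM);

        /* initialize from an option string instead of avfilter_init_filter() */
        ret = avfilter_init_str(ctx, "640:480");
        if (ret < 0)
            return ret;

        *out = ctx;
        return 0;
    }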

2013-03-19 - 17ebef2 / 2c328a9 - lavu 52.20.100 / 52.9.0 - pixdesc.h
  Add av_pix_fmt_count_planes() function for counting planes in a pixel format.

2013-03-16 - ecade98 / 42c7c61 - lavfi 3.47.100 / 3.6.0
  Add AVFilterGraph.nb_filters, deprecate AVFilterGraph.filter_count.

2013-03-08 - Reference counted buffers - lavu 52.8.0, lavc 55.0.100 / 55.0.0,
  lavf 55.0.100 / 55.0.0, lavd 54.4.100 / 54.0.0, lavfi 3.5.0
  36099df / 8e401db, 532f31a / 1cec062 - add a new API for reference counted
    buffers and buffer pools (new header libavutil/buffer.h).
  2653e12 / 1afddbe - add AVPacket.buf to allow reference counting for the
    AVPacket data. Add av_packet_from_data() function for constructing packets
    from av_malloc()ed data.
  c4e8821 / 7ecc2d4 - move AVFrame from lavc to lavu (new header
    libavutil/frame.h), add AVFrame.buf/extended_buf to allow reference
    counting for the AVFrame data. Add new API for working with
    reference-counted AVFrames.
  80e9e63 / 759001c - add the refcounted_frames field to AVCodecContext to make
    audio and video decoders return reference-counted frames. Add get_buffer2()
    callback to AVCodecContext which allocates reference-counted frames.
    Add avcodec_default_get_buffer2() as the default get_buffer2()
    implementation.
    Deprecate AVCodecContext.get_buffer() / release_buffer() /
    reget_buffer(), avcodec_default_get_buffer(),
    avcodec_default_reget_buffer(), avcodec_default_release_buffer().
    Remove avcodec_default_free_buffers(), which should not have ever
    been called from outside of lavc.
    Deprecate the following AVFrame fields:
      * base -- is now stored in AVBufferRef
      * reference, type, buffer_hints -- are unnecessary in the new API
      * hwaccel_picture_private, owner, thread_opaque -- should not
        have been accessed from outside of lavc
      * qscale_table, qstride, qscale_type, mbskip_table, motion_val,
        mb_type, dct_coeff, ref_index -- mpegvideo-specific tables,
        which are not exported anymore.
  a05a44e / 7e35037 - switch libavfilter to use AVFrame instead of
    AVFilterBufferRef. Add av_buffersrc_add_frame(), deprecate
    av_buffersrc_buffer().
    Add av_buffersink_get_frame() and av_buffersink_get_samples(),
    deprecate av_buffersink_read() and av_buffersink_read_samples().
    Deprecate AVFilterBufferRef and all functions for working with it.

2013-03-17 - 6c17ff8 / 12c5c1d - lavu 52.19.100 / 52.8.0 - avstring.h
  Add av_isdigit, av_isgraph, av_isspace, av_isxdigit.

2013-02-23 - 71cf094 / 9f12235 - lavfi 3.40.100 / 3.4.0 - avfiltergraph.h
  Add resample_lavr_opts to AVFilterGraph for setting libavresample options
  for auto-inserted resample filters.

2013-01-25 - e7e14bc / 38c1466 - lavu 52.17.100 / 52.7.0 - dict.h
  Add av_dict_parse_string() to set multiple key/value pairs at once from a
  string.

2013-01-25 - 25be630 / b85a5e8 - lavu 52.16.100 / 52.6.0 - avstring.h
  Add av_strnstr().

2013-01-15 - e7e0186 / 8ee288d - lavu 52.15.100 / 52.5.0 - hmac.h
  Add AVHMAC.

2013-01-13 - 8ee7b38 / 44e065d - lavc 54.87.100 / 54.36.0 - vdpau.h
  Add AVVDPAUContext struct for VDPAU hardware-accelerated decoding.

2013-01-12 - dae382b / 169fb94 - lavu 52.14.100 / 52.4.0 - pixdesc.h
  Add AV_PIX_FMT_VDPAU flag.

2013-01-07 - 249fca3 / 074a00d - lavr 1.1.0
  Add avresample_set_channel_mapping() for input channel reordering,
  duplication, and silencing.

2012-12-29 - 2ce43b3 / d8fd06c - lavu 52.13.100 / 52.3.0 - avstring.h
  Add av_basename() and av_dirname().
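
A hedged sketch of the reference-counted decoding flow introduced above: with
AVCodecContext.refcounted_frames enabled, decoded frames own their data through
AVBufferRefs and must be released with av_frame_unref(). The helper name and
the assumption that dec_ctx, pkt and frame are set up elsewhere are
illustrative.

    #include <libavcodec/avcodec.h>
    #include <libavutil/frame.h>

    static int decode_one(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame)
    {
        /* dec_ctx is assumed to have been opened with
         * dec_ctx->refcounted_frames = 1 set before avcodec_open2(). */
        int got_frame = 0;
        int ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, pkt);
        if (ret < 0)
            return ret;

        if (got_frame) {
            /* ... use frame; its data is held by frame->buf[] references ... */
            av_frame_unref(frame);   /* release our reference when done */
        }
        return ret;
    }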

2012-11-11 - 03b0787 / 5980f5d - lavu 52.6.100 / 52.2.0 - audioconvert.h
  Rename audioconvert.h to channel_layout.h. audioconvert.h is now deprecated.

2012-11-05 - 7d26be6 / dfde8a3 - lavu 52.5.100 / 52.1.0 - intmath.h
  Add av_ctz() for trailing zero bit count.

2012-10-21 - e3a91c5 / a893655 - lavu 51.77.100 / 51.45.0 - error.h
  Add AVERROR_EXPERIMENTAL.

2012-10-12 - a33ed6b / d2fcb35 - lavu 51.76.100 / 51.44.0 - pixdesc.h
  Add functions for accessing pixel format descriptors.
  Accessing the av_pix_fmt_descriptors array directly is now
  deprecated.

2012-10-11 - f391e40 / 9a92aea - lavu 51.75.100 / 51.43.0 - aes.h, md5.h, sha.h, tree.h
  Add functions for allocating the opaque contexts for the algorithms.

2012-10-10 - de31814 / b522000 - lavf 54.32.100 / 54.18.0 - avio.h
  Add avio_closep to complement avio_close.

2012-10-08 - ae77266 / 78071a1 - lavu 51.74.100 / 51.42.0 - pixfmt.h
  Rename PixelFormat to AVPixelFormat and all PIX_FMT_* to AV_PIX_FMT_*.
  To provide backwards compatibility, PixelFormat is now #defined as
  AVPixelFormat.
  Note that this can break user code that includes pixfmt.h and uses the
  'PixelFormat' identifier. Such code should either #undef PixelFormat
  or stop using the PixelFormat name.

2012-10-05 - 55c49af / e7ba5b1 - lavr 1.0.0 - avresample.h
  Data planes parameters to avresample_convert() and
  avresample_read() are now uint8_t** instead of void**.
  Libavresample is now stable.

2012-09-24 - 46a3595 / a42aada - lavc 54.59.100 / 54.28.0 - avcodec.h
  Add avcodec_free_frame(). This function must now
  be used for freeing an AVFrame.

2012-09-12 - e3e09f2 / 8919fee - lavu 51.73.100 / 51.41.0 - audioconvert.h
  Added AV_CH_LOW_FREQUENCY_2 channel mask value.

2012-09-04 - b21b5b0 / 686a329 - lavu 51.71.100 / 51.40.0 - opt.h
  Reordered the fields in default_val in AVOption, changed which
  default_val field is used for which AVOptionType.

2012-08-30 - 98298eb / a231832 - lavc 54.54.101 / 54.26.1 - avcodec.h
  Add codec descriptor properties AV_CODEC_PROP_LOSSY and
  AV_CODEC_PROP_LOSSLESS.

2012-08-18 - lavc 54.26 - avcodec.h
  Add codec descriptors for accessing codec properties without having
  to refer to a specific decoder or encoder.

  f5f3684 / c223d79 - Add an AVCodecDescriptor struct and functions
                      avcodec_descriptor_get() and avcodec_descriptor_next().
  f5f3684 / 51efed1 - Add AVCodecDescriptor.props and AV_CODEC_PROP_INTRA_ONLY.
  6c180b3 / 91e59fe - Add avcodec_descriptor_get_by_name().

2012-08-08 - f5f3684 / 987170c - lavu 51.68.100 / 51.38.0 - dict.h
  Add av_dict_count().

2012-08-07 - 7a72695 / 104e10f - lavc 54.51.100 / 54.25.0 - avcodec.h
  Rename CodecID to AVCodecID and all CODEC_ID_* to AV_CODEC_ID_*.
  To provide backwards compatibility, CodecID is now #defined as AVCodecID.
  Note that this can break user code that includes avcodec.h and uses the
  'CodecID' identifier. Such code should either #undef CodecID or stop using the
  CodecID name.

2012-08-03 - e776ee8 / 239fdf1 - lavu 51.66.101 / 51.37.1 - cpu.h
                                 lsws 2.1.1 - swscale.h
  Rename AV_CPU_FLAG_MMX2 ---> AV_CPU_FLAG_MMXEXT.
  Rename SWS_CPU_CAPS_MMX2 ---> SWS_CPU_CAPS_MMXEXT.

2012-07-29 - 7c26761 / 681ed00 - lavf 54.22.100 / 54.13.0 - avformat.h
  Add AVFMT_FLAG_NOBUFFER for low latency use cases.
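
A small sketch of the codec descriptor API mentioned above: querying codec
properties without instantiating an encoder or decoder. The helper name is
illustrative.

    #include <stdio.h>
    #include <libavcodec/avcodec.h>

    static void print_codec_props(enum AVCodecID id)
    {
        const AVCodecDescriptor *desc = avcodec_descriptor_get(id);
        if (!desc)
            return;
        printf("%s:%s%s%s\n", desc->name,
               (desc->props & AV_CODEC_PROP_INTRA_ONLY) ? " intra-only" : "",
               (desc->props & AV_CODEC_PROP_LOSSY)      ? " lossy"      : "",
               (desc->props & AV_CODEC_PROP_LOSSLESS)   ? " lossless"   : "");
    }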

2012-07-10 - fbe0245 / f3e5e6f - lavu 51.65.100 / 51.37.0
  Add av_malloc_array() and av_mallocz_array().

2012-06-22 - e847f41 / d3d3a32 - lavu 51.61.100 / 51.34.0
  Add av_usleep().

2012-06-20 - 4da42eb / ae0a301 - lavu 51.60.100 / 51.33.0
  Move av_gettime() to libavutil, add libavutil/time.h.

2012-06-09 - 82edf67 / 3971be0 - lavr 0.0.3
  Add a parameter to avresample_build_matrix() for Dolby/DPLII downmixing.

2012-06-12 - c7b9eab / 9baeff9 - lavfi 2.79.100 / 2.23.0 - avfilter.h
  Add AVFilterContext.nb_inputs/outputs. Deprecate
  AVFilterContext.input/output_count.

2012-06-12 - c7b9eab / 84b9fbe - lavfi 2.79.100 / 2.22.0 - avfilter.h
  Add avfilter_pad_get_type() and avfilter_pad_get_name(). Those
  should now be used instead of accessing AVFilterPad members
  directly.

2012-06-12 - 3630a07 / b0f0dfc - lavu 51.57.100 / 51.32.0 - audioconvert.h
  Add av_get_channel_layout_channel_index(), av_get_channel_name()
  and av_channel_layout_extract_channel().

2012-05-25 - 53ce990 / 154486f - lavu 51.55.100 / 51.31.0 - opt.h
  Add av_opt_set_bin().

2012-05-15 - lavfi 2.74.100 / 2.17.0 - Add support for audio filters
  61930bd / ac71230, 1cbf7fb / a2cd9be - add video/audio buffer sink in a new
                                         installed header buffersink.h
  1cbf7fb / 720c6b7 - add av_buffersrc_write_frame(), deprecate
                      av_vsrc_buffer_add_frame()
  61930bd / ab16504 - add avfilter_copy_buf_props()
  61930bd / 9453c9e - add extended_data to AVFilterBuffer
  61930bd / 1b8c927 - add avfilter_get_audio_buffer_ref_from_arrays()

2012-05-09 - lavu 51.53.100 / 51.30.0 - samplefmt.h
  61930bd / 142e740 - add av_samples_copy()
  61930bd / 6d7f617 - add av_samples_set_silence()

2012-05-09 - 61930bd / a5117a2 - lavc 54.21.101 / 54.13.1
  For audio formats with fixed frame size, the last frame
  no longer needs to be padded with silence, libavcodec
  will handle this internally (effectively all encoders
  behave as if they had CODEC_CAP_SMALL_LAST_FRAME set).

2012-05-07 - 653d117 / 828bd08 - lavc 54.20.100 / 54.13.0 - avcodec.h
  Add sample_rate and channel_layout fields to AVFrame.

2012-05-01 - 2330eb1 / 4010d72 - lavr 0.0.1
  Change AV_MIX_COEFF_TYPE_Q6 to AV_MIX_COEFF_TYPE_Q8.

2012-04-25 - e890b68 / 3527a73 - lavu 51.48.100 / 51.29.0 - cpu.h
  Add av_parse_cpu_flags().

2012-04-24 - 3ead79e / c8af852 - lavr 0.0.0
  Add libavresample audio conversion library.

2012-04-20 - 3194ab7 / 0c0d1bc - lavu 51.47.100 / 51.28.0 - audio_fifo.h
  Add audio FIFO functions:
    av_audio_fifo_free()
    av_audio_fifo_alloc()
    av_audio_fifo_realloc()
    av_audio_fifo_write()
    av_audio_fifo_read()
    av_audio_fifo_drain()
    av_audio_fifo_reset()
    av_audio_fifo_size()
    av_audio_fifo_space()

2012-04-14 - lavfi 2.70.100 / 2.16.0 - avfiltergraph.h
  7432bcf / d7bcc71 - Add avfilter_graph_parse2().

2012-04-08 - 6bfb304 / 4d693b0 - lavu 51.46.100 / 51.27.0 - samplefmt.h
  Add av_get_packed_sample_fmt() and av_get_planar_sample_fmt().

2012-03-21 - b75c67d - lavu 51.43.100
  Add bprint.h for bprint API.

2012-02-21 - 9cbf17e - lavc 54.4.100
  Add av_get_pcm_codec() function.

2012-02-16 - 560b224 - libswr 0.7.100
  Add swr_set_matrix() function.

2012-02-09 - c28e7af - lavu 51.39.100
  Add a new installed header libavutil/timestamp.h with timestamp
  utilities.

2012-02-06 - 70ffda3 - lavu 51.38.100
  Add av_parse_ratio() function to parseutils.h.

2012-02-06 - 70ffda3 - lavu 51.38.100
  Add AV_LOG_MAX_OFFSET macro to log.h.

2012-02-02 - 0eaa123 - lavu 51.37.100
  Add public timecode helpers.
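
A sketch of the audio FIFO API listed above (libavutil/audio_fifo.h). The
stereo/planar-float/1024-sample parameters and the helper name are examples.

    #include <libavutil/audio_fifo.h>
    #include <libavutil/samplefmt.h>
    #include <libavutil/error.h>

    static int buffer_samples(uint8_t **input, int nb_in,
                              uint8_t **output, int nb_out)
    {
        AVAudioFifo *fifo = av_audio_fifo_alloc(AV_SAMPLE_FMT_FLTP, 2, 1024);
        int ret = 0;

        if (!fifo)
            return AVERROR(ENOMEM);

        /* write grows the FIFO as needed; read drains it */
        if (av_audio_fifo_write(fifo, (void **)input, nb_in) < nb_in)
            ret = AVERROR(ENOMEM);
        else if (av_audio_fifo_size(fifo) >= nb_out)
            ret = av_audio_fifo_read(fifo, (void **)output, nb_out);

        av_audio_fifo_free(fifo);
        return ret;
    }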

2012-01-24 - 0c3577b - lavfi 2.60.100
  Add avfilter_graph_dump.

2012-03-20 - 0ebd836 / 3c90cc2 - lavf 54.2.0
  Deprecate av_read_packet(), use av_read_frame() with
  AVFMT_FLAG_NOPARSE | AVFMT_FLAG_NOFILLIN in AVFormatContext.flags.

2012-03-05 - lavc 54.10.100 / 54.8.0
  f095391 / 6699d07 - Add av_get_exact_bits_per_sample()
  f095391 / 9524cf7 - Add av_get_audio_frame_duration()

2012-03-04 - 2af8f2c / 44fe77b - lavc 54.8.100 / 54.7.0 - avcodec.h
  Add av_codec_is_encoder/decoder().

2012-03-01 - 1eb7f39 / 442c132 - lavc 54.5.100 / 54.3.0 - avcodec.h
  Add av_packet_shrink_side_data.

2012-02-29 - 79ae084 / dd2a4bc - lavf 54.2.100 / 54.2.0 - avformat.h
  Add AVStream.attached_pic and AV_DISPOSITION_ATTACHED_PIC,
  used for dealing with attached pictures/cover art.

2012-02-25 - 305e4b3 / c9bca80 - lavu 51.41.100 / 51.24.0 - error.h
  Add AVERROR_UNKNOWN.
  NOTE: this was backported to 0.8

2012-02-20 - eadd426 / e9cda85 - lavc 54.2.100 / 54.2.0
  Add duration field to AVCodecParserContext.

2012-02-20 - eadd426 / 0b42a93 - lavu 51.40.100 / 51.23.1 - mathematics.h
  Add av_rescale_q_rnd().

2012-02-08 - f2b20b7 / 38d5533 - lavu 51.38.101 / 51.22.1 - pixdesc.h
  Add PIX_FMT_PSEUDOPAL flag.

2012-02-08 - f2b20b7 / 52f82a1 - lavc 54.2.100 / 54.1.0
  Add avcodec_encode_video2() and deprecate avcodec_encode_video().

2012-02-01 - 4c677df / 316fc74 - lavc 54.1.0
  Add av_fast_padded_malloc() as alternative for av_realloc() when aligned
  memory is required. The buffer will always have FF_INPUT_BUFFER_PADDING_SIZE
  zero-padded bytes at the end.

2012-01-31 - a369a6b / dd6d3b0 - lavf 54.1.0
  Add avformat_get_riff_video_tags() and avformat_get_riff_audio_tags().
  NOTE: this was backported to 0.8

2012-01-31 - a369a6b / af08d9a - lavc 54.1.0
  Add avcodec_is_open() function.
  NOTE: this was backported to 0.8

2012-01-30 - 151ecc2 / 8b93312 - lavu 51.36.100 / 51.22.0 - intfloat.h
  Add a new installed header libavutil/intfloat.h with int/float punning
  functions.
  NOTE: this was backported to 0.8

2012-01-25 - lavf 53.31.100 / 53.22.0
  3c5fe5b / f1caf01 - Allow doing av_write_frame(ctx, NULL) for flushing
                      possible buffered data within a muxer. Added
                      AVFMT_ALLOW_FLUSH for muxers supporting it
                      (av_write_frame makes sure it is called only for muxers
                      with this flag).

2012-01-15 - lavc 53.56.105 / 53.34.0
  New audio encoding API:
  67f5650 / b2c75b6 - Add CODEC_CAP_VARIABLE_FRAME_SIZE capability for use by
                      audio encoders.
  67f5650 / 5ee5fa0 - Add avcodec_fill_audio_frame() as a convenience function.
  67f5650 / b2c75b6 - Add avcodec_encode_audio2() and deprecate
                      avcodec_encode_audio(). Add AVCodec.encode2().

2012-01-12 - b18e17e / 3167dc9 - lavfi 2.59.100 / 2.15.0
  Add a new installed header -- libavfilter/version.h -- with version macros.

2011-12-08 - a502939 - lavfi 2.52.0
  Add av_buffersink_poll_frame() to buffersink.h.

2011-12-08 - 26c6fec - lavu 51.31.0
  Add av_log_format_line.

2011-12-03 - 976b095 - lavu 51.30.0
  Add AVERROR_BUG.

2011-11-24 - 573ffbb - lavu 51.28.1
  Add av_get_alt_sample_fmt() to samplefmt.h.

2011-11-03 - 96949da - lavu 51.23.0
  Add av_strcasecmp() and av_strncasecmp() to avstring.h.

2011-10-20 - b35e9e1 - lavu 51.22.0
  Add av_strtok() to avstring.h.

2012-01-03 - ad1c8dd / b73ec05 - lavu 51.34.100 / 51.21.0
  Add av_popcount64.

2011-12-18 - 7c29313 / 8400b12 - lavc 53.46.1 / 53.28.1
  Deprecate AVFrame.age. The field is unused.
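
A hedged sketch of the avcodec_encode_audio2() flow from the "New audio
encoding API" entry above. The helper name and the assumption that enc_ctx is
an opened audio encoder and frame a filled audio AVFrame (or NULL to flush a
delayed encoder) are illustrative.

    #include <libavcodec/avcodec.h>

    static int encode_audio_frame(AVCodecContext *enc_ctx, AVFrame *frame)
    {
        AVPacket pkt;
        int got_packet = 0;
        int ret;

        av_init_packet(&pkt);
        pkt.data = NULL;   /* let the encoder allocate the packet payload */
        pkt.size = 0;

        ret = avcodec_encode_audio2(enc_ctx, &pkt, frame, &got_packet);
        if (ret < 0)
            return ret;

        if (got_packet) {
            /* ... write pkt to the muxer ... */
            av_free_packet(&pkt);
        }
        return 0;
    }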

2011-12-12 - 8bc7fe4 / 5266045 - lavf 53.25.0 / 53.17.0
  Add avformat_close_input().
  Deprecate av_close_input_file() and av_close_input_stream().

2011-12-02 - e4de716 / 0eea212 - lavc 53.40.0 / 53.25.0
  Add nb_samples and extended_data fields to AVFrame.
  Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE.
  Deprecate avcodec_decode_audio3() in favor of avcodec_decode_audio4().
  avcodec_decode_audio4() writes output samples to an AVFrame, which allows
  audio decoders to use get_buffer().

2011-12-04 - e4de716 / 560f773 - lavc 53.40.0 / 53.24.0
  Change AVFrame.data[4]/base[4]/linesize[4]/error[4] to [8] at next major bump.
  Change AVPicture.data[4]/linesize[4] to [8] at next major bump.
  Change AVCodecContext.error[4] to [8] at next major bump.
  Add AV_NUM_DATA_POINTERS to simplify the bump transition.

2011-11-23 - 8e576d5 / bbb46f3 - lavu 51.27.0 / 51.18.0
  Add av_samples_get_buffer_size(), av_samples_fill_arrays(), and
  av_samples_alloc(), to samplefmt.h.

2011-11-23 - 8e576d5 / 8889cc4 - lavu 51.27.0 / 51.17.0
  Add planar sample formats and av_sample_fmt_is_planar() to samplefmt.h.

2011-11-19 - dbb38bc / f3a29b7 - lavc 53.36.0 / 53.21.0
  Move some AVCodecContext fields to a new private struct, AVCodecInternal,
  which is accessed from a new field, AVCodecContext.internal.
  Fields moved:
    AVCodecContext.internal_buffer       --> AVCodecInternal.buffer
    AVCodecContext.internal_buffer_count --> AVCodecInternal.buffer_count
    AVCodecContext.is_copy               --> AVCodecInternal.is_copy

2011-11-16 - 8709ba9 / 6270671 - lavu 51.26.0 / 51.16.0
  Add av_timegm().

2011-11-13 - lavf 53.21.0 / 53.15.0
  New interrupt callback API, allowing per-AVFormatContext/AVIOContext
  interrupt callbacks.
  5f268ca / 6aa0b98 - Add AVIOInterruptCB struct and the interrupt_callback
                      field to AVFormatContext.
  5f268ca / 1dee0ac - Add avio_open2() with additional parameters. Those are
                      an interrupt callback and an options AVDictionary.
                      This will allow passing AVOptions to protocols after
                      lavf 54.0.

2011-11-06 - 13b7781 / ba04ecf - lavu 51.24.0 / 51.14.0
  Add av_strcasecmp() and av_strncasecmp() to avstring.h.

2011-11-06 - 13b7781 / 07b172f - lavu 51.24.0 / 51.13.0
  Add av_toupper()/av_tolower().

2011-11-05 - d8cab5c / b6d08f4 - lavf 53.19.0 / 53.13.0
  Add avformat_network_init()/avformat_network_deinit().

2011-10-27 - 6faf0a2 / 512557b - lavc 53.24.0 / 53.15.0
  Remove avcodec_parse_frame.
  Deprecate AVCodecContext.parse_only and CODEC_CAP_PARSE_ONLY.

2011-10-19 - d049257 / 569129a - lavf 53.17.0 / 53.10.0
  Add avformat_new_stream(). Deprecate av_new_stream().

2011-10-13 - 91eb1b1 / b631fba - lavf 53.16.0 / 53.9.0
  Add AVFMT_NO_BYTE_SEEK AVInputFormat flag.

2011-10-12 - lavu 51.21.0 / 51.12.0
  AVOptions API rewrite.

  f884ef0 / 145f741 - FF_OPT_TYPE* renamed to AV_OPT_TYPE_*
  New setting/getting functions with slightly different semantics:
  f884ef0 / dac66da - av_set_string3 -> av_opt_set
                      av_set_double  -> av_opt_set_double
                      av_set_q       -> av_opt_set_q
                      av_set_int     -> av_opt_set_int
  f884ef0 / 41d9d51 - av_get_string  -> av_opt_get
                      av_get_double  -> av_opt_get_double
                      av_get_q       -> av_opt_get_q
                      av_get_int     -> av_opt_get_int
  f884ef0 / 8c5dcaa - trivial rename av_next_option -> av_opt_next
  f884ef0 / 641c7af - new functions av_opt_child_next, av_opt_child_class_next
                      and av_opt_find2()

2011-09-22 - a70e787 - lavu 51.17.0
  Add av_x_if_null().
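
A sketch of the per-context interrupt callback API described above. The
helper names, the abort-on-deadline logic and the 5-second timeout are all
illustrative.

    #include <time.h>
    #include <libavformat/avformat.h>

    static int interrupt_cb(void *opaque)
    {
        time_t *deadline = opaque;
        return time(NULL) > *deadline;   /* non-zero aborts the blocking call */
    }

    static int open_with_timeout(AVFormatContext **fmt_ctx, const char *url)
    {
        /* in real code the opaque pointer must stay valid for as long as the
         * context keeps using the callback */
        time_t deadline = time(NULL) + 5;

        *fmt_ctx = avformat_alloc_context();
        if (!*fmt_ctx)
            return AVERROR(ENOMEM);

        (*fmt_ctx)->interrupt_callback.callback = interrupt_cb;
        (*fmt_ctx)->interrupt_callback.opaque   = &deadline;

        return avformat_open_input(fmt_ctx, url, NULL, NULL);
    }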

2011-09-18 - 645cebb - lavc 53.16.0
  Add showall flag2.

2011-09-16 - ea8de10 - lavfi 2.42.0
  Add avfilter_all_channel_layouts.

2011-09-16 - 9899037 - lavfi 2.41.0
  Rename avfilter_all_* function names to avfilter_make_all_*.
  In particular, apply the renames:
    avfilter_all_formats         -> avfilter_make_all_formats
    avfilter_all_channel_layouts -> avfilter_make_all_channel_layouts
    avfilter_all_packing_formats -> avfilter_make_all_packing_formats

2011-09-12 - 4381bdd - lavfi 2.40.0
  Change AVFilterBufferRefAudioProps.sample_rate type from uint32_t to int.

2011-09-12 - 2c03174 - lavfi 2.40.0
  Simplify signature for avfilter_get_audio_buffer(), make it
  consistent with avfilter_get_video_buffer().

2011-09-06 - 4f7dfe1 - lavfi 2.39.0
  Rename libavfilter/vsink_buffer.h to libavfilter/buffersink.h.

2011-09-06 - c4415f6 - lavfi 2.38.0
  Unify video and audio sink API.
  In particular, add av_buffersink_get_buffer_ref(), deprecate
  av_vsink_buffer_get_video_buffer_ref() and change the value for the
  opaque field passed to the abuffersink init function.

2011-09-04 - 61e2e29 - lavu 51.16.0
  Add av_asprintf().

2011-08-22 - dacd827 - lavf 53.10.0
  Add av_find_program_from_stream().

2011-08-20 - 69e2c1a - lavu 51.13.0
  Add av_get_media_type_string().

2011-09-03 - 1889c67 / fb4ca26 - lavc 53.13.0
                                 lavf 53.11.0
                                 lsws 2.1.0
  Add {avcodec,avformat,sws}_get_class().

2011-08-03 - 1889c67 / c11fb82 - lavu 51.15.0
  Add AV_OPT_SEARCH_FAKE_OBJ flag for av_opt_find() function.

2011-08-14 - 323b930 - lavu 51.12.0
  Add av_fifo_peek2(), deprecate av_fifo_peek().

2011-08-26 - lavu 51.14.0 / 51.9.0
  976a8b2 / add41de..976a8b2 / abc78a5 - Do not include intfloat_readwrite.h,
  mathematics.h, rational.h, pixfmt.h, or log.h from avutil.h.

2011-08-16 - 27fbe31 / 48f9e45 - lavf 53.11.0 / 53.8.0
  Add avformat_query_codec().

2011-08-16 - 27fbe31 / bca06e7 - lavc 53.11.0
  Add avcodec_get_type().

2011-08-06 - 0cb233c / 2f63440 - lavf 53.7.0
  Add error_recognition to AVFormatContext.

2011-08-02 - 1d186e9 / 9d39cbf - lavc 53.9.1
  Add AV_PKT_FLAG_CORRUPT AVPacket flag.

2011-07-16 - b57df29 - lavfi 2.27.0
  Add audio packing negotiation fields and helper functions.
  In particular, add AVFilterPacking enum, planar, in_packings and
  out_packings fields to AVFilterLink, and the functions:
    avfilter_set_common_packing_formats()
    avfilter_all_packing_formats()

2011-07-10 - 3602ad7 / a67c061 - lavf 53.6.0
  Add avformat_find_stream_info(), deprecate av_find_stream_info().
  NOTE: this was backported to 0.7

2011-07-10 - 3602ad7 / 0b950fe - lavc 53.8.0
  Add avcodec_open2(), deprecate avcodec_open().
  NOTE: this was backported to 0.7

  Add avcodec_alloc_context3. Deprecate avcodec_alloc_context() and
  avcodec_alloc_context2().

2011-07-01 - b442ca6 - lavf 53.5.0 - avformat.h
  Add function av_get_output_timestamp().

2011-06-28 - 5129336 - lavu 51.11.0 - avutil.h
  Define the AV_PICTURE_TYPE_NONE value in AVPictureType enum.

2011-06-19 - fd2c0a5 - lavfi 2.23.0 - avfilter.h
  Add layout negotiation fields and helper functions.
  In particular, add in_chlayouts and out_chlayouts to AVFilterLink,
  and the functions:
    avfilter_set_common_sample_formats()
    avfilter_set_common_channel_layouts()
    avfilter_all_channel_layouts()

2011-06-19 - 527ca39 - lavfi 2.22.0 - AVFilterFormats
  Change type of AVFilterFormats.formats from int * to int64_t *,
  and update formats handling API accordingly.

  avfilter_make_format_list() still takes a int32_t array and converts
  it to int64_t. A new function, avfilter_make_format64_list(), that
  takes int64_t arrays has been added.

2011-06-19 - 44f669e - lavfi 2.21.0 - vsink_buffer.h
  Add video sink buffer and vsink_buffer.h public header.

2011-06-12 - 9fdf772 - lavfi 2.18.0 - avcodec.h
  Add avfilter_get_video_buffer_ref_from_frame() function in
  libavfilter/avcodec.h.

2011-06-12 - c535494 - lavfi 2.17.0 - avfiltergraph.h
  Add avfilter_inout_alloc() and avfilter_inout_free() functions.

2011-06-12 - 6119b23 - lavfi 2.16.0 - avfilter_graph_parse()
  Change avfilter_graph_parse() signature.

2011-06-23 - 686959e / 67e9ae1 - lavu 51.10.0 / 51.8.0 - attributes.h
  Add av_printf_format().

2011-06-16 - 2905e3f / 05e84c9, 2905e3f / 25de595 - lavf 53.4.0 / 53.2.0 - avformat.h
  Add avformat_open_input and avformat_write_header().
  Deprecate av_open_input_stream, av_open_input_file,
  AVFormatParameters and av_write_header.

2011-06-16 - 2905e3f / 7e83e1c, 2905e3f / dc59ec5 - lavu 51.9.0 / 51.7.0 - opt.h
  Add av_opt_set_dict() and av_opt_find().
  Deprecate av_find_opt().
  Add AV_DICT_APPEND flag.

2011-06-10 - 45fb647 / cb7c11c - lavu 51.6.0 - opt.h
  Add av_opt_flag_is_set().

2011-06-10 - c381960 - lavfi 2.15.0 - avfilter_get_audio_buffer_ref_from_arrays
  Add avfilter_get_audio_buffer_ref_from_arrays() to avfilter.h.

2011-06-09 - f9ecb84 / d9f80ea - lavu 51.8.0 - AVMetadata
  Move AVMetadata from lavf to lavu and rename it to
  AVDictionary -- new installed header dict.h.
  All av_metadata_* functions renamed to av_dict_*.

2011-06-07 - d552f61 / a6703fa - lavu 51.8.0 - av_get_bytes_per_sample()
  Add av_get_bytes_per_sample() in libavutil/samplefmt.h.
  Deprecate av_get_bits_per_sample_fmt().

2011-06-05 - f956924 / b39b062 - lavu 51.8.0 - opt.h
  Add av_opt_free convenience function.

2011-06-06 - 95a0242 - lavfi 2.14.0 - AVFilterBufferRefAudioProps
  Remove AVFilterBufferRefAudioProps.size, and use nb_samples in
  avfilter_get_audio_buffer() and avfilter_default_get_audio_buffer() in
  place of size.

2011-06-06 - 0bc2cca - lavu 51.6.0 - av_samples_alloc()
  Switch nb_channels and nb_samples parameters order in
  av_samples_alloc().

2011-06-06 - e1c7414 - lavu 51.5.0 - av_samples_*
  Change the data layout created by av_samples_fill_arrays() and
  av_samples_alloc().

2011-06-06 - 27bcf55 - lavfi 2.13.0 - vsrc_buffer.h
  Make av_vsrc_buffer_add_video_buffer_ref() accept an additional
  flags parameter in input.

2011-06-03 - e977ca2 - lavfi 2.12.0 - avfilter_link_free()
  Add avfilter_link_free() function.

2011-06-02 - 5ad38d9 - lavu 51.4.0 - av_force_cpu_flags()
  Add av_cpu_flags() in libavutil/cpu.h.

2011-05-28 - e71f260 - lavu 51.3.0 - pixdesc.h
  Add av_get_pix_fmt_name() in libavutil/pixdesc.h, and deprecate
  avcodec_get_pix_fmt_name() in libavcodec/avcodec.h in its favor.

2011-05-25 - 39e4206 / 30315a8 - lavf 53.3.0 - avformat.h
  Add fps_probe_size to AVFormatContext.

2011-05-22 - 5ecdfd0 - lavf 53.2.0 - avformat.h
  Introduce avformat_alloc_output_context2() and deprecate
  avformat_alloc_output_context().

2011-05-22 - 83db719 - lavfi 2.10.0 - vsrc_buffer.h
  Make libavfilter/vsrc_buffer.h public.

2011-05-19 - c000a9f - lavfi 2.8.0 - avcodec.h
  Add av_vsrc_buffer_add_frame() to libavfilter/avcodec.h.

2011-05-14 - 9fdf772 - lavfi 2.6.0 - avcodec.h
  Add avfilter_get_video_buffer_ref_from_frame() to libavfilter/avcodec.h.
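
A small sketch of the AVDictionary API that replaced AVMetadata above
(av_dict_set()/av_dict_get()/av_dict_free()). The keys, values and helper
name are examples.

    #include <stdio.h>
    #include <libavutil/dict.h>

    static void dict_demo(void)
    {
        AVDictionary *d = NULL;
        AVDictionaryEntry *e = NULL;

        av_dict_set(&d, "title", "example", 0);
        av_dict_set(&d, "artist", "unknown", 0);

        /* iterate all entries by matching the empty prefix */
        while ((e = av_dict_get(d, "", e, AV_DICT_IGNORE_SUFFIX)))
            printf("%s=%s\n", e->key, e->value);

        av_dict_free(&d);
    }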

2011-05-18 - 75a37b5 / 64150ff - lavc 53.7.0 - AVCodecContext.request_sample_fmt
  Add request_sample_fmt field to AVCodecContext.

2011-05-10 - 59eb12f / 188dea1 - lavc 53.6.0 - avcodec.h
  Deprecate AVLPCType and the following fields in
  AVCodecContext: lpc_coeff_precision, prediction_order_method,
  min_partition_order, max_partition_order, lpc_type, lpc_passes.
  Corresponding FLAC encoder options should be used instead.

2011-05-07 - 9fdf772 - lavfi 2.5.0 - avcodec.h
  Add libavfilter/avcodec.h header and avfilter_copy_frame_props()
  function.

2011-05-07 - 18ded93 - lavc 53.5.0 - AVFrame
  Add format field to AVFrame.

2011-05-07 - 22333a6 - lavc 53.4.0 - AVFrame
  Add width and height fields to AVFrame.

2011-05-01 - 35fe66a - lavfi 2.4.0 - avfilter.h
  Rename AVFilterBufferRefVideoProps.pixel_aspect to
  sample_aspect_ratio.

2011-05-01 - 77e9dee - lavc 53.3.0 - AVFrame
  Add a sample_aspect_ratio field to AVFrame.

2011-05-01 - 1ba5727 - lavc 53.2.0 - AVFrame
  Add a pkt_pos field to AVFrame.

2011-04-29 - 35ceaa7 - lavu 51.2.0 - mem.h
  Add av_dynarray_add function for adding
  an element to a dynamic array.

2011-04-26 - d7e5aeb / bebe72f - lavu 51.1.0 - avutil.h
  Add AVPictureType enum and av_get_picture_type_char(), deprecate
  FF_*_TYPE defines and av_get_pict_type_char() defined in
  libavcodec/avcodec.h.

2011-04-26 - d7e5aeb / 10d3940 - lavfi 2.3.0 - avfilter.h
  Add pict_type and key_frame fields to AVFilterBufferRefVideo.

2011-04-26 - d7e5aeb / 7a11c82 - lavfi 2.2.0 - vsrc_buffer
  Add sample_aspect_ratio fields to vsrc_buffer arguments.

2011-04-21 - 8772156 / 94f7451 - lavc 53.1.0 - avcodec.h
  Add CODEC_CAP_SLICE_THREADS for codecs supporting sliced threading.

2011-04-15 - lavc 52.120.0 - avcodec.h
  AVPacket structure got additional members for passing side information:
    c407984 / 4de339e - introduce side information for AVPacket
    c407984 / 2d8591c - make containers pass palette change in AVPacket

2011-04-12 - lavf 52.107.0 - avio.h
  Avio cleanup, part II - deprecate the entire URLContext API:
    c55780d / 175389c - add avio_check as a replacement for url_exist
    9891004 / ff1ec0c - add avio_pause and avio_seek_time as replacements
                        for _av_url_read_fseek/fpause
    d4d0932 / cdc6a87 - deprecate av_protocol_next(), avio_enum_protocols
                        should be used instead.
    c88caa5 / 80c6e23 - rename url_set_interrupt_cb->avio_set_interrupt_cb.
    c88caa5 / f87b1b3 - rename open flags: URL_* -> AVIO_*
    d4d0932 / f8270bb - add avio_enum_protocols.
    d4d0932 / 5593f03 - deprecate URLProtocol.
    d4d0932 / c486dad - deprecate URLContext.
    d4d0932 / 026e175 - deprecate the typedef for URLInterruptCB
    c88caa5 / 8e76a19 - deprecate av_register_protocol2.
    11d7841 / b840484 - deprecate URL_PROTOCOL_FLAG_NESTED_SCHEME
    11d7841 / 1305d93 - deprecate av_url_read_seek
    11d7841 / fa104e1 - deprecate av_url_read_pause
    434f248 / 727c7aa - deprecate url_get_filename().
    434f248 / 5958df3 - deprecate url_max_packet_size().
    434f248 / 1869ea0 - deprecate url_get_file_handle().
    434f248 / 32a97d4 - deprecate url_filesize().
    434f248 / e52a914 - deprecate url_close().
    434f248 / 58a48c6 - deprecate url_seek().
    434f248 / 925e908 - deprecate url_write().
    434f248 / dce3756 - deprecate url_read_complete().
    434f248 / bc371ac - deprecate url_read().
    434f248 / 0589da0 - deprecate url_open().
    434f248 / 62eaaea - deprecate url_connect.
    434f248 / 5652bb9 - deprecate url_alloc.
    434f248 / 333e894 - deprecate url_open_protocol
    434f248 / e230705 - deprecate url_poll and URLPollEntry

2011-04-08 - lavf 52.106.0 - avformat.h
  Minor avformat.h cleanup:
    d4d0932 / a9bf9d8 - deprecate av_guess_image2_codec
    d4d0932 / c3675df - rename avf_sdp_create->av_sdp_create

2011-04-03 - lavf 52.105.0 - avio.h
  Large-scale renaming/deprecating of AVIOContext-related functions:
    2cae980 / 724f6a0 - deprecate url_fdopen
    2cae980 / 403ee83 - deprecate url_open_dyn_packet_buf
    2cae980 / 6dc7d80 - rename url_close_dyn_buf -> avio_close_dyn_buf
    2cae980 / b92c545 - rename url_open_dyn_buf -> avio_open_dyn_buf
    2cae980 / 8978fed - introduce an AVIOContext.seekable field as a replacement
                        for AVIOContext.is_streamed and url_is_streamed()
    1caa412 / b64030f - deprecate get_checksum()
    1caa412 / 4c4427a - deprecate init_checksum()
    2fd41c9 / 4ec153b - deprecate udp_set_remote_url/get_local_port
    4fa0e24 / 933e90a - deprecate av_url_read_fseek/fpause
    4fa0e24 / 8d9769a - deprecate url_fileno
    0fecf26 / b7f2fdd - rename put_flush_packet -> avio_flush
    0fecf26 / 35f1023 - deprecate url_close_buf
    0fecf26 / 83fddae - deprecate url_open_buf
    0fecf26 / d9d86e0 - rename url_fprintf -> avio_printf
    0fecf26 / 59f65d9 - deprecate url_setbufsize
    6947b0c / 3e68b3b - deprecate url_ferror
    e8bb2e2           - deprecate url_fget_max_packet_size
    76aa876           - rename url_fsize -> avio_size
    e519753           - deprecate url_fgetc
    655e45e           - deprecate url_fgets
    a2704c9           - rename url_ftell -> avio_tell
    e16ead0           - deprecate get_strz() in favor of avio_get_str
    0300db8,2af07d3   - rename url_fskip -> avio_skip
    6b4aa5d           - rename url_fseek -> avio_seek
    61840b4           - deprecate put_tag
    22a3212           - rename url_fopen/fclose -> avio_open/close.
    0ac8e2b           - deprecate put_nbyte
    77eb550           - rename put_byte         -> avio_w8
                               put_[b/l]e<type> -> avio_w[b/l]<type>
                               put_buffer       -> avio_write
    b7effd4           - rename get_byte         -> avio_r8,
                               get_[b/l]e<type> -> avio_r[b/l]<type>
                               get_buffer       -> avio_read
    b3db9ce           - deprecate get_partial_buffer
    8d9ac96           - rename av_alloc_put_byte -> avio_alloc_context

2011-03-25 - 27ef7b1 / 34b47d7 - lavc 52.115.0 - AVCodecContext.audio_service_type
  Add audio_service_type field to AVCodecContext.

2011-03-17 - e309fdc - lavu 50.40.0 - pixfmt.h
  Add PIX_FMT_BGR48LE and PIX_FMT_BGR48BE pixel formats.

2011-03-02 - 863c471 - lavf 52.103.0 - av_pkt_dump2, av_pkt_dump_log2
  Add new functions av_pkt_dump2, av_pkt_dump_log2 that use the
  source stream timebase for outputting timestamps. Deprecate
  av_pkt_dump and av_pkt_dump_log.

2011-02-20 - e731b8d - lavf 52.102.0 - avio.h
  * e731b8d - rename init_put_byte() to ffio_init_context(), deprecating the
              original, and move it to a private header so it is no longer
              part of our public API. Instead, use av_alloc_put_byte().
  * ae628ec - rename ByteIOContext to AVIOContext.

2011-02-16 - 09d171b - lavf 52.101.0 - avformat.h
                       lavu 50.39.0 - parseutils.h
  * 610219a - Add av_ prefix to dump_format().
  * f6c7375 - Replace parse_date() in lavf with av_parse_time() in lavu.
  * ab0287f - Move find_info_tag from lavf to lavu and add av_ prefix to it.

2011-02-15 - lavu 50.38.0 - merge libavcore
  libavcore is merged back completely into libavutil.

2011-02-10 - 55bad0c - lavc 52.113.0 - vbv_delay
  Add vbv_delay field to AVCodecContext.

2011-02-14 - 24a83bd - lavf 52.100.0 - AV_DISPOSITION_CLEAN_EFFECTS
  Add AV_DISPOSITION_CLEAN_EFFECTS disposition flag.

2011-02-14 - 910b5b8 - lavfi 1.76.0 - AVFilterLink sample_aspect_ratio
  Add sample_aspect_ratio field to AVFilterLink.
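
A small sketch using the renamed avio_* byte-I/O calls listed above
(put_byte -> avio_w8, put_buffer -> avio_write, url_fopen -> avio_open).
The output filename, the bytes written and the helper name are examples.

    #include <libavformat/avio.h>

    static int write_header_bytes(const char *filename)
    {
        AVIOContext *pb = NULL;
        int ret = avio_open(&pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0)
            return ret;

        avio_w8(pb, 0x42);                      /* single byte */
        avio_wl32(pb, 1234);                    /* little-endian 32-bit value */
        avio_write(pb, (const unsigned char *)"DATA", 4);
        avio_flush(pb);                         /* was put_flush_packet() */

        return avio_close(pb);
    }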

2011-02-10 - 12c14cd - lavf 52.99.0 - AVStream.disposition
  Add AV_DISPOSITION_HEARING_IMPAIRED and AV_DISPOSITION_VISUAL_IMPAIRED.

2011-02-09 - c0b102c - lavc 52.112.0 - avcodec_thread_init()
  Deprecate avcodec_thread_init()/avcodec_thread_free() use; instead
  set thread_count before calling avcodec_open.

2011-02-09 - 37b00b4 - lavc 52.111.0 - threading API
  Add CODEC_CAP_FRAME_THREADS with new restrictions on get_buffer()/
  release_buffer()/draw_horiz_band() callbacks for appropriate codecs.
  Add thread_type and active_thread_type fields to AVCodecContext.

2011-02-08 - 3940caa - lavf 52.98.0 - av_probe_input_buffer
  Add av_probe_input_buffer() to avformat.h for probing format from a
  ByteIOContext.

2011-02-06 - fe174fc - lavf 52.97.0 - avio.h
  Add flag for non-blocking protocols: URL_FLAG_NONBLOCK.

2011-02-04 - f124b08 - lavf 52.96.0 - avformat_free_context()
  Add avformat_free_context() in avformat.h.

2011-02-03 - f5b82f4 - lavc 52.109.0 - add CODEC_ID_PRORES
  Add CODEC_ID_PRORES to avcodec.h.

2011-02-03 - fe9a3fb - lavc 52.109.0 - H.264 profile defines
  Add defines for H.264 Constrained Baseline and Intra profiles.

2011-02-02 - lavf 52.95.0
  * 50196a9 - add a new installed header version.h.
  * 4efd5cf, dccbd97, 93b78d1 - add several variants of public
    avio_{put,get}_str* functions. Deprecate corresponding semi-public
    {put,get}_str*.

2011-02-02 - dfd2a00 - lavu 50.37.0 - log.h
  Make av_dlog public.

2011-01-31 - 7b3ea55 - lavfi 1.76.0 - vsrc_buffer
  Add sample_aspect_ratio fields to vsrc_buffer arguments.

2011-01-31 - 910b5b8 - lavfi 1.75.0 - AVFilterLink sample_aspect_ratio
  Add sample_aspect_ratio field to AVFilterLink.

2011-01-15 - a242ac3 - lavfi 1.74.0 - AVFilterBufferRefAudioProps
  Rename AVFilterBufferRefAudioProps.samples_nb to nb_samples.

2011-01-14 - 7f88a5b - lavf 52.93.0 - av_metadata_copy()
  Add av_metadata_copy() in avformat.h.

2011-01-07 - 81c623f - lavc 52.107.0 - deprecate reordered_opaque
  Deprecate reordered_opaque in favor of pkt_pts/dts.

2011-01-07 - 1919fea - lavc 52.106.0 - pkt_dts
  Add pkt_dts to AVFrame; this will in the future allow multithreading decoders
  to not mess up dts.

2011-01-07 - 393cbb9 - lavc 52.105.0 - pkt_pts
  Add pkt_pts to AVFrame.

2011-01-07 - 060ec0a - lavc 52.104.0 - av_get_profile_name()
  Add av_get_profile_name to libavcodec/avcodec.h.

2010-12-27 - 0ccabee - lavfi 1.71.0 - AV_PERM_NEG_LINESIZES
  Add AV_PERM_NEG_LINESIZES in avfilter.h.

2010-12-27 - 9128ae0 - lavf 52.91.0 - av_find_best_stream()
  Add av_find_best_stream to libavformat/avformat.h.

2010-12-27 - 107a7e3 - lavf 52.90.0
  Add AVFMT_NOSTREAMS flag for formats with no streams,
  like e.g. text metadata.

2010-12-22 - 0328b9e - lavu 50.36.0 - file.h
  Add functions av_file_map() and av_file_unmap() in file.h.

2010-12-19 - 0bc55f5 - lavu 50.35.0 - error.h
  Add "not found" error codes:
    AVERROR_DEMUXER_NOT_FOUND
    AVERROR_MUXER_NOT_FOUND
    AVERROR_DECODER_NOT_FOUND
    AVERROR_ENCODER_NOT_FOUND
    AVERROR_PROTOCOL_NOT_FOUND
    AVERROR_FILTER_NOT_FOUND
    AVERROR_BSF_NOT_FOUND
    AVERROR_STREAM_NOT_FOUND

2010-12-09 - c61cdd0 - lavcore 0.16.0 - avcore.h
  Move AV_NOPTS_VALUE, AV_TIME_BASE, AV_TIME_BASE_Q symbols from
  avcodec.h to avcore.h.

2010-12-04 - 16cfc96 - lavc 52.98.0 - CODEC_CAP_NEG_LINESIZES
  Add CODEC_CAP_NEG_LINESIZES codec capability flag in avcodec.h.
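
A sketch of configuring the threading fields added above instead of the
deprecated avcodec_thread_init(). The thread count and the helper name are
example values.

    #include <libavcodec/avcodec.h>

    static int open_threaded_decoder(AVCodecContext *ctx, AVCodec *codec)
    {
        /* set before opening the codec */
        ctx->thread_count = 4;
        ctx->thread_type  = FF_THREAD_FRAME | FF_THREAD_SLICE;

        return avcodec_open2(ctx, codec, NULL);
        /* after opening, ctx->active_thread_type reports what is actually used */
    }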

2010-12-04 - bb4afa1 - lavu 50.34.0 - av_get_pix_fmt_string()
  Deprecate avcodec_pix_fmt_string() in favor of
  pixdesc.h/av_get_pix_fmt_string().

2010-12-04 - 4da12e3 - lavcore 0.15.0 - av_image_alloc()
  Add av_image_alloc() to libavcore/imgutils.h.

2010-12-02 - 037be76 - lavfi 1.67.0 - avfilter_graph_create_filter()
  Add function avfilter_graph_create_filter() in avfiltergraph.h.

2010-11-25 - 4723bc2 - lavfi 1.65.0 - avfilter_get_video_buffer_ref_from_arrays()
  Add function avfilter_get_video_buffer_ref_from_arrays() in
  avfilter.h.

2010-11-21 - 176a615 - lavcore 0.14.0 - audioconvert.h
  Add a public audio channel API in audioconvert.h, and deprecate the
  corresponding functions in libavcodec:
    avcodec_get_channel_name()
    avcodec_get_channel_layout()
    avcodec_get_channel_layout_string()
    avcodec_channel_layout_num_channels()
  and the CH_* macros defined in libavcodec/avcodec.h.

2010-11-21 - 6bfc268 - lavf 52.85.0 - avformat.h
  Add av_append_packet().

2010-11-21 - a08d918 - lavc 52.97.0 - avcodec.h
  Add av_grow_packet().

2010-11-17 - 0985e1a - lavcore 0.13.0 - parseutils.h
  Add av_parse_color() declared in libavcore/parseutils.h.

2010-11-13 - cb2c971 - lavc 52.95.0 - AVCodecContext
  Add AVCodecContext.subtitle_header and AVCodecContext.subtitle_header_size
  fields.

2010-11-13 - 5aaea02 - lavfi 1.62.0 - avfiltergraph.h
  Make avfiltergraph.h public.

2010-11-13 - 4fcbb2a - lavfi 1.61.0 - avfiltergraph.h
  Remove declarations from avfiltergraph.h for the functions:
    avfilter_graph_check_validity()
    avfilter_graph_config_links()
    avfilter_graph_config_formats()
  which are now internal.
  Use avfilter_graph_config() instead.

2010-11-08 - d2af720 - lavu 50.33.0 - eval.h
  Deprecate functions:
    av_parse_and_eval_expr(),
    av_parse_expr(),
    av_eval_expr(),
    av_free_expr(),
  in favor of the functions:
    av_expr_parse_and_eval(),
    av_expr_parse(),
    av_expr_eval(),
    av_expr_free().

2010-11-08 - 24de0ed - lavfi 1.59.0 - avfilter_free()
  Rename avfilter_destroy() to avfilter_free().
  This change breaks libavfilter API/ABI.

2010-11-07 - 1e80a0e - lavfi 1.58.0 - avfiltergraph.h
  Remove graphparser.h header, move AVFilterInOut and
  avfilter_graph_parse() declarations to libavfilter/avfiltergraph.h.

2010-11-07 - 7313132 - lavfi 1.57.0 - AVFilterInOut
  Rename field AVFilterInOut.filter to AVFilterInOut.filter_ctx.
  This change breaks libavfilter API.

2010-11-04 - 97dd1e4 - lavfi 1.56.0 - avfilter_graph_free()
  Rename avfilter_graph_destroy() to avfilter_graph_free().
  This change breaks libavfilter API/ABI.

2010-11-04 - e15aeea - lavfi 1.55.0 - avfilter_graph_alloc()
  Add avfilter_graph_alloc() to libavfilter/avfiltergraph.h.

2010-11-02 - 6f84cd1 - lavcore 0.12.0 - av_get_bits_per_sample_fmt()
  Add av_get_bits_per_sample_fmt() to libavcore/samplefmt.h and
  deprecate av_get_bits_per_sample_format().

2010-11-02 - d63e456 - lavcore 0.11.0 - samplefmt.h
  Add sample format functions in libavcore/samplefmt.h:
    av_get_sample_fmt_name(),
    av_get_sample_fmt(),
    av_get_sample_fmt_string(),
  and deprecate the corresponding libavcodec/audioconvert.h functions:
    avcodec_get_sample_fmt_name(),
    avcodec_get_sample_fmt(),
    avcodec_sample_fmt_string().

2010-11-02 - 262d1c5 - lavcore 0.10.0 - samplefmt.h
  Define enum AVSampleFormat in libavcore/samplefmt.h, deprecate enum
  SampleFormat.

2010-10-16 - 2a24df9 - lavfi 1.52.0 - avfilter_graph_config()
  Add the function avfilter_graph_config() in avfiltergraph.h.
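
A sketch of the renamed expression-evaluation API noted above
(av_expr_parse_and_eval() replacing av_parse_and_eval_expr()). The expression,
the constant name "x" and the helper name are illustrative.

    #include <libavutil/eval.h>

    static double eval_example(double x)
    {
        static const char *const names[] = { "x", NULL };
        const double values[] = { x };
        double res = 0.0;

        if (av_expr_parse_and_eval(&res, "2*x + 1",
                                   names, values,
                                   NULL, NULL, NULL, NULL,  /* no custom functions */
                                   NULL, 0, NULL) < 0)
            return 0.0;
        return res;
    }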

2010-10-15 - 03700d3 - lavf 52.83.0 - metadata API
  Change demuxers to export metadata in generic format and
  muxers to accept generic format. Deprecate the public
  conversion API.

2010-10-10 - 867ae7a - lavfi 1.49.0 - AVFilterLink.time_base
  Add time_base field to AVFilterLink.

2010-09-27 - c85eef4 - lavu 50.31.0 - av_set_options_string()
  Move av_set_options_string() from libavfilter/parseutils.h to
  libavutil/opt.h.

2010-09-27 - acc0490 - lavfi 1.47.0 - AVFilterLink
  Make the AVFilterLink fields srcpad and dstpad store the pointers to
  the source and destination pads, rather than their indexes.

2010-09-27 - 372e288 - lavu 50.30.0 - av_get_token()
  Move av_get_token() from libavfilter/parseutils.h to
  libavutil/avstring.h.

2010-09-26 - 635d4ae - lsws 0.12.0 - swscale.h
  Add the functions sws_alloc_context() and sws_init_context().

2010-09-26 - 6ed0404 - lavu 50.29.0 - opt.h
  Move libavcodec/opt.h to libavutil/opt.h.

2010-09-24 - 1c1c80f - lavu 50.28.0 - av_log_set_flags()
  The default behaviour of av_log() changed due to many problems with the old
  no-repeat detection. Read the docs of AV_LOG_SKIP_REPEATED in log.h before
  enabling it for your app!

2010-09-24 - f66eb58 - lavc 52.90.0 - av_opt_show2()
  Deprecate av_opt_show() in favor of av_opt_show2().

2010-09-14 - bc6f0af - lavu 50.27.0 - av_popcount()
  Add av_popcount() to libavutil/common.h.

2010-09-08 - c6c98d0 - lavu 50.26.0 - av_get_cpu_flags()
  Add av_get_cpu_flags().

2010-09-07 - 34017fd - lavcore 0.9.0 - av_image_copy()
  Add av_image_copy().

2010-09-07 - 9686abb - lavcore 0.8.0 - av_image_copy_plane()
  Add av_image_copy_plane().

2010-09-07 - 9b7269e - lavcore 0.7.0 - imgutils.h
  Adopt hierarchical scheme for the imgutils.h function names,
  deprecate the old names.

2010-09-04 - 7160bb7 - lavu 50.25.0 - AV_CPU_FLAG_*
  Deprecate the FF_MM_* flags defined in libavcodec/avcodec.h in favor
  of the AV_CPU_FLAG_* flags defined in libavutil/cpu.h.

2010-08-26 - 5da19b5 - lavc 52.87.0 - avcodec_get_channel_layout()
  Add avcodec_get_channel_layout() in audioconvert.h.

2010-08-20 - e344336 - lavcore 0.6.0 - av_fill_image_max_pixsteps()
  Rename av_fill_image_max_pixstep() to av_fill_image_max_pixsteps().

2010-08-18 - a6ddf8b - lavcore 0.5.0 - av_fill_image_max_pixstep()
  Add av_fill_image_max_pixstep() in imgutils.h.

2010-08-17 - 4f2d2e4 - lavu 50.24.0 - AV_NE()
  Add the AV_NE macro.

2010-08-17 - ad2c950 - lavfi 1.36.0 - audio framework
  Implement AVFilterBufferRefAudioProps struct for audio properties,
  get_audio_buffer(), filter_samples() functions and related changes.

2010-08-12 - 81c1eca - lavcore 0.4.0 - av_get_image_linesize()
  Add av_get_image_linesize() in imgutils.h.

2010-08-11 - c1db7bf - lavfi 1.34.0 - AVFilterBufferRef
  Resize data and linesize arrays in AVFilterBufferRef to 8.
  This change breaks libavfilter API/ABI.

2010-08-11 - 9f08d80 - lavc 52.85.0 - av_picture_data_copy()
  Add av_picture_data_copy in avcodec.h.

2010-08-11 - 84c0386 - lavfi 1.33.0 - avfilter_open()
  Change avfilter_open() signature:
    AVFilterContext *avfilter_open(AVFilter *filter, const char *inst_name) ->
    int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name);
  This change breaks libavfilter API/ABI.

2010-08-11 - cc80caf - lavfi 1.32.0 - AVFilterBufferRef
  Add a type field to AVFilterBufferRef, and move video specific
  properties to AVFilterBufferRefVideoProps.
  This change breaks libavfilter API/ABI.

2010-08-07 - 5d4890d - lavfi 1.31.0 - AVFilterLink
  Rename AVFilterLink fields:
    AVFilterLink.srcpic  -> AVFilterLink.src_buf
    AVFilterLink.cur_pic -> AVFilterLink.cur_buf
    AVFilterLink.outpic  -> AVFilterLink.out_buf

2010-08-07 - 7fce481 - lavfi 1.30.0
  Rename functions and fields:
    avfilter_(un)ref_pic       -> avfilter_(un)ref_buffer
    avfilter_copy_picref_props -> avfilter_copy_buffer_ref_props
    AVFilterBufferRef.pic      -> AVFilterBufferRef.buffer

2010-08-07 - ecc8dad - lavfi 1.29.0 - AVFilterBufferRef
  Rename AVFilterPicRef to AVFilterBufferRef.

2010-08-07 - d54e094 - lavfi 1.28.0 - AVFilterBuffer
  Move format field from AVFilterBuffer to AVFilterPicRef.

2010-08-06 - bf176f5 - lavcore 0.3.0 - av_check_image_size()
  Deprecate avcodec_check_dimensions() in favor of the function
  av_check_image_size() defined in libavcore/imgutils.h.

2010-07-30 - 56b5e9d - lavfi 1.27.0 - AVFilterBuffer
  Increase size of the arrays AVFilterBuffer.data and
  AVFilterBuffer.linesize from 4 to 8.
  This change breaks libavfilter ABI.

2010-07-29 - e7bd48a - lavcore 0.2.0 - imgutils.h
  Add functions av_fill_image_linesizes() and
  av_fill_image_pointers(), declared in libavcore/imgutils.h.

2010-07-27 - 126b638 - lavcore 0.1.0 - parseutils.h
  Deprecate av_parse_video_frame_size() and av_parse_video_frame_rate()
  defined in libavcodec in favor of the newly added functions
  av_parse_video_size() and av_parse_video_rate() declared in
  libavcore/parseutils.h.

2010-07-23 - 4485247 - lavu 50.23.0 - mathematics.h
  Add the M_PHI constant definition.

2010-07-22 - bdab614 - lavfi 1.26.0 - media format generalization
  Add a type field to AVFilterLink.

  Change the field types:
    enum PixelFormat format   -> int format  in AVFilterBuffer
    enum PixelFormat *formats -> int *formats in AVFilterFormats
    enum PixelFormat *format  -> int format   in AVFilterLink

  Change the function signatures:
    AVFilterFormats *avfilter_make_format_list(const enum PixelFormat *pix_fmts); ->
    AVFilterFormats *avfilter_make_format_list(const int *fmts);

    int avfilter_add_colorspace(AVFilterFormats **avff, enum PixelFormat pix_fmt); ->
    int avfilter_add_format   (AVFilterFormats **avff, int fmt);

    AVFilterFormats *avfilter_all_colorspaces(void); ->
    AVFilterFormats *avfilter_all_formats    (enum AVMediaType type);

  This change breaks libavfilter API/ABI.

2010-07-21 - aac6ca6 - lavcore 0.0.0
  Add libavcore.

2010-07-17 - b5c582f - lavfi 1.25.0 - AVFilterBuffer
  Remove w and h fields from AVFilterBuffer.

2010-07-17 - f0d77b2 - lavfi 1.24.0 - AVFilterBuffer
  Rename AVFilterPic to AVFilterBuffer.

2010-07-17 - 57fe80f - lavf 52.74.0 - url_fskip()
  Make url_fskip() return an int error code instead of void.

2010-07-11 - 23940f1 - lavc 52.83.0
  Add AVCodecContext.lpc_type and AVCodecContext.lpc_passes fields.
  Add AVLPCType enum.
  Deprecate AVCodecContext.use_lpc.

2010-07-11 - e1d7c88 - lavc 52.82.0 - avsubtitle_free()
  Add a function for freeing the contents of an AVSubtitle generated by
  avcodec_decode_subtitle.

2010-07-11 - b91d08f - lavu 50.22.0 - bswap.h and intreadwrite.h
  Make the bswap.h and intreadwrite.h API public.

2010-07-08 - ce1cd1c - lavu 50.21.0 - pixdesc.h
  Rename read/write_line() to av_read/write_image_line().

2010-07-07 - 4d508e4 - lavfi 1.21.0 - avfilter_copy_picref_props()
  Add avfilter_copy_picref_props().
- -2010-07-03 - 2d525ef - lavc 52.79.0 - Add FF_COMPLIANCE_UNOFFICIAL and change all instances of - FF_COMPLIANCE_INOFFICIAL to use FF_COMPLIANCE_UNOFFICIAL. - -2010-07-02 - 89eec74 - lavu 50.20.0 - lfg.h - Export av_lfg_init(), av_lfg_get(), av_mlfg_get(), and av_bmg_get() through - lfg.h. - -2010-06-28 - a52e2c3 - lavfi 1.20.1 - av_parse_color() - Extend av_parse_color() syntax, make it accept an alpha value specifier and - set the alpha value to 255 by default. - -2010-06-22 - 735cf6b - lavf 52.71.0 - URLProtocol.priv_data_size, priv_data_class - Add priv_data_size and priv_data_class to URLProtocol. - -2010-06-22 - ffbb289 - lavf 52.70.0 - url_alloc(), url_connect() - Add url_alloc() and url_connect(). - -2010-06-22 - 9b07a2d - lavf 52.69.0 - av_register_protocol2() - Add av_register_protocol2(), deprecating av_register_protocol(). - -2010-06-09 - 65db058 - lavu 50.19.0 - av_compare_mod() - Add av_compare_mod() to libavutil/mathematics.h. - -2010-06-05 - 0b99215 - lavu 50.18.0 - eval API - Make the eval API public. - -2010-06-04 - 31878fc - lavu 50.17.0 - AV_BASE64_SIZE - Add AV_BASE64_SIZE() macro. - -2010-06-02 - 7e566bb - lavc 52.73.0 - av_get_codec_tag_string() - Add av_get_codec_tag_string(). - -2010-06-01 - 2b99142 - lsws 0.11.0 - convertPalette API - Add sws_convertPalette8ToPacked32() and sws_convertPalette8ToPacked24(). - -2010-05-26 - 93ebfee - lavc 52.72.0 - CODEC_CAP_EXPERIMENTAL - Add CODEC_CAP_EXPERIMENTAL flag. - NOTE: this was backported to 0.6 - -2010-05-23 - 9977863 - lavu 50.16.0 - av_get_random_seed() - Add av_get_random_seed(). - -2010-05-18 - 796ac23 - lavf 52.63.0 - AVFMT_FLAG_RTP_HINT - Add AVFMT_FLAG_RTP_HINT as possible value for AVFormatContext.flags. - NOTE: this was backported to 0.6 - -2010-05-09 - b6bc205 - lavfi 1.20.0 - AVFilterPicRef - Add interlaced and top_field_first fields to AVFilterPicRef. - -------------------------------8<------------------------------------- - 0.6 branch was cut here ------------------------------>8-------------------------------------- - -2010-05-01 - 8e2ee18 - lavf 52.62.0 - probe function - Add av_probe_input_format2 to API, it allows ignoring probe - results below given score and returns the actual probe score. - -2010-04-01 - 3dd6180 - lavf 52.61.0 - metadata API - Add a flag for av_metadata_set2() to disable overwriting of - existing tags. - -2010-04-01 - 0fb49b5 - lavc 52.66.0 - Add avcodec_get_edge_width(). - -2010-03-31 - d103218 - lavc 52.65.0 - Add avcodec_copy_context(). - -2010-03-31 - 1a70d12 - lavf 52.60.0 - av_match_ext() - Make av_match_ext() public. - -2010-03-31 - 1149150 - lavu 50.14.0 - AVMediaType - Move AVMediaType enum from libavcodec to libavutil. - -2010-03-31 - 72415b2 - lavc 52.64.0 - AVMediaType - Define AVMediaType enum, and use it instead of enum CodecType, which - is deprecated and will be dropped at the next major bump. - -2010-03-25 - 8795823 - lavu 50.13.0 - av_strerror() - Implement av_strerror(). - -2010-03-23 - e1484eb - lavc 52.60.0 - av_dct_init() - Support DCT-I and DST-I. - -2010-03-15 - b8819c8 - lavf 52.56.0 - AVFormatContext.start_time_realtime - Add AVFormatContext.start_time_realtime field. - -2010-03-13 - 5bb5c1d - lavfi 1.18.0 - AVFilterPicRef.pos - Add AVFilterPicRef.pos field. - -2010-03-13 - 60c144f - lavu 50.12.0 - error.h - Move error code definitions from libavcodec/avcodec.h to - the new public header libavutil/error.h. - -2010-03-07 - c709483 - lavc 52.56.0 - avfft.h - Add public FFT interface. - -2010-03-06 - ac6ef86 - lavu 50.11.0 - av_stristr() - Add av_stristr(). 
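
As an illustration of av_strerror() and the public error.h header from the entries above, a small self-contained sketch (the 128-byte buffer size is an arbitrary choice):

    #include <stdio.h>
    #include <errno.h>
    #include <libavutil/error.h>

    int main(void)
    {
        char buf[128];

        /* av_strerror() writes a description of an AVERROR code into buf
         * and returns 0, or a negative value if the code is unknown. */
        if (av_strerror(AVERROR(ENOMEM), buf, sizeof(buf)) == 0)
            printf("AVERROR(ENOMEM): %s\n", buf);
        return 0;
    }
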
- -2010-03-03 - 4b83fc0 - lavu 50.10.0 - av_tree_enumerate() - Add av_tree_enumerate(). - -2010-02-07 - b687c1a - lavu 50.9.0 - av_compare_ts() - Add av_compare_ts(). - -2010-02-05 - 3f3dc76 - lsws 0.10.0 - sws_getCoefficients() - Add sws_getCoefficients(). - -2010-02-01 - ca76a11 - lavf 52.50.0 - metadata API - Add a list of generic tag names, change 'author' -> 'artist', - 'year' -> 'date'. - -2010-01-30 - 80a07f6 - lavu 50.8.0 - av_get_pix_fmt() - Add av_get_pix_fmt(). - -2010-01-21 - 01cc47d - lsws 0.9.0 - sws_scale() - Change constness attributes of sws_scale() parameters. - -2010-01-10 - 3fb8e77 - lavfi 1.15.0 - avfilter_graph_config_links() - Add a log_ctx parameter to avfilter_graph_config_links(). - -2010-01-07 - 8e9767f - lsws 0.8.0 - sws_isSupported{In,Out}put() - Add sws_isSupportedInput() and sws_isSupportedOutput() functions. - -2010-01-06 - c1d662f - lavfi 1.14.0 - avfilter_add_colorspace() - Change the avfilter_add_colorspace() signature, make it accept an - (AVFilterFormats **) rather than an (AVFilterFormats *) as before. - -2010-01-03 - 4fd1f18 - lavfi 1.13.0 - avfilter_add_colorspace() - Add avfilter_add_colorspace(). - -2010-01-02 - 8eb631f - lavf 52.46.0 - av_match_ext() - Add av_match_ext(), it should be used in place of match_ext(). - -2010-01-01 - a1f547b - lavf 52.45.0 - av_guess_format() - Add av_guess_format(), it should be used in place of guess_format(). - -2009-12-13 - a181981 - lavf 52.43.0 - metadata API - Add av_metadata_set2(), AV_METADATA_DONT_STRDUP_KEY and - AV_METADATA_DONT_STRDUP_VAL. - -2009-12-13 - 277c733 - lavu 50.7.0 - avstring.h API - Add av_d2str(). - -2009-12-13 - 02b398e - lavc 52.42.0 - AVStream - Add avg_frame_rate. - -2009-12-12 - 3ba69a1 - lavu 50.6.0 - av_bmg_next() - Introduce the av_bmg_next() function. - -2009-12-05 - a13a543 - lavfi 1.12.0 - avfilter_draw_slice() - Add a slice_dir parameter to avfilter_draw_slice(). - -2009-11-26 - 4cc3f6a - lavfi 1.11.0 - AVFilter - Remove the next field from AVFilter, this is not anymore required. - -2009-11-25 - 1433c4a - lavfi 1.10.0 - avfilter_next() - Introduce the avfilter_next() function. - -2009-11-25 - 86a60fa - lavfi 1.9.0 - avfilter_register() - Change the signature of avfilter_register() to make it return an - int. This is required since now the registration operation may fail. - -2009-11-25 - 74a0059 - lavu 50.5.0 - pixdesc.h API - Make the pixdesc.h API public. - -2009-10-27 - 243110f - lavfi 1.5.0 - AVFilter.next - Add a next field to AVFilter, this is used for simplifying the - registration and management of the registered filters. - -2009-10-23 - cccd292 - lavfi 1.4.1 - AVFilter.description - Add a description field to AVFilter. - -2009-10-19 - 6b5dc05 - lavfi 1.3.0 - avfilter_make_format_list() - Change the interface of avfilter_make_format_list() from - avfilter_make_format_list(int n, ...) to - avfilter_make_format_list(enum PixelFormat *pix_fmts). - -2009-10-18 - 0eb4ff9 - lavfi 1.0.0 - avfilter_get_video_buffer() - Make avfilter_get_video_buffer() recursive and add the w and h - parameters to it. - -2009-10-07 - 46c40e4 - lavfi 0.5.1 - AVFilterPic - Add w and h fields to AVFilterPic. - -2009-06-22 - 92400be - lavf 52.34.1 - AVFormatContext.packet_size - This is now an unsigned int instead of a signed int. - -2009-06-19 - a4276ba - lavc 52.32.0 - AVSubtitle.pts - Add a pts field to AVSubtitle which gives the subtitle packet pts - in AV_TIME_BASE. Some subtitle de-/encoders (e.g. XSUB) will - not work right without this. 
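
A worked use of av_compare_ts() from the entry above, comparing a timestamp in a 1/90000 tick base against one in AV_TIME_BASE_Q (microseconds); the concrete numbers are only an example:

    #include <stdio.h>
    #include <libavutil/avutil.h>
    #include <libavutil/mathematics.h>

    int main(void)
    {
        AVRational tb90k = { 1, 90000 };

        /* 135000/90000 = 1.5 s versus 2 * AV_TIME_BASE microseconds = 2 s;
         * av_compare_ts() returns -1, 0 or 1 without first rounding either
         * value into the other time base. */
        int cmp = av_compare_ts(135000, tb90k, 2 * AV_TIME_BASE, AV_TIME_BASE_Q);
        printf("compare: %d\n", cmp); /* prints -1, since 1.5 s < 2 s */
        return 0;
    }
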
- -2009-06-03 - 8f3f2e0 - lavc 52.30.2 - AV_PKT_FLAG_KEY - PKT_FLAG_KEY has been deprecated and will be dropped at the next - major version. Use AV_PKT_FLAG_KEY instead. - -2009-06-01 - f988ce6 - lavc 52.30.0 - av_lockmgr_register() - av_lockmgr_register() can be used to register a callback function - that lavc (and in the future, libraries that depend on lavc) can use - to implement mutexes. The application should provide a callback function - that implements the AV_LOCK_* operations described in avcodec.h. - When the lock manager is registered, FFmpeg is guaranteed to behave - correctly in a multi-threaded application. - -2009-04-30 - ce1d9c8 - lavc 52.28.0 - av_free_packet() - av_free_packet() is no longer an inline function. It is now exported. - -2009-04-11 - 80d403f - lavc 52.25.0 - deprecate av_destruct_packet_nofree() - Please use NULL instead. This has been supported since r16506 - (lavf > 52.23.1, lavc > 52.10.0). - -2009-04-07 - 7a00bba - lavc 52.23.0 - avcodec_decode_video/audio/subtitle - The old decoding functions are deprecated, all new code should use the - new functions avcodec_decode_video2(), avcodec_decode_audio3() and - avcodec_decode_subtitle2(). These new functions take an AVPacket *pkt - argument instead of a const uint8_t *buf / int buf_size pair. - -2009-04-03 - 7b09db3 - lavu 50.3.0 - av_fifo_space() - Introduce the av_fifo_space() function. - -2009-04-02 - fabd246 - lavc 52.23.0 - AVPacket - Move AVPacket declaration from libavformat/avformat.h to - libavcodec/avcodec.h. - -2009-03-22 - 6e08ca9 - lavu 50.2.0 - RGB32 pixel formats - Convert the pixel formats PIX_FMT_ARGB, PIX_FMT_RGBA, PIX_FMT_ABGR, - PIX_FMT_BGRA, which were defined as macros, into enum PixelFormat values. - Conversely PIX_FMT_RGB32, PIX_FMT_RGB32_1, PIX_FMT_BGR32 and - PIX_FMT_BGR32_1 are now macros. - avcodec_get_pix_fmt() now recognizes the "rgb32" and "bgr32" aliases. - Re-sort the enum PixelFormat list accordingly. - This change breaks API/ABI backward compatibility. - -2009-03-22 - f82674e - lavu 50.1.0 - PIX_FMT_RGB5X5 endian variants - Add the enum PixelFormat values: - PIX_FMT_RGB565BE, PIX_FMT_RGB565LE, PIX_FMT_RGB555BE, PIX_FMT_RGB555LE, - PIX_FMT_BGR565BE, PIX_FMT_BGR565LE, PIX_FMT_BGR555BE, PIX_FMT_BGR555LE. - -2009-03-21 - ee6624e - lavu 50.0.0 - av_random* - The Mersenne Twister PRNG implemented through the av_random* functions - was removed. Use the lagged Fibonacci PRNG through the av_lfg* functions - instead. - -2009-03-08 - 41dd680 - lavu 50.0.0 - AVFifoBuffer - av_fifo_init, av_fifo_read, av_fifo_write and av_fifo_realloc were dropped - and replaced by av_fifo_alloc, av_fifo_generic_read, av_fifo_generic_write - and av_fifo_realloc2. - In addition, the order of the function arguments of av_fifo_generic_read - was changed to match av_fifo_generic_write. - The AVFifoBuffer/struct AVFifoBuffer may only be used in an opaque way by - applications, they may not use sizeof() or directly access members. - -2009-03-01 - ec26457 - lavf 52.31.0 - Generic metadata API - Introduce a new metadata API (see av_metadata_get() and friends). - The old API is now deprecated and should not be used anymore. 
This especially - includes the following structure fields: - - AVFormatContext.title - - AVFormatContext.author - - AVFormatContext.copyright - - AVFormatContext.comment - - AVFormatContext.album - - AVFormatContext.year - - AVFormatContext.track - - AVFormatContext.genre - - AVStream.language - - AVStream.filename - - AVProgram.provider_name - - AVProgram.name - - AVChapter.title diff --git a/ffmpeg/doc/Doxyfile b/ffmpeg/doc/Doxyfile deleted file mode 100644 index 6488aad..0000000 --- a/ffmpeg/doc/Doxyfile +++ /dev/null @@ -1,1624 +0,0 @@ -# Doxyfile 1.7.1 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project -# -# All text after a hash (#) is considered a comment and will be ignored -# The format is: -# TAG = value [value, ...] -# For lists items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (" ") - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all -# text before the first occurrence of this tag. Doxygen uses libiconv (or the -# iconv built into libc) for the transcoding. See -# http://www.gnu.org/software/libiconv for the list of possible encodings. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded -# by quotes) that should identify the project. - -PROJECT_NAME = FFmpeg - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. -# This could be handy for archiving the generated documentation or -# if some version control system is used. - -PROJECT_NUMBER = - -# With the PROJECT_LOGO tag one can specify a logo or icon that is included -# in the documentation. The maximum height of the logo should not exceed 55 -# pixels and the maximum width should not exceed 200 pixels. Doxygen will -# copy the logo to the output directory. -PROJECT_LOGO = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) -# base path where the generated documentation will be put. -# If a relative path is entered, it will be relative to the location -# where doxygen was started. If left blank the current directory will be used. - -OUTPUT_DIRECTORY = doc/doxy - -# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create -# 4096 sub-directories (in 2 levels) under the output directory of each output -# format and will distribute the generated files over these directories. -# Enabling this option can be useful when feeding doxygen a huge amount of -# source files, where putting all generated files in the same directory would -# otherwise cause performance problems for the file system. - -CREATE_SUBDIRS = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. 
-# The default language is English, other supported languages are: -# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, -# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, -# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English -# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, -# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, -# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. - -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will -# include brief member descriptions after the members that are listed in -# the file and class documentation (similar to JavaDoc). -# Set to NO to disable this. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend -# the brief description of a member or function before the detailed description. -# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator -# that is used to form the text in various listings. Each string -# in this list, if found as the leading text of the brief description, will be -# stripped from the text and the result after processing the whole list, is -# used as the annotated text. Otherwise, the brief description is used as-is. -# If left blank, the following values are used ("$name" is automatically -# replaced with the name of the entity): "The $name class" "The $name widget" -# "The $name file" "is" "provides" "specifies" "contains" -# "represents" "a" "an" "the" - -ABBREVIATE_BRIEF = - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# Doxygen will generate a detailed section even if there is only a brief -# description. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full -# path before files name in the file list and in the header files. If set -# to NO the shortest path that makes the file name unique will be used. - -FULL_PATH_NAMES = YES - -# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag -# can be used to strip a user-defined part of the path. Stripping is -# only done if one of the specified strings matches the left-hand part of -# the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the -# path to strip. - -STRIP_FROM_PATH = . - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of -# the path mentioned in the documentation of a class, which tells -# the reader which header file to include in order to use a class. -# If left blank only the name of the header file containing the class -# definition is used. Otherwise one should specify the include paths that -# are normally passed to the compiler using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter -# (but less readable) file names. 
This can be useful is your file systems -# doesn't support long names like on DOS, Mac, or CD-ROM. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen -# will interpret the first line (until the first dot) of a JavaDoc-style -# comment as the brief description. If set to NO, the JavaDoc -# comments will behave just like regular Qt-style comments -# (thus requiring an explicit @brief command for a brief description.) - -JAVADOC_AUTOBRIEF = YES - -# If the QT_AUTOBRIEF tag is set to YES then Doxygen will -# interpret the first line (until the first dot) of a Qt-style -# comment as the brief description. If set to NO, the comments -# will behave just like regular Qt-style comments (thus requiring -# an explicit \brief command for a brief description.) - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen -# treat a multi-line C++ special comment block (i.e. a block of //! or /// -# comments) as a brief description. This used to be the default behaviour. -# The new default is to treat a multi-line C++ comment block as a detailed -# description. Set this tag to YES if you prefer the old behaviour instead. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented -# member inherits the documentation from any documented member that it -# re-implements. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce -# a new page for each member. If set to NO, the documentation of a member will -# be part of the file/class/namespace that contains it. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. -# Doxygen uses this value to replace tabs by spaces in code fragments. - -TAB_SIZE = 8 - -# This tag can be used to specify a number of aliases that acts -# as commands in the documentation. An alias has the form "name=value". -# For example adding "sideeffect=\par Side Effects:\n" will allow you to -# put the command \sideeffect (or @sideeffect) in the documentation, which -# will result in a user-defined paragraph with heading "Side Effects:". -# You can put \n's in the value part of an alias to insert newlines. - -ALIASES = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C -# sources only. Doxygen will then generate output that is more tailored for C. -# For instance, some of the names that are used will be different. The list -# of all members will be omitted, etc. - -OPTIMIZE_OUTPUT_FOR_C = YES - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java -# sources only. Doxygen will then generate output that is more tailored for -# Java. For instance, namespaces will be presented as packages, qualified -# scopes will look different, etc. - -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources only. Doxygen will then generate output that is more tailored for -# Fortran. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for -# VHDL. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given extension. -# Doxygen has a built-in mapping, but you can override or extend it using this -# tag. 
The format is ext=language, where ext is a file extension, and language -# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, -# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make -# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C -# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions -# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. - -EXTENSION_MAPPING = - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should -# set this tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. -# func(std::string) {}). This also make the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. - -BUILTIN_STL_SUPPORT = NO - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. -# Doxygen will parse them like normal C++ but will assume all classes use public -# instead of private inheritance when no explicit protection keyword is present. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate getter -# and setter methods for a property. Setting this option to YES (the default) -# will make doxygen to replace the get and set methods by a property in the -# documentation. This will only work if the methods are indeed getting or -# setting a simple type. If this is not the case, or you want to show the -# methods anyway, you should set this option to NO. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. - -DISTRIBUTE_GROUP_DOC = NO - -# Set the SUBGROUPING tag to YES (the default) to allow class member groups of -# the same type (for instance a group of public functions) to be put as a -# subgroup of that type (e.g. under the Public Functions section). Set it to -# NO to prevent subgrouping. Alternatively, this can be done per class using -# the \nosubgrouping command. - -SUBGROUPING = YES - -# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum -# is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically -# be useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. - -TYPEDEF_HIDES_STRUCT = YES - -# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to -# determine which symbols to keep in memory and which to flush to disk. -# When the cache is full, less often used symbols will be written to disk. -# For small to medium size projects (<1000 input files) the default value is -# probably good enough. 
For larger projects a too small cache size can cause -# doxygen to be busy swapping symbols to and from disk most of the time -# causing a significant performance penality. -# If the system has enough physical memory increasing the cache will improve the -# performance by keeping more symbols in memory. Note that the value works on -# a logarithmic scale so increasing the size by one will roughly double the -# memory usage. The cache size is given by this formula: -# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, -# corresponding to a cache size of 2^16 = 65536 symbols - -SYMBOL_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in -# documentation are documented, even if no documentation was available. -# Private class members and static file members will be hidden unless -# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES - -EXTRACT_ALL = YES - -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class -# will be included in the documentation. - -EXTRACT_PRIVATE = YES - -# If the EXTRACT_STATIC tag is set to YES all static members of a file -# will be included in the documentation. - -EXTRACT_STATIC = YES - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) -# defined locally in source files will be included in the documentation. -# If set to NO only classes defined in header files are included. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. When set to YES local -# methods, which are defined in the implementation section but not in -# the interface are included in the documentation. -# If set to NO (the default) only methods in the interface are included. - -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base -# name of the file that contains the anonymous namespace. By default -# anonymous namespace are hidden. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all -# undocumented members of documented classes, files or namespaces. -# If set to NO (the default) these members will be included in the -# various overviews, but no documentation section is generated. -# This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. -# If set to NO (the default) these classes will be included in the various -# overviews. This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all -# friend (class|struct|union) declarations. -# If set to NO (the default) these declarations will be included in the -# documentation. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any -# documentation blocks found inside the body of a function. -# If set to NO (the default) these blocks will be appended to the -# function's detailed documentation block. 
- -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation -# that is typed after a \internal command is included. If the tag is set -# to NO (the default) then the documentation will be excluded. -# Set it to YES to include the internal documentation. - -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate -# file names in lower-case letters. If set to YES upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. - -CASE_SENSE_NAMES = YES - -# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen -# will show members with their full class and namespace scopes in the -# documentation. If set to YES the scope will be hidden. - -HIDE_SCOPE_NAMES = NO - -# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen -# will put a list of the files that are included by a file in the documentation -# of that file. - -SHOW_INCLUDE_FILES = YES - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen -# will list include files with double quotes in the documentation -# rather than with sharp brackets. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] -# is inserted in the documentation for inline members. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen -# will sort the (detailed) documentation of file and class members -# alphabetically by member name. If set to NO the members will appear in -# declaration order. - -SORT_MEMBER_DOCS = NO - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the -# brief documentation of file, namespace and class members alphabetically -# by member name. If set to NO (the default) the members will appear in -# declaration order. - -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen -# will sort the (brief and detailed) documentation of class members so that -# constructors and destructors are listed first. If set to NO (the default) -# the constructors will appear in the respective orders defined by -# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. -# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO -# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. - -SORT_MEMBERS_CTORS_1ST = NO - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the -# hierarchy of group names into alphabetical order. If set to NO (the default) -# the group names will appear in their defined order. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be -# sorted by fully-qualified names, including namespaces. If set to -# NO (the default), the class list will be sorted only by class name, -# not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the -# alphabetical list. - -SORT_BY_SCOPE_NAME = NO - -# The GENERATE_TODOLIST tag can be used to enable (YES) or -# disable (NO) the todo list. This list is created by putting \todo -# commands in the documentation. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or -# disable (NO) the test list. This list is created by putting \test -# commands in the documentation. 
- -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or -# disable (NO) the bug list. This list is created by putting \bug -# commands in the documentation. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or -# disable (NO) the deprecated list. This list is created by putting -# \deprecated commands in the documentation. - -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional -# documentation sections, marked by \if sectionname ... \endif. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines -# the initial value of a variable or define consists of for it to appear in -# the documentation. If the initializer consists of more lines than specified -# here it will be hidden. Use a value of 0 to hide initializers completely. -# The appearance of the initializer of individual variables and defines in the -# documentation can be controlled using \showinitializer or \hideinitializer -# command in the documentation regardless of this setting. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated -# at the bottom of the documentation of classes and structs. If set to YES the -# list will mention the files that were used to generate the documentation. - -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. -# This will remove the Files entry from the Quick Index and from the -# Folder Tree View (if specified). The default is YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the -# Namespaces page. -# This will remove the Namespaces entry from the Quick Index -# and from the Folder Tree View (if specified). The default is YES. - -SHOW_NAMESPACES = YES - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command <command> <input-file>, where <command> is the value of -# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file -# provided by doxygen. Whatever the program writes to standard output -# is used as the file version. See the manual for examples. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. The create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. -# You can optionally specify a file name after the option, if omitted -# DoxygenLayout.xml will be used as the name of the layout file. - -LAYOUT_FILE = - -#--------------------------------------------------------------------------- -# configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated -# by doxygen. Possible values are YES and NO. If left blank NO is used. - -QUIET = YES - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated by doxygen. Possible values are YES and NO. If left blank -# NO is used. 
- -WARNINGS = YES - -# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings -# for undocumented members. If EXTRACT_ALL is set to YES then this flag will -# automatically be disabled. - -WARN_IF_UNDOCUMENTED = YES - -# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some -# parameters in a documented function, or documenting parameters that -# don't exist or using markup commands wrongly. - -WARN_IF_DOC_ERROR = YES - -# This WARN_NO_PARAMDOC option can be abled to get warnings for -# functions that are documented, but have no documentation for their parameters -# or return value. If set to NO (the default) doxygen will only warn about -# wrong or incomplete parameter documentation, but not about the absence of -# documentation. - -WARN_NO_PARAMDOC = NO - -# The WARN_FORMAT tag determines the format of the warning messages that -# doxygen can produce. The string should contain the $file, $line, and $text -# tags, which will be replaced by the file and line number from which the -# warning originated and the warning text. Optionally the format may contain -# $version, which will be replaced by the version of the file (if it could -# be obtained via FILE_VERSION_FILTER) - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning -# and error messages should be written. If left blank the output is written -# to stderr. - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag can be used to specify the files and/or directories that contain -# documented source files. You may enter file names like "myfile.cpp" or -# directories like "/usr/src/myproject". Separate the files or directories -# with spaces. - -INPUT = - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is -# also the default input encoding. Doxygen uses libiconv (or the iconv built -# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for -# the list of possible encodings. - -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank the following patterns are tested: -# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx -# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 - -FILE_PATTERNS = - -# The RECURSIVE tag can be used to turn specify whether or not subdirectories -# should be searched for input files as well. Possible values are YES and NO. -# If left blank NO is used. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. - -EXCLUDE = - -# The EXCLUDE_SYMLINKS tag can be used select whether or not files or -# directories that are symbolic links (a Unix filesystem feature) are excluded -# from the input. 
- -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. Note that the wildcards are matched -# against the file with absolute path, so to exclude all test directories -# for example use the pattern */test/* - -EXCLUDE_PATTERNS = *.git \ - *.d - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test - -EXCLUDE_SYMBOLS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or -# directories that contain example code fragments that are included (see -# the \include command). - -EXAMPLE_PATH = doc/examples/ - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank all files are included. - -EXAMPLE_PATTERNS = *.c - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude -# commands irrespective of the value of the RECURSIVE tag. -# Possible values are YES and NO. If left blank NO is used. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or -# directories that contain image that are included in the documentation (see -# the \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command <filter> <input-file>, where <filter> -# is the value of the INPUT_FILTER tag, and <input-file> is the name of an -# input file. Doxygen will then use the output that the filter program writes -# to standard output. -# If FILTER_PATTERNS is specified, this tag will be -# ignored. - -INPUT_FILTER = - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. -# Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. -# The filters are a list of the form: -# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further -# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER -# is applied to all files. - -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will be used to filter the input files when producing source -# files to browse (i.e. when SOURCE_BROWSER is set to YES). - -FILTER_SOURCE_FILES = NO - -#--------------------------------------------------------------------------- -# configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will -# be generated. Documented entities will be cross-referenced with these sources. -# Note: To get rid of all source code in the generated output, make sure also -# VERBATIM_HEADERS is set to NO. - -SOURCE_BROWSER = YES - -# Setting the INLINE_SOURCES tag to YES will include the body -# of functions and classes directly in the documentation. 
- -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct -# doxygen to hide any special comment blocks from generated source code -# fragments. Normal C and C++ comments will always remain visible. - -STRIP_CODE_COMMENTS = NO - -# If the REFERENCED_BY_RELATION tag is set to YES -# then for each documented function all documented -# functions referencing it will be listed. - -REFERENCED_BY_RELATION = YES - -# If the REFERENCES_RELATION tag is set to YES -# then for each documented function all documented entities -# called/used by that function will be listed. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) -# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from -# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will -# link to the source code. -# Otherwise they will link to the documentation. - -REFERENCES_LINK_SOURCE = YES - -# If the USE_HTAGS tag is set to YES then the references to source code -# will point to the HTML generated by the htags(1) tool instead of doxygen -# built-in source browser. The htags tool is part of GNU's global source -# tagging system (see http://www.gnu.org/software/global/global.html). You -# will need version 4.8.6 or higher. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen -# will generate a verbatim copy of the header file for each class for -# which an include is specified. Set to NO to disable this. - -VERBATIM_HEADERS = YES - -#--------------------------------------------------------------------------- -# configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index -# of all compounds will be generated. Enable this if the project -# contains a lot of classes, structs, unions or interfaces. - -ALPHABETICAL_INDEX = YES - -# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then -# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns -# in which this list will be split (can be a number in the range [1..20]) - -COLS_IN_ALPHA_INDEX = 2 - -# In case all classes in a project start with a common prefix, all -# classes will be put under the same header in the alphabetical index. -# The IGNORE_PREFIX tag can be used to specify one or more prefixes that -# should be ignored while generating the index headers. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES (the default) Doxygen will -# generate HTML output. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `html' will be used as the default path. - -HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for -# each generated HTML page (for example: .htm,.php,.asp). If it is left blank -# doxygen will generate files with .html extension. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a personal HTML header for -# each generated HTML page. If it is left blank doxygen will generate a -# standard header. 
- -#HTML_HEADER = doc/doxy/header.html - -# The HTML_FOOTER tag can be used to specify a personal HTML footer for -# each generated HTML page. If it is left blank doxygen will generate a -# standard footer. - -#HTML_FOOTER = doc/doxy/footer.html - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading -# style sheet that is used by each HTML page. It can be used to -# fine-tune the look of the HTML output. If the tag is left blank doxygen -# will generate a default style sheet. Note that doxygen will try to copy -# the style sheet file to the HTML output directory, so don't put your own -# stylesheet in the HTML output directory as well, or it will be erased! - -#HTML_STYLESHEET = doc/doxy/doxy_stylesheet.css - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. -# Doxygen will adjust the colors in the stylesheet and background images -# according to this color. Hue is specified as an angle on a colorwheel, -# see http://en.wikipedia.org/wiki/Hue for more information. -# For instance the value 0 represents red, 60 is yellow, 120 is green, -# 180 is cyan, 240 is blue, 300 purple, and 360 is red again. -# The allowed range is 0 to 359. - -#HTML_COLORSTYLE_HUE = 120 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of -# the colors in the HTML output. For a value of 0 the output will use -# grayscales only. A value of 255 will produce the most vivid colors. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to -# the luminance component of the colors in the HTML output. Values below -# 100 gradually make the output lighter, whereas values above 100 make -# the output darker. The value divided by 100 is the actual gamma applied, -# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, -# and 100 does not change the gamma. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting -# this to NO can help when comparing the output of multiple runs. - -HTML_TIMESTAMP = YES - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. For this to work a browser that supports -# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox -# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). - -HTML_DYNAMIC_SECTIONS = NO - -# If the GENERATE_DOCSET tag is set to YES, additional index files -# will be generated that can be used as input for Apple's Xcode 3 -# integrated development environment, introduced with OS X 10.5 (Leopard). -# To create a documentation set, doxygen will generate a Makefile in the -# HTML output directory. Running make will produce the docset in that -# directory and running "make install" will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find -# it at startup. -# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. - -GENERATE_DOCSET = NO - -# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the -# feed. A documentation feed provides an umbrella under which multiple -# documentation sets from a single provider (such as a company or product suite) -# can be grouped. 
- -DOCSET_FEEDNAME = "Doxygen generated docs" - -# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that -# should uniquely identify the documentation set bundle. This should be a -# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen -# will append .docset to the name. - -DOCSET_BUNDLE_ID = org.doxygen.Project - -# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify -# the documentation publisher. This should be a reverse domain-name style -# string, e.g. com.mycompany.MyDocSet.documentation. - -DOCSET_PUBLISHER_ID = org.doxygen.Publisher - -# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. - -DOCSET_PUBLISHER_NAME = Publisher - -# If the GENERATE_HTMLHELP tag is set to YES, additional index files -# will be generated that can be used as input for tools like the -# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) -# of the generated HTML documentation. - -GENERATE_HTMLHELP = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can -# be used to specify the file name of the resulting .chm file. You -# can add a path in front of the file if the result should not be -# written to the html output directory. - -CHM_FILE = - -# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can -# be used to specify the location (absolute path including file name) of -# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run -# the HTML help compiler on the generated index.hhp. - -HHC_LOCATION = - -# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag -# controls if a separate .chi index file is generated (YES) or that -# it should be included in the master .chm file (NO). - -GENERATE_CHI = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING -# is used to encode HtmlHelp index (hhk), content (hhc) and project file -# content. - -CHM_INDEX_ENCODING = - -# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag -# controls whether a binary table of contents is generated (YES) or a -# normal table of contents (NO) in the .chm file. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members -# to the contents of the HTML help documentation and to the tree view. - -TOC_EXPAND = NO - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated -# that can be used as input for Qt's qhelpgenerator to generate a -# Qt Compressed Help (.qch) of the generated HTML documentation. - -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can -# be used to specify the file name of the resulting .qch file. -# The path specified is relative to the HTML output folder. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating -# Qt Help Project output. For more information please see -# http://doc.trolltech.com/qthelpproject.html#namespace - -QHP_NAMESPACE = org.doxygen.Project - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating -# Qt Help Project output. For more information please see -# http://doc.trolltech.com/qthelpproject.html#virtual-folders - -QHP_VIRTUAL_FOLDER = doc - -# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to -# add. 
For more information please see -# http://doc.trolltech.com/qthelpproject.html#custom-filters - -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see -# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters"> -# Qt Help Project / Custom Filters</a>. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's -# filter section matches. -# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes"> -# Qt Help Project / Filter Attributes</a>. - -QHP_SECT_FILTER_ATTRS = - -# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can -# be used to specify the location of Qt's qhelpgenerator. -# If non-empty doxygen will try to run qhelpgenerator on the generated -# .qhp file. - -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files -# will be generated, which together with the HTML files, form an Eclipse help -# plugin. To install this plugin and make it available under the help contents -# menu in Eclipse, the contents of the directory containing the HTML and XML -# files needs to be copied into the plugins directory of eclipse. The name of -# the directory within the plugins directory should be the same as -# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before -# the help appears. - -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the eclipse help plugin. When installing the plugin -# the directory name containing the HTML and XML files should also have -# this name. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# The DISABLE_INDEX tag can be used to turn on/off the condensed index at -# top of each HTML page. The value NO (the default) enables the index and -# the value YES disables it. - -DISABLE_INDEX = NO - -# This tag can be used to set the number of enum values (range [1..20]) -# that doxygen will group on one line in the generated HTML documentation. - -ENUM_VALUES_PER_LINE = 4 - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. -# If the tag value is set to YES, a side panel will be generated -# containing a tree-like index structure (just like the one that -# is generated for HTML Help). For this to work a browser that supports -# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). -# Windows users are probably better off using the HTML help feature. - -GENERATE_TREEVIEW = NO - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be -# used to set the initial width (in pixels) of the frame in which the tree -# is shown. - -TREEVIEW_WIDTH = 250 - -# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open -# links to external symbols imported via tag files in a separate window. - -EXT_LINKS_IN_WINDOW = NO - -# Use this tag to change the font size of Latex formulas included -# as images in the HTML documentation. The default is 10. Note that -# when you change the font size after a successful doxygen run you need -# to manually remove any form_*.png images from the HTML output directory -# to force them to be regenerated. - -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. Transparent PNGs are -# not supported properly for IE 6.0, but are supported on all modern browsers. 
-# Note that when changing this option you need to delete any form_*.png files -# in the HTML output before the changes have effect. - -FORMULA_TRANSPARENT = YES - -# When the SEARCHENGINE tag is enabled doxygen will generate a search box -# for the HTML output. The underlying search engine uses javascript -# and DHTML and should work on any modern browser. Note that when using -# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets -# (GENERATE_DOCSET) there is already a search function so this one should -# typically be disabled. For large projects the javascript based search engine -# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. - -SEARCHENGINE = NO - -# When the SERVER_BASED_SEARCH tag is enabled the search engine will be -# implemented using a PHP enabled web server instead of at the web client -# using Javascript. Doxygen will generate the search PHP script and index -# file to put on the web server. The advantage of the server -# based approach is that it scales better to large projects and allows -# full text search. The disadvances is that it is more difficult to setup -# and does not have live searching capabilities. - -SERVER_BASED_SEARCH = NO - -#--------------------------------------------------------------------------- -# configuration options related to the LaTeX output -#--------------------------------------------------------------------------- - -# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will -# generate Latex output. - -GENERATE_LATEX = NO - -# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `latex' will be used as the default path. - -LATEX_OUTPUT = latex - -# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be -# invoked. If left blank `latex' will be used as the default command name. -# Note that when enabling USE_PDFLATEX this option is only used for -# generating bitmaps for formulas in the HTML output, but not in the -# Makefile that is written to the output directory. - -LATEX_CMD_NAME = latex - -# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to -# generate index for LaTeX. If left blank `makeindex' will be used as the -# default command name. - -MAKEINDEX_CMD_NAME = makeindex - -# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact -# LaTeX documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_LATEX = NO - -# The PAPER_TYPE tag can be used to set the paper type that is used -# by the printer. Possible values are: a4, a4wide, letter, legal and -# executive. If left blank a4wide will be used. - -PAPER_TYPE = a4wide - -# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX -# packages that should be included in the LaTeX output. - -EXTRA_PACKAGES = - -# The LATEX_HEADER tag can be used to specify a personal LaTeX header for -# the generated latex document. The header should contain everything until -# the first chapter. If it is left blank doxygen will generate a -# standard header. Notice: only use this tag if you know what you are doing! - -LATEX_HEADER = - -# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated -# is prepared for conversion to pdf (using ps2pdf). 
The pdf file will -# contain links (just like the HTML output) instead of page references -# This makes the output suitable for online browsing using a pdf viewer. - -PDF_HYPERLINKS = NO - -# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of -# plain latex in the generated Makefile. Set this option to YES to get a -# higher quality PDF documentation. - -USE_PDFLATEX = NO - -# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. -# command to the generated LaTeX files. This will instruct LaTeX to keep -# running if errors occur, instead of asking the user for help. -# This option is also used when generating formulas in HTML. - -LATEX_BATCHMODE = NO - -# If LATEX_HIDE_INDICES is set to YES then doxygen will not -# include the index chapters (such as File Index, Compound Index, etc.) -# in the output. - -LATEX_HIDE_INDICES = NO - -# If LATEX_SOURCE_CODE is set to YES then doxygen will include -# source code with syntax highlighting in the LaTeX output. -# Note that which sources are shown also depends on other settings -# such as SOURCE_BROWSER. - -LATEX_SOURCE_CODE = NO - -#--------------------------------------------------------------------------- -# configuration options related to the RTF output -#--------------------------------------------------------------------------- - -# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output -# The RTF output is optimized for Word 97 and may not look very pretty with -# other RTF readers or editors. - -GENERATE_RTF = NO - -# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `rtf' will be used as the default path. - -RTF_OUTPUT = rtf - -# If the COMPACT_RTF tag is set to YES Doxygen generates more compact -# RTF documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_RTF = NO - -# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated -# will contain hyperlink fields. The RTF file will -# contain links (just like the HTML output) instead of page references. -# This makes the output suitable for online browsing using WORD or other -# programs which support those fields. -# Note: wordpad (write) and others do not support links. - -RTF_HYPERLINKS = NO - -# Load stylesheet definitions from file. Syntax is similar to doxygen's -# config file, i.e. a series of assignments. You only have to provide -# replacements, missing definitions are set to their default value. - -RTF_STYLESHEET_FILE = - -# Set optional variables used in the generation of an rtf document. -# Syntax is similar to doxygen's config file. - -RTF_EXTENSIONS_FILE = - -#--------------------------------------------------------------------------- -# configuration options related to the man page output -#--------------------------------------------------------------------------- - -# If the GENERATE_MAN tag is set to YES (the default) Doxygen will -# generate man pages - -GENERATE_MAN = NO - -# The MAN_OUTPUT tag is used to specify where the man pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `man' will be used as the default path. 
- -MAN_OUTPUT = man - -# The MAN_EXTENSION tag determines the extension that is added to -# the generated man pages (default is the subroutine's section .3) - -MAN_EXTENSION = .3 - -# If the MAN_LINKS tag is set to YES and Doxygen generates man output, -# then it will generate one additional man file for each entity -# documented in the real man page(s). These additional files -# only source the real man page, but without them the man command -# would be unable to find the correct page. The default is NO. - -MAN_LINKS = NO - -#--------------------------------------------------------------------------- -# configuration options related to the XML output -#--------------------------------------------------------------------------- - -# If the GENERATE_XML tag is set to YES Doxygen will -# generate an XML file that captures the structure of -# the code including all documentation. - -GENERATE_XML = NO - -# The XML_OUTPUT tag is used to specify where the XML pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `xml' will be used as the default path. - -XML_OUTPUT = xml - -# The XML_SCHEMA tag can be used to specify an XML schema, -# which can be used by a validating XML parser to check the -# syntax of the XML files. - -XML_SCHEMA = - -# The XML_DTD tag can be used to specify an XML DTD, -# which can be used by a validating XML parser to check the -# syntax of the XML files. - -XML_DTD = - -# If the XML_PROGRAMLISTING tag is set to YES Doxygen will -# dump the program listings (including syntax highlighting -# and cross-referencing information) to the XML output. Note that -# enabling this will significantly increase the size of the XML output. - -XML_PROGRAMLISTING = YES - -#--------------------------------------------------------------------------- -# configuration options for the AutoGen Definitions output -#--------------------------------------------------------------------------- - -# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will -# generate an AutoGen Definitions (see autogen.sf.net) file -# that captures the structure of the code including all -# documentation. Note that this feature is still experimental -# and incomplete at the moment. - -GENERATE_AUTOGEN_DEF = NO - -#--------------------------------------------------------------------------- -# configuration options related to the Perl module output -#--------------------------------------------------------------------------- - -# If the GENERATE_PERLMOD tag is set to YES Doxygen will -# generate a Perl module file that captures the structure of -# the code including all documentation. Note that this -# feature is still experimental and incomplete at the -# moment. - -GENERATE_PERLMOD = NO - -# If the PERLMOD_LATEX tag is set to YES Doxygen will generate -# the necessary Makefile rules, Perl scripts and LaTeX code to be able -# to generate PDF and DVI output from the Perl module output. - -PERLMOD_LATEX = NO - -# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be -# nicely formatted so it can be parsed by a human reader. -# This is useful -# if you want to understand what is going on. -# On the other hand, if this -# tag is set to NO the size of the Perl module output will be much smaller -# and Perl will parse it just the same. - -PERLMOD_PRETTY = YES - -# The names of the make variables in the generated doxyrules.make file -# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. 
-# This is useful so different doxyrules.make files included by the same -# Makefile don't overwrite each other's variables. - -PERLMOD_MAKEVAR_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the preprocessor -#--------------------------------------------------------------------------- - -# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will -# evaluate all C-preprocessor directives found in the sources and include -# files. - -ENABLE_PREPROCESSING = YES - -# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro -# names in the source code. If set to NO (the default) only conditional -# compilation will be performed. Macro expansion can be done in a controlled -# way by setting EXPAND_ONLY_PREDEF to YES. - -MACRO_EXPANSION = YES - -# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES -# then the macro expansion is limited to the macros specified with the -# PREDEFINED and EXPAND_AS_DEFINED tags. - -EXPAND_ONLY_PREDEF = YES - -# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files -# in the INCLUDE_PATH (see below) will be search if a #include is found. - -SEARCH_INCLUDES = YES - -# The INCLUDE_PATH tag can be used to specify one or more directories that -# contain include files that are not input files but should be processed by -# the preprocessor. - -INCLUDE_PATH = - -# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard -# patterns (like *.h and *.hpp) to filter out the header-files in the -# directories. If left blank, the patterns specified with FILE_PATTERNS will -# be used. - -INCLUDE_FILE_PATTERNS = - -# The PREDEFINED tag can be used to specify one or more macro names that -# are defined before the preprocessor is started (similar to the -D option of -# gcc). The argument of the tag is a list of macros of the form: name -# or name=definition (no spaces). If the definition and the = are -# omitted =1 is assumed. To prevent a macro definition from being -# undefined via #undef or recursively expanded use the := operator -# instead of the = operator. - -PREDEFINED = "__attribute__(x)=" \ - "DECLARE_ALIGNED(a,t,n)=t n" \ - "offsetof(x,y)=0x42" \ - av_alloc_size \ - -# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then -# this tag can be used to specify a list of macro names that should be expanded. -# The macro definition that is found in the sources will be used. -# Use the PREDEFINED tag if you want to use a different macro definition. - -EXPAND_AS_DEFINED = declare_idct \ - READ_PAR_DATA \ - -# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then -# doxygen's preprocessor will remove all function-like macros that are alone -# on a line, have an all uppercase name, and do not end with a semicolon. Such -# function macros are typically used for boiler-plate code, and will confuse -# the parser if not removed. - -SKIP_FUNCTION_MACROS = YES - -#--------------------------------------------------------------------------- -# Configuration::additions related to external references -#--------------------------------------------------------------------------- - -# The TAGFILES option can be used to specify one or more tagfiles. -# Optionally an initial location of the external documentation -# can be added for each tagfile. The format of a tag file without -# this location is as follows: -# -# TAGFILES = file1 file2 ... 
-# Adding location for the tag files is done as follows: -# -# TAGFILES = file1=loc1 "file2 = loc2" ... -# where "loc1" and "loc2" can be relative or absolute paths or -# URLs. If a location is present for each tag, the installdox tool -# does not have to be run to correct the links. -# Note that each tag file must have a unique name -# (where the name does NOT include the path) -# If a tag file is not located in the directory in which doxygen -# is run, you must also specify the path to the tagfile here. - -TAGFILES = - -# When a file name is specified after GENERATE_TAGFILE, doxygen will create -# a tag file that is based on the input files it reads. - -GENERATE_TAGFILE = - -# If the ALLEXTERNALS tag is set to YES all external classes will be listed -# in the class index. If set to NO only the inherited external classes -# will be listed. - -ALLEXTERNALS = NO - -# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed -# in the modules index. If set to NO, only the current project's groups will -# be listed. - -EXTERNAL_GROUPS = YES - -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of `which perl'). - -PERL_PATH = /usr/bin/perl - -#--------------------------------------------------------------------------- -# Configuration options related to the dot tool -#--------------------------------------------------------------------------- - -# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will -# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base -# or super classes. Setting the tag to NO turns the diagrams off. Note that -# this option is superseded by the HAVE_DOT option below. This is only a -# fallback. It is recommended to install and use dot, since it yields more -# powerful graphs. - -CLASS_DIAGRAMS = YES - -# You can define message sequence charts within doxygen comments using the \msc -# command. Doxygen will then run the mscgen tool (see -# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the -# documentation. The MSCGEN_PATH tag allows you to specify the directory where -# the mscgen tool resides. If left empty the tool is assumed to be found in the -# default search path. - -MSCGEN_PATH = - -# If set to YES, the inheritance and collaboration graphs will hide -# inheritance and usage relations if the target is undocumented -# or is not a class. - -HIDE_UNDOC_RELATIONS = YES - -# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is -# available from the path. This tool is part of Graphviz, a graph visualization -# toolkit from AT&T and Lucent Bell Labs. The other options in this section -# have no effect if this option is set to NO (the default) - -HAVE_DOT = NO - -# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is -# allowed to run in parallel. When set to 0 (the default) doxygen will -# base this on the number of processors available in the system. You can set it -# explicitly to a value larger than 0 to get control over the balance -# between CPU load and processing speed. - -DOT_NUM_THREADS = 0 - -# By default doxygen will write a font called FreeSans.ttf to the output -# directory and reference it in all dot files that doxygen generates. This -# font does not include all possible unicode characters however, so when you need -# these (or just want a differently looking font) you can specify the font name -# using DOT_FONTNAME. 
You need need to make sure dot is able to find the font, -# which can be done by putting it in a standard location or by setting the -# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory -# containing the font. - -DOT_FONTNAME = FreeSans - -# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. -# The default size is 10pt. - -DOT_FONTSIZE = 10 - -# By default doxygen will tell dot to use the output directory to look for the -# FreeSans.ttf font (which doxygen will put there itself). If you specify a -# different font using DOT_FONTNAME you can set the path where dot -# can find it using this tag. - -DOT_FONTPATH = - -# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect inheritance relations. Setting this tag to YES will force the -# the CLASS_DIAGRAMS tag to NO. - -CLASS_GRAPH = YES - -# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect implementation dependencies (inheritance, containment, and -# class references variables) of the class with other documented classes. - -COLLABORATION_GRAPH = YES - -# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for groups, showing the direct groups dependencies - -GROUP_GRAPHS = YES - -# If the UML_LOOK tag is set to YES doxygen will generate inheritance and -# collaboration diagrams in a style similar to the OMG's Unified Modeling -# Language. - -UML_LOOK = NO - -# If set to YES, the inheritance and collaboration graphs will show the -# relations between templates and their instances. - -TEMPLATE_RELATIONS = YES - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT -# tags are set to YES then doxygen will generate a graph for each documented -# file showing the direct and indirect include dependencies of the file with -# other documented files. - -INCLUDE_GRAPH = YES - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and -# HAVE_DOT tags are set to YES then doxygen will generate a graph for each -# documented header file showing the documented files that directly or -# indirectly include this file. - -INCLUDED_BY_GRAPH = YES - -# If the CALL_GRAPH and HAVE_DOT options are set to YES then -# doxygen will generate a call dependency graph for every global function -# or class method. Note that enabling this option will significantly increase -# the time of a run. So in most cases it will be better to enable call graphs -# for selected functions only using the \callgraph command. - -CALL_GRAPH = NO - -# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then -# doxygen will generate a caller dependency graph for every global function -# or class method. Note that enabling this option will significantly increase -# the time of a run. So in most cases it will be better to enable caller -# graphs for selected functions only using the \callergraph command. - -CALLER_GRAPH = NO - -# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen -# will graphical hierarchy of all classes instead of a textual one. - -GRAPHICAL_HIERARCHY = YES - -# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES -# then doxygen will show the dependencies a directory has on other directories -# in a graphical way. 
The dependency relations are determined by the #include -# relations between the files in the directories. - -DIRECTORY_GRAPH = YES - -# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images -# generated by dot. Possible values are png, jpg, or gif -# If left blank png will be used. - -DOT_IMAGE_FORMAT = png - -# The tag DOT_PATH can be used to specify the path where the dot tool can be -# found. If left blank, it is assumed the dot tool can be found in the path. - -DOT_PATH = - -# The DOTFILE_DIRS tag can be used to specify one or more directories that -# contain dot files that are included in the documentation (see the -# \dotfile command). - -DOTFILE_DIRS = - -# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of -# nodes that will be shown in the graph. If the number of nodes in a graph -# becomes larger than this value, doxygen will truncate the graph, which is -# visualized by representing a node as a red box. Note that doxygen if the -# number of direct children of the root node in a graph is already larger than -# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note -# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. - -DOT_GRAPH_MAX_NODES = 50 - -# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the -# graphs generated by dot. A depth value of 3 means that only nodes reachable -# from the root by following a path via at most 3 edges will be shown. Nodes -# that lay further from the root node will be omitted. Note that setting this -# option to 1 or 2 may greatly reduce the computation time needed for large -# code bases. Also note that the size of a graph can be further restricted by -# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. - -MAX_DOT_GRAPH_DEPTH = 0 - -# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent -# background. This is disabled by default, because dot on Windows does not -# seem to support this out of the box. Warning: Depending on the platform used, -# enabling this option may lead to badly anti-aliased labels on the edges of -# a graph (i.e. they become hard to read). - -DOT_TRANSPARENT = YES - -# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output -# files in one run (i.e. multiple -o and -T options on the command line). This -# makes dot run faster, but since only newer versions of dot (>1.8.10) -# support this, this feature is disabled by default. - -DOT_MULTI_TARGETS = NO - -# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will -# generate a legend page explaining the meaning of the various boxes and -# arrows in the dot generated graphs. - -GENERATE_LEGEND = YES - -# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will -# remove the intermediate dot files that are used to generate -# the various graphs. 
- -DOT_CLEANUP = YES diff --git a/ffmpeg/doc/Makefile b/ffmpeg/doc/Makefile deleted file mode 100644 index 26bd9f5..0000000 --- a/ffmpeg/doc/Makefile +++ /dev/null @@ -1,162 +0,0 @@ -LIBRARIES-$(CONFIG_AVUTIL) += libavutil -LIBRARIES-$(CONFIG_SWSCALE) += libswscale -LIBRARIES-$(CONFIG_SWRESAMPLE) += libswresample -LIBRARIES-$(CONFIG_AVCODEC) += libavcodec -LIBRARIES-$(CONFIG_AVFORMAT) += libavformat -LIBRARIES-$(CONFIG_AVDEVICE) += libavdevice -LIBRARIES-$(CONFIG_AVFILTER) += libavfilter - -COMPONENTS-$(CONFIG_AVUTIL) += ffmpeg-utils -COMPONENTS-$(CONFIG_SWSCALE) += ffmpeg-scaler -COMPONENTS-$(CONFIG_SWRESAMPLE) += ffmpeg-resampler -COMPONENTS-$(CONFIG_AVCODEC) += ffmpeg-codecs ffmpeg-bitstream-filters -COMPONENTS-$(CONFIG_AVFORMAT) += ffmpeg-formats ffmpeg-protocols -COMPONENTS-$(CONFIG_AVDEVICE) += ffmpeg-devices -COMPONENTS-$(CONFIG_AVFILTER) += ffmpeg-filters - -MANPAGES1 = $(AVPROGS-yes:%=doc/%.1) $(AVPROGS-yes:%=doc/%-all.1) $(COMPONENTS-yes:%=doc/%.1) -MANPAGES3 = $(LIBRARIES-yes:%=doc/%.3) -MANPAGES = $(MANPAGES1) $(MANPAGES3) -PODPAGES = $(AVPROGS-yes:%=doc/%.pod) $(AVPROGS-yes:%=doc/%-all.pod) $(COMPONENTS-yes:%=doc/%.pod) $(LIBRARIES-yes:%=doc/%.pod) -HTMLPAGES = $(AVPROGS-yes:%=doc/%.html) $(AVPROGS-yes:%=doc/%-all.html) $(COMPONENTS-yes:%=doc/%.html) $(LIBRARIES-yes:%=doc/%.html) \ - doc/developer.html \ - doc/faq.html \ - doc/fate.html \ - doc/general.html \ - doc/git-howto.html \ - doc/nut.html \ - doc/platform.html \ - -TXTPAGES = doc/fate.txt \ - - -DOCS-$(CONFIG_HTMLPAGES) += $(HTMLPAGES) -DOCS-$(CONFIG_PODPAGES) += $(PODPAGES) -DOCS-$(CONFIG_MANPAGES) += $(MANPAGES) -DOCS-$(CONFIG_TXTPAGES) += $(TXTPAGES) -DOCS = $(DOCS-yes) - -DOC_EXAMPLES-$(CONFIG_DECODING_ENCODING_EXAMPLE) += decoding_encoding -DOC_EXAMPLES-$(CONFIG_DEMUXING_DECODING_EXAMPLE) += demuxing_decoding -DOC_EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE) += filtering_audio -DOC_EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE) += filtering_video -DOC_EXAMPLES-$(CONFIG_METADATA_EXAMPLE) += metadata -DOC_EXAMPLES-$(CONFIG_MUXING_EXAMPLE) += muxing -DOC_EXAMPLES-$(CONFIG_RESAMPLING_AUDIO_EXAMPLE) += resampling_audio -DOC_EXAMPLES-$(CONFIG_SCALING_VIDEO_EXAMPLE) += scaling_video -DOC_EXAMPLES-$(CONFIG_TRANSCODE_AAC_EXAMPLE) += transcode_aac -ALL_DOC_EXAMPLES_LIST = $(DOC_EXAMPLES-) $(DOC_EXAMPLES-yes) - -DOC_EXAMPLES := $(DOC_EXAMPLES-yes:%=doc/examples/%$(PROGSSUF)$(EXESUF)) -ALL_DOC_EXAMPLES := $(ALL_DOC_EXAMPLES_LIST:%=doc/examples/%$(PROGSSUF)$(EXESUF)) -ALL_DOC_EXAMPLES_G := $(ALL_DOC_EXAMPLES_LIST:%=doc/examples/%$(PROGSSUF)_g$(EXESUF)) -PROGS += $(DOC_EXAMPLES) - -all-$(CONFIG_DOC): doc - -doc: documentation - -apidoc: doc/doxy/html -documentation: $(DOCS) - -examples: $(DOC_EXAMPLES) - -TEXIDEP = perl $(SRC_PATH)/doc/texidep.pl $(SRC_PATH) $< $@ >$(@:%=%.d) - -doc/%.txt: TAG = TXT -doc/%.txt: doc/%.texi - $(Q)$(TEXIDEP) - $(M)makeinfo --force --no-headers -o $@ $< 2>/dev/null - -GENTEXI = format codec -GENTEXI := $(GENTEXI:%=doc/avoptions_%.texi) - -$(GENTEXI): TAG = GENTEXI -$(GENTEXI): doc/avoptions_%.texi: doc/print_options$(HOSTEXESUF) - $(M)doc/print_options $* > $@ - -doc/%.html: TAG = HTML -doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI) - $(Q)$(TEXIDEP) - $(M)texi2html -I doc -monolithic --D=config-not-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $< - -doc/%-all.html: TAG = HTML -doc/%-all.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI) - $(Q)$(TEXIDEP) - $(M)texi2html -I doc -monolithic --D=config-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $< - -doc/%.pod: TAG = POD 
-doc/%.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI) - $(Q)$(TEXIDEP) - $(M)perl $(SRC_PATH)/doc/texi2pod.pl -Dconfig-not-all=yes -Idoc $< $@ - -doc/%-all.pod: TAG = POD -doc/%-all.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI) - $(Q)$(TEXIDEP) - $(M)perl $(SRC_PATH)/doc/texi2pod.pl -Dconfig-all=yes -Idoc $< $@ - -doc/%.1 doc/%.3: TAG = MAN -doc/%.1: doc/%.pod $(GENTEXI) - $(M)pod2man --section=1 --center=" " --release=" " $< > $@ -doc/%.3: doc/%.pod $(GENTEXI) - $(M)pod2man --section=3 --center=" " --release=" " $< > $@ - -$(DOCS) doc/doxy/html: | doc/ -$(DOC_EXAMPLES:%=%.o): | doc/examples -OBJDIRS += doc/examples - -doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(INSTHEADERS) - $(M)$(SRC_PATH)/doc/doxy-wrapper.sh $(SRC_PATH) $^ - -install-doc: install-html install-man - -install-html: - -install-man: - -ifdef CONFIG_HTMLPAGES -install-progs-$(CONFIG_DOC): install-html - -install-html: $(HTMLPAGES) - $(Q)mkdir -p "$(DOCDIR)" - $(INSTALL) -m 644 $(HTMLPAGES) "$(DOCDIR)" -endif - -ifdef CONFIG_MANPAGES -install-progs-$(CONFIG_DOC): install-man - -install-man: $(MANPAGES) - $(Q)mkdir -p "$(MANDIR)/man1" - $(INSTALL) -m 644 $(MANPAGES1) "$(MANDIR)/man1" - $(Q)mkdir -p "$(MANDIR)/man3" - $(INSTALL) -m 644 $(MANPAGES3) "$(MANDIR)/man3" -endif - -uninstall: uninstall-doc - -uninstall-doc: uninstall-html uninstall-man - -uninstall-html: - $(RM) -r "$(DOCDIR)" - -uninstall-man: - $(RM) $(addprefix "$(MANDIR)/man1/",$(AVPROGS-yes:%=%.1) $(AVPROGS-yes:%=%-all.1) $(COMPONENTS-yes:%=%.1)) - $(RM) $(addprefix "$(MANDIR)/man3/",$(LIBRARIES-yes:%=%.3)) - -clean:: docclean - -distclean:: docclean - $(RM) doc/config.texi - -examplesclean: - $(RM) $(ALL_DOC_EXAMPLES) $(ALL_DOC_EXAMPLES_G) - $(RM) $(CLEANSUFFIXES:%=doc/examples/%) - -docclean: examplesclean - $(RM) $(CLEANSUFFIXES:%=doc/%) - $(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 doc/*.3 doc/avoptions_*.texi - $(RM) -r doc/doxy/html - --include $(wildcard $(DOCS:%=%.d)) - -.PHONY: apidoc doc documentation diff --git a/ffmpeg/doc/RELEASE_NOTES b/ffmpeg/doc/RELEASE_NOTES deleted file mode 100644 index fae3a2b..0000000 --- a/ffmpeg/doc/RELEASE_NOTES +++ /dev/null @@ -1,16 +0,0 @@ -Release Notes -============= - -* 2.1 "Fourier" October, 2013 - - -General notes -------------- -See the Changelog file for a list of significant changes. Note, there -are many more new features and bugfixes than whats listed there. - -Bugreports against FFmpeg git master or the most recent FFmpeg release are -accepted. If you are experiencing issues with any formally released version of -FFmpeg, please try git master to check if the issue still exists. If it does, -make your report against the development code following the usual bug reporting -guidelines. diff --git a/ffmpeg/doc/authors.texi b/ffmpeg/doc/authors.texi deleted file mode 100644 index 6c8c1d7..0000000 --- a/ffmpeg/doc/authors.texi +++ /dev/null @@ -1,11 +0,0 @@ -@chapter Authors - -The FFmpeg developers. - -For details about the authorship, see the Git history of the project -(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command -@command{git log} in the FFmpeg source directory, or browsing the -online repository at @url{http://source.ffmpeg.org}. - -Maintainers for the specific components are listed in the file -@file{MAINTAINERS} in the source code tree. 
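-
-For example (an illustrative sketch; the file and directory names below are
-only placeholders, not taken from the original file), the Git history of a
-specific component can be inspected with commands such as:
-@example
-# commits touching a single file
-git log --oneline libavcodec/aacdec.c
-# per-author commit counts for a whole library
-git shortlog -sn -- libavfilter
-@end example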
diff --git a/ffmpeg/doc/bitstream_filters.texi b/ffmpeg/doc/bitstream_filters.texi deleted file mode 100644 index 9bcb12c..0000000 --- a/ffmpeg/doc/bitstream_filters.texi +++ /dev/null @@ -1,126 +0,0 @@ -@chapter Bitstream Filters -@c man begin BITSTREAM FILTERS - -When you configure your FFmpeg build, all the supported bitstream -filters are enabled by default. You can list all available ones using -the configure option @code{--list-bsfs}. - -You can disable all the bitstream filters using the configure option -@code{--disable-bsfs}, and selectively enable any bitstream filter using -the option @code{--enable-bsf=BSF}, or you can disable a particular -bitstream filter using the option @code{--disable-bsf=BSF}. - -The option @code{-bsfs} of the ff* tools will display the list of -all the supported bitstream filters included in your build. - -Below is a description of the currently available bitstream filters. - -@section aac_adtstoasc - -Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration -bitstream filter. - -This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4 -ADTS header and removes the ADTS header. - -This is required for example when copying an AAC stream from a raw -ADTS AAC container to a FLV or a MOV/MP4 file. - -@section chomp - -Remove zero padding at the end of a packet. - -@section dump_extra - -Add extradata to the beginning of the filtered packets. - -The additional argument specifies which packets should be filtered. -It accepts the values: -@table @samp -@item a -add extradata to all key packets, but only if @var{local_header} is -set in the @option{flags2} codec context field - -@item k -add extradata to all key packets - -@item e -add extradata to all packets -@end table - -If not specified it is assumed @samp{k}. - -For example the following @command{ffmpeg} command forces a global -header (thus disabling individual packet headers) in the H.264 packets -generated by the @code{libx264} encoder, but corrects them by adding -the header stored in extradata to the key packets: -@example -ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts -@end example - -@section h264_mp4toannexb - -Convert an H.264 bitstream from length prefixed mode to start code -prefixed mode (as defined in the Annex B of the ITU-T H.264 -specification). - -This is required by some streaming formats, typically the MPEG-2 -transport stream format ("mpegts"). - -For example to remux an MP4 file containing an H.264 stream to mpegts -format with @command{ffmpeg}, you can use the command: - -@example -ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts -@end example - -@section imx_dump_header - -@section mjpeg2jpeg - -Convert MJPEG/AVI1 packets to full JPEG/JFIF packets. - -MJPEG is a video codec wherein each video frame is essentially a -JPEG image. The individual frames can be extracted without loss, -e.g. by - -@example -ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg -@end example - -Unfortunately, these chunks are incomplete JPEG images, because -they lack the DHT segment required for decoding. Quoting from -@url{http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml}: - -Avery Lee, writing in the rec.video.desktop newsgroup in 2001, -commented that "MJPEG, or at least the MJPEG in AVIs having the -MJPG fourcc, is restricted JPEG with a fixed -- and *omitted* -- -Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2, -and it must use basic Huffman encoding, not arithmetic or -progressive. . . . 
You can indeed extract the MJPEG frames and
-decode them with a regular JPEG decoder, but you have to prepend
-the DHT segment to them, or else the decoder won't have any idea
-how to decompress the data. The exact table necessary is given in
-the OpenDML spec."
-
-This bitstream filter patches the header of frames extracted from an MJPEG
-stream (carrying the AVI1 header ID and lacking a DHT segment) to
-produce fully qualified JPEG images.
-
-@example
-ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
-exiftran -i -9 frame*.jpg
-ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
-@end example
-
-@section mjpega_dump_header
-
-@section movsub
-
-@section mp3_header_decompress
-
-@section noise
-
-@section remove_extra
-
-@c man end BITSTREAM FILTERS
diff --git a/ffmpeg/doc/build_system.txt b/ffmpeg/doc/build_system.txt
deleted file mode 100644
index 36c141e..0000000
--- a/ffmpeg/doc/build_system.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-FFmpeg currently uses a custom build system; this text attempts to document
-some of its obscure features and options.
-
-Makefile variables:
-
-V
- Disable the default terse mode; the full command issued by make and its
- output will be shown on the screen.
-
-DESTDIR
- Destination directory for the install targets, useful to prepare packages
- or install FFmpeg in cross-environments.
-
-Makefile targets:
-
-all
- Default target, builds all the libraries and the executables.
-
-fate
- Run the fate test suite, note that you must have installed it.
-
-fate-list
- Will list all fate/regression test targets.
-
-install
- Install headers, libraries and programs.
-
-libavformat/output-example
- Build the libavformat basic example.
-
-libavcodec/api-example
- Build the libavcodec basic example.
-
-libswscale/swscale-test
- Build the swscale self-test (also useful as an example).
-
-
-Useful standard make commands:
-make -t <target>
- Touch all files that otherwise would be built; this is useful to reduce
- unneeded rebuilding when changing headers, but note that you must then
- force rebuilds of files that actually need it by hand.
-
-make -j<num>
- Rebuild with multiple jobs at the same time. Faster on multiprocessor systems.
-
-make -k
- Continue the build in case of errors; this is sometimes useful for the
- regression tests, but note that it will still not run all regression tests.
-
diff --git a/ffmpeg/doc/decoders.texi b/ffmpeg/doc/decoders.texi
deleted file mode 100644
index 9d9f298..0000000
--- a/ffmpeg/doc/decoders.texi
+++ /dev/null
@@ -1,202 +0,0 @@
-@chapter Decoders
-@c man begin DECODERS
-
-Decoders are configured elements in FFmpeg which allow the decoding of
-multimedia streams.
-
-When you configure your FFmpeg build, all the supported native decoders
-are enabled by default. Decoders requiring an external library must be enabled
-manually via the corresponding @code{--enable-lib} option. You can list all
-available decoders using the configure option @code{--list-decoders}.
-
-You can disable all the decoders with the configure option
-@code{--disable-decoders} and selectively enable / disable single decoders
-with the options @code{--enable-decoder=@var{DECODER}} /
-@code{--disable-decoder=@var{DECODER}}.
-
-The option @code{-codecs} of the ff* tools will display the list of
-enabled decoders.
-
-@c man end DECODERS
-
-@chapter Video Decoders
-@c man begin VIDEO DECODERS
-
-A description of some of the currently available video decoders
-follows.
-
-@section rawvideo
-
-Raw video decoder.
-
-This decoder decodes rawvideo streams.
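-
-As a minimal illustration (a sketch, not from the original text; the file
-name and parameter values below are placeholders), raw frames can be decoded
-by pairing this decoder with the rawvideo demuxer described later and
-specifying the frame parameters by hand:
-@example
-ffplay -f rawvideo -pixel_format yuv420p -video_size 320x240 -framerate 25 input.raw
-@end example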
- -@subsection Options - -@table @option -@item top @var{top_field_first} -Specify the assumed field type of the input video. -@table @option -@item -1 -the video is assumed to be progressive (default) -@item 0 -bottom-field-first is assumed -@item 1 -top-field-first is assumed -@end table - -@end table - -@c man end VIDEO DECODERS - -@chapter Audio Decoders -@c man begin AUDIO DECODERS - -@section ffwavesynth - -Internal wave synthetizer. - -This decoder generates wave patterns according to predefined sequences. Its -use is purely internal and the format of the data it accepts is not publicly -documented. - -@section libcelt - -libcelt decoder wrapper. - -libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec. -Requires the presence of the libcelt headers and library during configuration. -You need to explicitly configure the build with @code{--enable-libcelt}. - -@section libgsm - -libgsm decoder wrapper. - -libgsm allows libavcodec to decode the GSM full rate audio codec. Requires -the presence of the libgsm headers and library during configuration. You need -to explicitly configure the build with @code{--enable-libgsm}. - -This decoder supports both the ordinary GSM and the Microsoft variant. - -@section libilbc - -libilbc decoder wrapper. - -libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC) -audio codec. Requires the presence of the libilbc headers and library during -configuration. You need to explicitly configure the build with -@code{--enable-libilbc}. - -@subsection Options - -The following option is supported by the libilbc wrapper. - -@table @option -@item enhance - -Enable the enhancement of the decoded audio when set to 1. The default -value is 0 (disabled). - -@end table - -@section libopencore-amrnb - -libopencore-amrnb decoder wrapper. - -libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate -Narrowband audio codec. Using it requires the presence of the -libopencore-amrnb headers and library during configuration. You need to -explicitly configure the build with @code{--enable-libopencore-amrnb}. - -An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB -without this library. - -@section libopencore-amrwb - -libopencore-amrwb decoder wrapper. - -libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate -Wideband audio codec. Using it requires the presence of the -libopencore-amrwb headers and library during configuration. You need to -explicitly configure the build with @code{--enable-libopencore-amrwb}. - -An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB -without this library. - -@section libopus - -libopus decoder wrapper. - -libopus allows libavcodec to decode the Opus Interactive Audio Codec. -Requires the presence of the libopus headers and library during -configuration. You need to explicitly configure the build with -@code{--enable-libopus}. - -@c man end AUDIO DECODERS - -@chapter Subtitles Decoders -@c man begin SUBTILES DECODERS - -@section dvdsub - -This codec decodes the bitmap subtitles used in DVDs; the same subtitles can -also be found in VobSub file pairs and in some Matroska files. - -@subsection Options - -@table @option -@item palette -Specify the global palette used by the bitmaps. When stored in VobSub, the -palette is normally specified in the index file; in Matroska, the palette is -stored in the codec extra-data in the same format as in VobSub. 
In DVDs, the
-palette is stored in the IFO file, and therefore not available when reading
-from dumped VOB files.
-
-The format for this option is a string containing 16 24-bit hexadecimal
-numbers (without 0x prefix) separated by commas, for example @code{0d00ee,
-ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
-7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b}.
-@end table
-
-@section libzvbi-teletext
-
-Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext
-subtitles. Requires the presence of the libzvbi headers and library during
-configuration. You need to explicitly configure the build with
-@code{--enable-libzvbi}.
-
-@subsection Options
-
-@table @option
-@item txt_page
-List of teletext page numbers to decode. You may use the special * string to
-match all pages. Pages that do not match the specified list are dropped.
-Default value is *.
-@item txt_chop_top
-Discards the top teletext line. Default value is 1.
-@item txt_format
-Specifies the format of the decoded subtitles. The teletext decoder is capable
-of decoding the teletext pages to bitmaps or to simple text. You should use
-"bitmap" for teletext pages, because certain graphics and colors cannot be
-expressed in simple text. You might use "text" for teletext-based subtitles if
-your application can handle simple text-based subtitles. Default value is
-bitmap.
-@item txt_left
-X offset of generated bitmaps, default is 0.
-@item txt_top
-Y offset of generated bitmaps, default is 0.
-@item txt_chop_spaces
-Chops leading and trailing spaces and removes empty lines from the generated
-text. This option is useful for teletext-based subtitles where empty spaces may
-be present at the start or at the end of the lines, or empty lines may be
-present between the subtitle lines, because of double-sized teletext characters.
-Default value is 1.
-@item txt_duration
-Sets the display duration of the decoded teletext pages or subtitles in
-milliseconds. Default value is 30000, which is 30 seconds.
-@item txt_transparent
-Force transparent background of the generated teletext bitmaps. Default value
-is 0, which means an opaque (black) background.
-@end table - -@c man end SUBTILES DECODERS diff --git a/ffmpeg/doc/default.css b/ffmpeg/doc/default.css deleted file mode 100644 index bf50200..0000000 --- a/ffmpeg/doc/default.css +++ /dev/null @@ -1,165 +0,0 @@ -a.summary-letter { - text-decoration: none; -} - -a { - color: #2D6198; -} - -a:visited { - color: #884488; -} - -#banner { - background-color: white; - position: relative; - text-align: center; -} - -#banner img { - margin-bottom: 1px; - margin-top: 5px; -} - -#body { - margin-left: 1em; - margin-right: 1em; -} - -body { - background-color: #313131; - margin: 0; - text-align: justify; -} - -.center { - margin-left: auto; - margin-right: auto; - text-align: center; -} - -#container { - background-color: white; - color: #202020; - margin-left: 1em; - margin-right: 1em; -} - -#footer { - text-align: center; -} - -h1 a, h2 a, h3 a, h4 a { - text-decoration: inherit; - color: inherit; -} - -h1, h2, h3, h4 { - padding-left: 0.4em; - border-radius: 4px; - padding-bottom: 0.25em; - padding-top: 0.25em; - border: 1px solid #6A996A; -} - -h1 { - background-color: #7BB37B; - color: #151515; - font-size: 1.2em; - padding-bottom: 0.3em; - padding-top: 0.3em; -} - -h2 { - color: #313131; - font-size: 1.0em; - background-color: #ABE3AB; -} - -h3 { - color: #313131; - font-size: 0.9em; - margin-bottom: -6px; - background-color: #BBF3BB; -} - -h4 { - color: #313131; - font-size: 0.8em; - margin-bottom: -8px; - background-color: #D1FDD1; -} - -img { - border: 0; -} - -#navbar { - background-color: #738073; - border-bottom: 1px solid #5C665C; - border-top: 1px solid #5C665C; - margin-top: 12px; - padding: 0.3em; - position: relative; - text-align: center; -} - -#navbar a, #navbar_secondary a { - color: white; - padding: 0.3em; - text-decoration: none; -} - -#navbar a:hover, #navbar_secondary a:hover { - background-color: #313131; - color: white; - text-decoration: none; -} - -#navbar_secondary { - background-color: #738073; - border-bottom: 1px solid #5C665C; - border-left: 1px solid #5C665C; - border-right: 1px solid #5C665C; - padding: 0.3em; - position: relative; - text-align: center; -} - -p { - margin-left: 1em; - margin-right: 1em; -} - -pre { - margin-left: 3em; - margin-right: 3em; - padding: 0.3em; - border: 1px solid #bbb; - background-color: #f7f7f7; -} - -dl dt { - font-weight: bold; -} - -#proj_desc { - font-size: 1.2em; -} - -#repos { - margin-left: 1em; - margin-right: 1em; - border-collapse: collapse; - border: solid 1px #6A996A; -} - -#repos th { - background-color: #7BB37B; - border: solid 1px #6A996A; -} - -#repos td { - padding: 0.2em; - border: solid 1px #6A996A; -} diff --git a/ffmpeg/doc/demuxers.texi b/ffmpeg/doc/demuxers.texi deleted file mode 100644 index bfc0bdc..0000000 --- a/ffmpeg/doc/demuxers.texi +++ /dev/null @@ -1,375 +0,0 @@ -@chapter Demuxers -@c man begin DEMUXERS - -Demuxers are configured elements in FFmpeg that can read the -multimedia streams from a particular type of file. - -When you configure your FFmpeg build, all the supported demuxers -are enabled by default. You can list all available ones using the -configure option @code{--list-demuxers}. - -You can disable all the demuxers using the configure option -@code{--disable-demuxers}, and selectively enable a single demuxer with -the option @code{--enable-demuxer=@var{DEMUXER}}, or disable it -with the option @code{--disable-demuxer=@var{DEMUXER}}. - -The option @code{-formats} of the ff* tools will display the list of -enabled demuxers. 
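-
-For example (an illustrative sketch, not from the original text; the file
-names are placeholders), the enabled demuxers can be listed at run time and a
-specific demuxer can be forced with the @code{-f} option placed before the
-input:
-@example
-# list all enabled muxers and demuxers
-ffmpeg -formats
-# force the MPEG-2 transport stream demuxer for the given input
-ffmpeg -f mpegts -i input.ts -c copy output.mkv
-@end example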
-
-The description of some of the currently available demuxers follows.
-
-@section applehttp
-
-Apple HTTP Live Streaming demuxer.
-
-This demuxer presents all AVStreams from all variant streams.
-The id field is set to the bitrate variant index number. By setting
-the discard flags on AVStreams (by pressing 'a' or 'v' in ffplay),
-the caller can decide which variant streams to actually receive.
-The total bitrate of the variant that the stream belongs to is
-available in a metadata key named "variant_bitrate".
-
-@section asf
-
-Advanced Systems Format demuxer.
-
-This demuxer is used to demux ASF files and MMS network streams.
-
-@table @option
-@item -no_resync_search @var{bool}
-Do not try to resynchronize by looking for a certain optional start code.
-@end table
-
-@anchor{concat}
-@section concat
-
-Virtual concatenation script demuxer.
-
-This demuxer reads a list of files and other directives from a text file and
-demuxes them one after the other, as if all their packets had been muxed
-together.
-
-The timestamps in the files are adjusted so that the first file starts at 0
-and each next file starts where the previous one finishes. Note that the
-adjustment is done globally and may cause gaps if all streams do not have
-exactly the same length.
-
-All files must have the same streams (same codecs, same time base, etc.).
-
-The duration of each file is used to adjust the timestamps of the next file:
-if the duration is incorrect (because it was computed using the bit-rate or
-because the file is truncated, for example), it can cause artifacts. The
-@code{duration} directive can be used to override the duration stored in
-each file.
-
-@subsection Syntax
-
-The script is a text file in extended-ASCII, with one directive per line.
-Empty lines, leading spaces and lines starting with '#' are ignored. The
-following directives are recognized:
-
-@table @option
-
-@item @code{file @var{path}}
-Path to a file to read; special characters and spaces must be escaped with
-backslash or single quotes.
-
-All subsequent directives apply to that file.
-
-@item @code{ffconcat version 1.0}
-Identify the script type and version. It also sets the @option{safe} option
-to 1 if it was set to its default -1.
-
-To make FFmpeg recognize the format automatically, this directive must
-appear exactly as is (no extra space or byte-order-mark) on the very first
-line of the script.
-
-@item @code{duration @var{dur}}
-Duration of the file. This information can be specified from the file;
-specifying it here may be more efficient or help if the information from the
-file is not available or accurate.
-
-If the duration is set for all files, then it is possible to seek in the
-whole concatenated video.
-
-@end table
-
-@subsection Options
-
-This demuxer accepts the following option:
-
-@table @option
-
-@item safe
-If set to 1, reject unsafe file paths. A file path is considered safe if it
-does not contain a protocol specification, is relative, and all components
-only contain characters from the portable character set (letters, digits,
-period, underscore and hyphen) and have no period at the beginning of a
-component.
-
-If set to 0, any file name is accepted.
-
-The default is -1; it is equivalent to 1 if the format was automatically
-probed and 0 otherwise.
-
-@end table
-
-@section flv
-
-Adobe Flash Video Format demuxer.
-
-This demuxer is used to demux FLV files and RTMP network streams.
-
-@table @option
-@item -flv_metadata @var{bool}
-Allocate the streams according to the onMetaData array content.
-@end table - -@section libgme - -The Game Music Emu library is a collection of video game music file emulators. - -See @url{http://code.google.com/p/game-music-emu/} for more information. - -Some files have multiple tracks. The demuxer will pick the first track by -default. The @option{track_index} option can be used to select a different -track. Track indexes start at 0. The demuxer exports the number of tracks as -@var{tracks} meta data entry. - -For very large files, the @option{max_size} option may have to be adjusted. - -@section libquvi - -Play media from Internet services using the quvi project. - -The demuxer accepts a @option{format} option to request a specific quality. It -is by default set to @var{best}. - -See @url{http://quvi.sourceforge.net/} for more information. - -FFmpeg needs to be built with @code{--enable-libquvi} for this demuxer to be -enabled. - -@section image2 - -Image file demuxer. - -This demuxer reads from a list of image files specified by a pattern. -The syntax and meaning of the pattern is specified by the -option @var{pattern_type}. - -The pattern may contain a suffix which is used to automatically -determine the format of the images contained in the files. - -The size, the pixel format, and the format of each image must be the -same for all the files in the sequence. - -This demuxer accepts the following options: -@table @option -@item framerate -Set the frame rate for the video stream. It defaults to 25. -@item loop -If set to 1, loop over the input. Default value is 0. -@item pattern_type -Select the pattern type used to interpret the provided filename. - -@var{pattern_type} accepts one of the following values. -@table @option -@item sequence -Select a sequence pattern type, used to specify a sequence of files -indexed by sequential numbers. - -A sequence pattern may contain the string "%d" or "%0@var{N}d", which -specifies the position of the characters representing a sequential -number in each filename matched by the pattern. If the form -"%d0@var{N}d" is used, the string representing the number in each -filename is 0-padded and @var{N} is the total number of 0-padded -digits representing the number. The literal character '%' can be -specified in the pattern with the string "%%". - -If the sequence pattern contains "%d" or "%0@var{N}d", the first filename of -the file list specified by the pattern must contain a number -inclusively contained between @var{start_number} and -@var{start_number}+@var{start_number_range}-1, and all the following -numbers must be sequential. - -For example the pattern "img-%03d.bmp" will match a sequence of -filenames of the form @file{img-001.bmp}, @file{img-002.bmp}, ..., -@file{img-010.bmp}, etc.; the pattern "i%%m%%g-%d.jpg" will match a -sequence of filenames of the form @file{i%m%g-1.jpg}, -@file{i%m%g-2.jpg}, ..., @file{i%m%g-10.jpg}, etc. - -Note that the pattern must not necessarily contain "%d" or -"%0@var{N}d", for example to convert a single image file -@file{img.jpeg} you can employ the command: -@example -ffmpeg -i img.jpeg img.png -@end example - -@item glob -Select a glob wildcard pattern type. - -The pattern is interpreted like a @code{glob()} pattern. This is only -selectable if libavformat was compiled with globbing support. - -@item glob_sequence @emph{(deprecated, will be removed)} -Select a mixed glob wildcard/sequence pattern. 
-
-If your version of libavformat was compiled with globbing support, and
-the provided pattern contains at least one glob meta character among
-@code{%*?[]@{@}} that is preceded by an unescaped "%", the pattern is
-interpreted like a @code{glob()} pattern, otherwise it is interpreted
-like a sequence pattern.
-
-All glob special characters @code{%*?[]@{@}} must be prefixed
-with "%". To escape a literal "%" you shall use "%%".
-
-For example, the pattern @code{foo-%*.jpeg} will match all the
-filenames prefixed by "foo-" and terminating with ".jpeg", and
-@code{foo-%?%?%?.jpeg} will match all the filenames prefixed with
-"foo-", followed by a sequence of three characters, and terminating
-with ".jpeg".
-
-This pattern type is deprecated in favor of @var{glob} and
-@var{sequence}.
-@end table
-
-Default value is @var{glob_sequence}.
-@item pixel_format
-Set the pixel format of the images to read. If not specified the pixel
-format is guessed from the first image file in the sequence.
-@item start_number
-Set the index of the file matched by the image file pattern to start
-to read from. Default value is 0.
-@item start_number_range
-Set the index interval range to check when looking for the first image
-file in the sequence, starting from @var{start_number}. Default value
-is 5.
-@item ts_from_file
-If set to 1, the frame timestamp is set to the modification time of the image
-file. Note that monotonicity of timestamps is not guaranteed: images go in the
-same order as without this option. Default value is 0.
-@item video_size
-Set the video size of the images to read. If not specified the video
-size is guessed from the first image file in the sequence.
-@end table
-
-@subsection Examples
-
-@itemize
-@item
-Use @command{ffmpeg} to create a video from the images in the file
-sequence @file{img-001.jpeg}, @file{img-002.jpeg}, ..., assuming an
-input frame rate of 10 frames per second:
-@example
-ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
-@end example
-
-@item
-As above, but start by reading from a file with index 100 in the sequence:
-@example
-ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
-@end example
-
-@item
-Read images matching the "*.png" glob pattern, that is all the files
-terminating with the ".png" suffix:
-@example
-ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
-@end example
-@end itemize
-
-@section mpegts
-
-MPEG-2 transport stream demuxer.
-
-@table @option
-
-@item fix_teletext_pts
-Overrides teletext packet PTS and DTS values with the timestamps calculated
-from the PCR of the first program which the teletext stream is part of and is
-not discarded. Default value is 1; set this option to 0 if you want your
-teletext packet PTS and DTS values untouched.
-@end table
-
-@section rawvideo
-
-Raw video demuxer.
-
-This demuxer allows reading raw video data. Since there is no header
-specifying the assumed video parameters, the user must specify them
-in order to be able to decode the data correctly.
-
-This demuxer accepts the following options:
-@table @option
-
-@item framerate
-Set the input video frame rate. Default value is 25.
-
-@item pixel_format
-Set the input video pixel format. Default value is @code{yuv420p}.
-
-@item video_size
-Set the input video size. This value must be specified explicitly.
-@end table - -For example to read a rawvideo file @file{input.raw} with -@command{ffplay}, assuming a pixel format of @code{rgb24}, a video -size of @code{320x240}, and a frame rate of 10 images per second, use -the command: -@example -ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw -@end example - -@section sbg - -SBaGen script demuxer. - -This demuxer reads the script language used by SBaGen -@url{http://uazu.net/sbagen/} to generate binaural beats sessions. A SBG -script looks like that: -@example --SE -a: 300-2.5/3 440+4.5/0 -b: 300-2.5/0 440+4.5/3 -off: - -NOW == a -+0:07:00 == b -+0:14:00 == a -+0:21:00 == b -+0:30:00 off -@end example - -A SBG script can mix absolute and relative timestamps. If the script uses -either only absolute timestamps (including the script start time) or only -relative ones, then its layout is fixed, and the conversion is -straightforward. On the other hand, if the script mixes both kind of -timestamps, then the @var{NOW} reference for relative timestamps will be -taken from the current time of day at the time the script is read, and the -script layout will be frozen according to that reference. That means that if -the script is directly played, the actual times will match the absolute -timestamps up to the sound controller's clock accuracy, but if the user -somehow pauses the playback or seeks, all times will be shifted accordingly. - -@section tedcaptions - -JSON captions used for @url{http://www.ted.com/, TED Talks}. - -TED does not provide links to the captions, but they can be guessed from the -page. The file @file{tools/bookmarklets.html} from the FFmpeg source tree -contains a bookmarklet to expose them. - -This demuxer accepts the following option: -@table @option -@item start_time -Set the start time of the TED talk, in milliseconds. The default is 15000 -(15s). It is used to sync the captions with the downloadable videos, because -they include a 15s intro. -@end table - -Example: convert the captions to a format most players understand: -@example -ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt -@end example - -@c man end DEMUXERS diff --git a/ffmpeg/doc/developer.texi b/ffmpeg/doc/developer.texi deleted file mode 100644 index 1e1d3b8..0000000 --- a/ffmpeg/doc/developer.texi +++ /dev/null @@ -1,797 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle Developer Documentation -@titlepage -@center @titlefont{Developer Documentation} -@end titlepage - -@top - -@contents - -@chapter Developers Guide - -@section Notes for external developers - -This document is mostly useful for internal FFmpeg developers. -External developers who need to use the API in their application should -refer to the API doxygen documentation in the public headers, and -check the examples in @file{doc/examples} and in the source code to -see how the public API is employed. - -You can use the FFmpeg libraries in your commercial program, but you -are encouraged to @emph{publish any patch you make}. In this case the -best way to proceed is to send your patches to the ffmpeg-devel -mailing list following the guidelines illustrated in the remainder of -this document. - -For more detailed legal information about the use of FFmpeg in -external programs read the @file{LICENSE} file in the source tree and -consult @url{http://ffmpeg.org/legal.html}. - -@section Contributing - -There are 3 ways by which code gets into ffmpeg. 
-@itemize @bullet -@item Submitting Patches to the main developer mailing list - see @ref{Submitting patches} for details. -@item Directly committing changes to the main tree. -@item Committing changes to a git clone, for example on github.com or - gitorious.org. And asking us to merge these changes. -@end itemize - -Whichever way, changes should be reviewed by the maintainer of the code -before they are committed. And they should follow the @ref{Coding Rules}. -The developer making the commit and the author are responsible for their changes -and should try to fix issues their commit causes. - -@anchor{Coding Rules} -@section Coding Rules - -@subsection Code formatting conventions - -There are the following guidelines regarding the indentation in files: - -@itemize @bullet -@item -Indent size is 4. - -@item -The TAB character is forbidden outside of Makefiles as is any -form of trailing whitespace. Commits containing either will be -rejected by the git repository. - -@item -You should try to limit your code lines to 80 characters; however, do so if -and only if this improves readability. -@end itemize -The presentation is one inspired by 'indent -i4 -kr -nut'. - -The main priority in FFmpeg is simplicity and small code size in order to -minimize the bug count. - -@subsection Comments -Use the JavaDoc/Doxygen format (see examples below) so that code documentation -can be generated automatically. All nontrivial functions should have a comment -above them explaining what the function does, even if it is just one sentence. -All structures and their member variables should be documented, too. - -Avoid Qt-style and similar Doxygen syntax with @code{!} in it, i.e. replace -@code{//!} with @code{///} and similar. Also @@ syntax should be employed -for markup commands, i.e. use @code{@@param} and not @code{\param}. - -@example -/** - * @@file - * MPEG codec. - * @@author ... - */ - -/** - * Summary sentence. - * more text ... - * ... - */ -typedef struct Foobar @{ - int var1; /**< var1 description */ - int var2; ///< var2 description - /** var3 description */ - int var3; -@} Foobar; - -/** - * Summary sentence. - * more text ... - * ... - * @@param my_parameter description of my_parameter - * @@return return value description - */ -int myfunc(int my_parameter) -... -@end example - -@subsection C language features - -FFmpeg is programmed in the ISO C90 language with a few additional -features from ISO C99, namely: - -@itemize @bullet -@item -the @samp{inline} keyword; - -@item -@samp{//} comments; - -@item -designated struct initializers (@samp{struct s x = @{ .i = 17 @};}) - -@item -compound literals (@samp{x = (struct s) @{ 17, 23 @};}) -@end itemize - -These features are supported by all compilers we care about, so we will not -accept patches to remove their use unless they absolutely do not impair -clarity and performance. - -All code must compile with recent versions of GCC and a number of other -currently supported compilers. To ensure compatibility, please do not use -additional C99 features or GCC extensions. Especially watch out for: - -@itemize @bullet -@item -mixing statements and declarations; - -@item -@samp{long long} (use @samp{int64_t} instead); - -@item -@samp{__attribute__} not protected by @samp{#ifdef __GNUC__} or similar; - -@item -GCC statement expressions (@samp{(x = (@{ int y = 4; y; @})}). -@end itemize - -@subsection Naming conventions -All names should be composed with underscores (_), not CamelCase. 
-
-@subsection Naming conventions
-All names should be composed with underscores (_), not CamelCase. For example,
-@samp{avfilter_get_video_buffer} is an acceptable function name and
-@samp{AVFilterGetVideo} is not. The exceptions to this are type names, such
-as structs and enums; they should always be in CamelCase.
-
-The following conventions apply to naming variables and functions:
-
-@itemize @bullet
-@item
-For local variables no prefix is required.
-
-@item
-For file-scope variables and functions declared as @code{static}, no prefix
-is required.
-
-@item
-For variables and functions visible outside of file scope, but only used
-internally by a library, an @code{ff_} prefix should be used,
-e.g. @samp{ff_w64_demuxer}.
-
-@item
-For variables and functions visible outside of file scope, used internally
-across multiple libraries, use @code{avpriv_} as prefix, for example,
-@samp{avpriv_aac_parse_header}.
-
-@item
-Each library has its own prefix for public symbols, in addition to the
-commonly used @code{av_} (@code{avformat_} for libavformat,
-@code{avcodec_} for libavcodec, @code{swr_} for libswresample, etc).
-Check the existing code and choose names accordingly.
-Note that some symbols without these prefixes are also exported for
-backward-compatibility reasons. These exceptions are declared in the
-@code{lib<name>/lib<name>.v} files.
-@end itemize
-
-Furthermore, the namespace reserved for the system should not be invaded.
-Identifiers ending in @code{_t} are reserved by
-@url{http://pubs.opengroup.org/onlinepubs/007904975/functions/xsh_chap02_02.html#tag_02_02_02, POSIX}.
-Also avoid names starting with @code{__} or @code{_} followed by an uppercase
-letter, as they are reserved by the C standard. Names starting with @code{_}
-are reserved at the file level and may not be used for externally visible
-symbols. If in doubt, just avoid names starting with @code{_} altogether.
-
-@subsection Miscellaneous conventions
-
-@itemize @bullet
-@item
-fprintf and printf are forbidden in libavformat and libavcodec;
-please use av_log() instead.
-
-@item
-Casts should be used only when necessary. Unneeded parentheses
-should also be avoided if they don't make the code easier to understand.
-@end itemize
-
-@subsection Editor configuration
-In order to configure Vim to follow FFmpeg formatting conventions, paste
-the following snippet into your @file{.vimrc}:
-@example
-" indentation rules for FFmpeg: 4 spaces, no tabs
-set expandtab
-set shiftwidth=4
-set softtabstop=4
-set cindent
-set cinoptions=(0
-" Allow tabs in Makefiles.
-autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
-" Trailing whitespace and tabs are forbidden, so highlight them.
-highlight ForbiddenWhitespace ctermbg=red guibg=red
-match ForbiddenWhitespace /\s\+$\|\t/
-" Do not highlight spaces at the end of line while typing on that line.
-autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@@<!$/
-@end example
-
-For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
-@example
-(c-add-style "ffmpeg"
-             '("k&r"
-               (c-basic-offset . 4)
-               (indent-tabs-mode . nil)
-               (show-trailing-whitespace . t)
-               (c-offsets-alist
-                (statement-cont . (c-lineup-assignments +)))
-               )
-             )
-(setq c-default-style "ffmpeg")
-@end example
-
-@section Development Policy
-
-@enumerate
-@item
-Contributions should be licensed under the
-@uref{http://www.gnu.org/licenses/lgpl-2.1.html, LGPL 2.1},
-including an "or any later version" clause, or, if you prefer
-a gift-style license, the
-@uref{http://opensource.org/licenses/isc-license.txt, ISC} or
-@uref{http://mit-license.org/, MIT} license.
-@uref{http://www.gnu.org/licenses/gpl-2.0.html, GPL 2} including
-an "or any later version" clause is also acceptable, but LGPL is
-preferred.
-If you add a new file, give it a proper license header. Do not copy and
-paste it from a random place; use an existing file as a template.
-
-@item
-You must not commit code which breaks FFmpeg! (Meaning unfinished but
-enabled code which breaks compilation, or compiles but does not work, or
-breaks the regression tests.)
-You can commit unfinished stuff (for testing etc.), but it must be disabled
-(#ifdef etc.) by default so it does not interfere with other developers'
-work.
-
-@item
-The commit message should have a short first line in the form of
-a @samp{topic: short description} as a header, separated by a newline
-from the body consisting of an explanation of why the change is necessary.
-If the commit fixes a known bug on the bug tracker, the commit message
-should include its bug ID. Referring to the issue on the bug tracker does
-not exempt you from writing an excerpt of the bug in the commit message.
-
-@item
-You do not have to over-test things. If it works for you, and you think it
-should work for others, then commit. If your code has problems
-(portability, triggers compiler bugs, unusual environment etc.), they will
-be reported and eventually fixed.
-
-@item
-Do not commit unrelated changes together; split them into self-contained
-pieces. Also do not forget that if part B depends on part A, but A does not
-depend on B, then A can and should be committed first and separate from B.
-Keeping changes well split into self-contained parts makes reviewing and
-understanding them on the commit log mailing list easier. This also helps
-in case of debugging later on.
-Also, if you have doubts about splitting or not splitting, do not hesitate
-to ask/discuss it on the developer mailing list.
-
-@item
-Do not change behavior of the programs (renaming options etc.) or public
-API or ABI without first discussing it on the ffmpeg-devel mailing list.
-Do not remove functionality from the code. Just improve!
-
-Note: Redundant code can be removed.
-
-@item
-Do not commit changes to the build system (Makefiles, configure script)
-which change behavior, defaults etc, without asking first. The same
-applies to compiler warning fixes, trivial-looking fixes and to code
-maintained by other developers. We usually have a reason for doing things
-the way we do. Send your changes as patches to the ffmpeg-devel mailing
-list, and if the code maintainers say OK, you may commit. This does not
-apply to files you wrote and/or maintain.
-
-@item
-We refuse source indentation and other cosmetic changes if they are mixed
-with functional changes; such commits will be rejected and removed. Every
-developer has their own indentation style; you should not change it. Of
-course, if you (re)write something, you can use your own style, even though
-we would prefer if the indentation throughout FFmpeg was consistent (many
-projects force a given indentation style - we do not).
-If you really need to make indentation changes (try to avoid this),
-separate them strictly from real changes.
-
-NOTE: If you have to put an if () @{ .. @} around a large (> 5 lines) chunk
-of code, then either do NOT change the indentation of the inner part (do
-not move it to the right), or do so in a separate commit.
-
-@item
-Always fill out the commit log message. Describe in a few lines what you
-changed and why. You can refer to mailing list postings if you fix a
-particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
-Recommended format:
-area changed: Short 1 line description
-
-details describing what and why and giving references.
-
-A complete, hypothetical commit is sketched after this list.
-
-@item
-Make sure the author of the commit is set correctly (see
-@command{git commit --author}). If you apply a patch, send an
-answer to ffmpeg-devel (or wherever you got the patch from) saying that
-you applied the patch.
-
-@item
-When applying patches that have been discussed (at length) on the mailing
-list, reference the thread in the log message.
-
-@item
-Do NOT commit to code actively maintained by others without permission.
-Send a patch to ffmpeg-devel instead. If no one answers within a reasonable
-timeframe (12h for build failures and security fixes, 3 days for small
-changes, 1 week for big patches), then commit your patch if you think it is
-OK. Also note that the maintainer can simply ask for more time to review!
-
-@item
-Subscribe to the ffmpeg-cvslog mailing list. The diffs of all commits
-are sent there and reviewed by all the other developers. Bugs and possible
-improvements or general questions regarding commits are discussed there. We
-expect you to react if problems with your code are uncovered.
-
-@item
-Update the documentation if you change behavior or add features. If you are
-unsure how best to do this, send a patch to ffmpeg-devel; the documentation
-maintainer(s) will review and commit your work.
-
-@item
-Try to keep important discussions and requests (also) on the public
-developer mailing list, so that all developers can benefit from them.
-
-@item
-Never write to unallocated memory, never write over the end of arrays, and
-always check values read from some untrusted source before using them
-as an array index or for other risky operations.
-
-@item
-Remember to check if you need to bump versions for the specific libav*
-parts (libavutil, libavcodec, libavformat) you are changing. You need
-to change the version integer.
-Incrementing the first component means no backward compatibility to
-previous versions (e.g. removal of a function from the public API).
-Incrementing the second component means a backward compatible change
-(e.g. addition of a function to the public API or extension of an
-existing data structure).
-Incrementing the third component means a noteworthy binary compatible
-change (e.g. encoder bug fix that matters for the decoder). The third
-component always starts at 100 to distinguish FFmpeg from Libav.
-
-@item
-Compiler warnings indicate potential bugs or code with bad style. If a type
-of warning always points to correct and clean code, that warning should
-be disabled, not the code changed.
-Thus the remaining warnings can either be bugs or correct code.
-If it is a bug, the bug has to be fixed. If it is not, the code should
-be changed to not generate a warning unless that causes a slowdown
-or obfuscates the code.
-
-@item
-Make sure that no parts of the codebase that you maintain are missing from
-the @file{MAINTAINERS} file. If something that you want to maintain is
-missing, add it with your name after it.
-If at some point you no longer want to maintain some code, then please help
-to find a new maintainer, and do not forget to update the @file{MAINTAINERS}
-file.
-@end enumerate
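For instance, a commit that follows the conventions above might be created
like this (the area, description, ticket number and author are all made up
for illustration):

@example
git commit --author="Jane Doe <jane@@example.com>" \
    -m "lavf/foodec: fix probe for truncated headers" \
    -m "The probe read past the end of the buffer for very short inputs. Fixes ticket #1234."
@end example

Passing @code{-m} twice produces a short header line plus a separate body
paragraph, matching the recommended format.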
-
-We think our rules are not too hard. If you have comments, contact us.
-
-@anchor{Submitting patches}
-@section Submitting patches
-
-First, read the @ref{Coding Rules} above if you have not done so already,
-in particular the rules regarding patch submission.
-
-When you submit your patch, please use @code{git format-patch} or
-@code{git send-email}. We cannot read other diffs :-)
-
-Also please do not submit a patch which contains several unrelated changes.
-Split it into separate, self-contained pieces. This does not mean splitting
-file by file. Instead, make the patch as small as possible while still
-keeping it as a logical unit that contains an individual change, even
-if it spans multiple files. This makes reviewing your patches much easier
-for us and greatly increases your chances of getting your patch applied.
-
-Use FFmpeg's patcheck tool to check your patch.
-The tool is located in the @file{tools} directory.
-
-Run the @ref{Regression tests} before submitting a patch in order to verify
-it does not cause unexpected problems.
-
-It also helps quite a bit if you tell us what the patch does (for example
-'replaces lrint by lrintf'), and why (for example '*BSD isn't C99 compliant
-and has no lrint()').
-
-Also, if you send several patches, please send each patch as a separate
-mail; do not attach several unrelated patches to the same mail.
-
-Patches should be posted to the
-@uref{http://lists.ffmpeg.org/mailman/listinfo/ffmpeg-devel, ffmpeg-devel}
-mailing list. Use @code{git send-email} when possible since it will properly
-send patches without requiring extra care. If you cannot, then send patches
-as base64-encoded attachments, so your patch is not trashed during
-transmission.
-
-Your patch will be reviewed on the mailing list. You will likely be asked
-to make some changes and are expected to send in an improved version that
-incorporates the requests from the review. This process may go through
-several iterations. Once your patch is deemed good enough, some developer
-will pick it up and commit it to the official FFmpeg tree.
-
-Give us a few days to react. But if some time passes without reaction,
-send a reminder by email. Your patch should eventually be dealt with.
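A typical submission using these tools might look like the following (the
output directory and base branch are only examples; adjust them to your
setup):

@example
# one mail per self-contained change, based on current git master
git format-patch -o outgoing origin/master

# run FFmpeg's patcheck tool over the generated patches
tools/patcheck outgoing/*.patch

# send the series to the mailing list
git send-email --to=ffmpeg-devel@@ffmpeg.org outgoing/*.patch
@end example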
-
-@section New codecs or formats checklist
-
-@enumerate
-@item
-Did you use av_cold for codec initialization and close functions?
-
-@item
-Did you add a long_name under NULL_IF_CONFIG_SMALL to the AVCodec or
-AVInputFormat/AVOutputFormat struct?
-
-@item
-Did you bump the minor version number (and reset the micro version
-number) in @file{libavcodec/version.h} or @file{libavformat/version.h}?
-
-@item
-Did you register it in @file{allcodecs.c} or @file{allformats.c}?
-
-@item
-Did you add the AVCodecID to @file{avcodec.h}?
-When adding new codec IDs, also add an entry to the codec descriptor
-list in @file{libavcodec/codec_desc.c}.
-
-@item
-If it has a FourCC, did you add it to @file{libavformat/riff.c},
-even if it is only a decoder?
-
-@item
-Did you add a rule to compile the appropriate files in the Makefile?
-Remember to do this even if you're just adding a format to a file that is
-already being compiled by some other rule, like a raw demuxer.
-
-@item
-Did you add an entry to the table of supported formats or codecs in
-@file{doc/general.texi}?
-
-@item
-Did you add an entry in the Changelog?
-
-@item
-If it depends on a parser or a library, did you add that dependency in
-configure?
-
-@item
-Did you @code{git add} the appropriate files before committing?
-
-@item
-Did you make sure it compiles standalone, i.e. with
-@code{configure --disable-everything --enable-decoder=foo}
-(or @code{--enable-demuxer} or whatever your component is)?
-@end enumerate
-
-
-@section Patch submission checklist
-
-@enumerate
-@item
-Does @code{make fate} pass with the patch applied?
-
-@item
-Was the patch generated with git format-patch or send-email?
-
-@item
-Did you sign off your patch? (git commit -s)
-See @url{http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob_plain;f=Documentation/SubmittingPatches} for the meaning
-of sign off.
-
-@item
-Did you provide a clear git commit log message?
-
-@item
-Is the patch against the latest FFmpeg git master branch?
-
-@item
-Are you subscribed to ffmpeg-devel?
-(the list is subscriber-only due to spam)
-
-@item
-Have you checked that the changes are minimal, so that the same cannot be
-achieved with a smaller patch and/or simpler final code?
-
-@item
-If the change is to speed critical code, did you benchmark it?
-
-@item
-If you did any benchmarks, did you provide them in the mail?
-
-@item
-Have you checked that the patch does not introduce buffer overflows or
-other security issues?
-
-@item
-Did you test your decoder or demuxer against damaged data? If not, see
-tools/trasher, the noise bitstream filter, and
-@uref{http://caca.zoy.org/wiki/zzuf, zzuf} (a sketch follows this
-checklist). Your decoder or demuxer should not crash, end in a (near)
-infinite loop, or allocate ridiculous amounts of memory when fed damaged
-data.
-
-@item
-Does the patch avoid mixing functional and cosmetic changes?
-
-@item
-Did you make sure not to add tabs or trailing whitespace to the code?
-Both are forbidden.
-
-@item
-Is the patch attached to the email you send?
-
-@item
-Is the mime type of the patch correct? It should be text/x-diff or
-text/x-patch or at least text/plain and not application/octet-stream.
-
-@item
-If the patch fixes a bug, did you provide a verbose analysis of the bug?
-
-@item
-If the patch fixes a bug, did you provide enough information, including
-a sample, so the bug can be reproduced and the fix can be verified?
-Note: please do not attach samples >100k to mails; rather provide a
-URL. You can upload to ftp://upload.ffmpeg.org
-
-@item
-Did you provide a verbose summary about what the patch changes?
-
-@item
-Did you provide a verbose explanation of why it changes things the way it
-does?
-
-@item
-Did you provide a verbose summary of the user-visible advantages and
-disadvantages if the patch is applied?
-
-@item
-Did you provide an example so we can verify the new feature added by the
-patch easily?
-
-@item
-If you added a new file, did you insert a license header? It should be
-taken from FFmpeg, not randomly copied and pasted from somewhere else.
-
-@item
-You should maintain alphabetical order in alphabetically ordered lists as
-long as doing so does not break API/ABI compatibility.
-
-@item
-Lines with similar content should be aligned vertically when doing so
-improves readability.
-
-@item
-Consider adding a regression test for your code.
-
-@item
-If you added YASM code, please check that things still work with
-@code{--disable-yasm}.
-
-@item
-Make sure you check the return values of functions and return appropriate
-error codes. Especially memory allocation functions like @code{av_malloc()}
-are notoriously left unchecked, which is a serious problem.
-
-@item
-Test your code with valgrind and/or Address Sanitizer to ensure it is free
-of leaks, out-of-array accesses, etc.
-@end enumerate
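As a sketch of how such damaged-data testing can be done (the sample and
output file names are placeholders; the bit-flip ratio is arbitrary):

@example
# flip roughly 0.1% of the bits read from the input while decoding
zzuf -s 42 -r 0.001 ffmpeg -i sample.avi -f null - 2>/dev/null

# or corrupt packets with the noise bitstream filter while remuxing,
# then try to decode the damaged file
ffmpeg -i sample.avi -c copy -bsf noise damaged.avi
ffmpeg -i damaged.avi -f null -
@end example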
-
-@section Patch review process
-
-All patches posted to ffmpeg-devel will be reviewed, unless they contain a
-clear note that the patch is not for the git master branch.
-Reviews and comments will be posted as replies to the patch on the
-mailing list. The patch submitter then has to address every comment, either
-by resubmitting a changed patch or through discussion. Resubmitted
-patches will themselves be reviewed like any other patch. If at some point
-a patch passes review with no comments, then it is approved. For simple and
-small patches this can happen immediately, while large patches will
-generally have to be changed and reviewed many times before they are
-approved.
-After a patch is approved it will be committed to the repository.
-
-We will review all submitted patches, but sometimes we are quite busy, so,
-especially for large patches, this can take several weeks.
-
-If you feel that the review process is too slow and you are willing to try
-to take over maintainership of the area of code you change, then just clone
-git master and maintain the area of code there. We will merge each area from
-where it is best maintained.
-
-When resubmitting patches, please do not make any significant changes
-not related to the comments received during review. Such patches will
-be rejected. Instead, submit significant changes or new features as
-separate patches.
-
-@anchor{Regression tests}
-@section Regression tests
-
-Before submitting a patch (or committing to the repository), you should at
-least test that you did not break anything.
-
-Running @code{make fate} accomplishes this; please see @url{fate.html} for
-details.
-
-[Of course, some patches may change the results of the regression tests. In
-this case, the reference results of the regression tests shall be modified
-accordingly.]
-
-@subsection Adding files to the fate-suite dataset
-
-When there is no muxer or encoder available to generate test media for a
-specific test, then the media has to be included in the fate-suite.
-First, please make sure that the sample file is as small as possible to test
-the respective decoder or demuxer sufficiently. Large files increase network
-bandwidth and disk space requirements.
-Once you have a working FATE test and sample, provide a direct link to
-download the sample media in the commit message or in the introductory
-message of the patch series that you post to the ffmpeg-devel mailing list.
-
-
-@subsection Visualizing Test Coverage
-
-The FFmpeg build system allows visualizing the test coverage in an easy
-manner with the coverage tools @code{gcov}/@code{lcov}. This involves
-the following steps:
-
-@enumerate
-@item
-    Configure to compile with instrumentation enabled:
-    @code{configure --toolchain=gcov}.
-
-@item
-    Run your test case, either manually or via FATE. This can be either
-    the full FATE regression suite, or any arbitrary invocation of any
-    front-end tool provided by FFmpeg, in any combination.
-
-@item
-    Run @code{make lcov} to generate coverage data in HTML format.
-
-@item
-    View @code{lcov/index.html} in your preferred HTML viewer.
-@end enumerate
-
-You can use the command @code{make lcov-reset} to reset the coverage
-measurements. You will need to rerun @code{make lcov} after running a
-new test.
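Put together, a coverage run might look like this (the parallel build level
and the browser are only examples):

@example
./configure --toolchain=gcov
make -j8 fate               # or build and run any front-end tool manually
make lcov                   # generate the HTML report in lcov/
firefox lcov/index.html     # view the results
make lcov-reset             # reset before measuring the next test
@end example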
-
-@subsection Using Valgrind
-
-The configure script provides a shortcut for using valgrind to spot bugs
-related to memory handling. Just add the option
-@code{--toolchain=valgrind-memcheck} or @code{--toolchain=valgrind-massif}
-to your configure line, and reasonable defaults will be set for running
-FATE under the supervision of either the @strong{memcheck} or the
-@strong{massif} tool of the valgrind suite.
-
-If you need finer control over how valgrind is invoked, use the
-@code{--target-exec='valgrind <your_custom_valgrind_options>'} option in
-your configure line instead.
-
-@anchor{Release process}
-@section Release process
-
-FFmpeg maintains a set of @strong{release branches}, which are the
-recommended deliverable for system integrators and distributors (such as
-Linux distributions, etc.). At regular times, a @strong{release
-manager} prepares, tests and publishes tarballs on the
-@url{http://ffmpeg.org} website.
-
-There are two kinds of releases:
-
-@enumerate
-@item
-@strong{Major releases} always include the latest and greatest
-features and functionality.
-
-@item
-@strong{Point releases} are cut from @strong{release} branches,
-which are named @code{release/X}, with @code{X} being the release
-version number.
-@end enumerate
-
-Note that we promise our users that shared libraries from any FFmpeg
-release will never break programs that have been @strong{compiled} against
-previous versions of @strong{the same release series}!
-
-However, from time to time, we do make API changes that require adaptations
-in applications. Such changes are only allowed in (new) major releases and
-require further steps such as bumping library version numbers and/or
-adjustments to the symbol versioning file. Please discuss such changes
-on the @strong{ffmpeg-devel} mailing list in time to allow forward planning.
-
-@anchor{Criteria for Point Releases}
-@subsection Criteria for Point Releases
-
-Changes that match the following criteria are valid candidates for
-inclusion into a point release:
-
-@enumerate
-@item
-Fixes a security issue, preferably identified by a @strong{CVE
-number} issued by @url{http://cve.mitre.org/}.
-
-@item
-Fixes a documented bug in @url{https://trac.ffmpeg.org}.
-
-@item
-Improves the included documentation.
-
-@item
-Retains both source code and binary compatibility with previous
-point releases of the same release branch.
-@end enumerate
-
-The order for checking the rules is (1 OR 2 OR 3) AND 4.
-
-
-@subsection Release Checklist
-
-The release process involves the following steps:
-
-@enumerate
-@item
-Ensure that the @file{RELEASE} file contains the version number for
-the upcoming release.
-
-@item
-Add the release at @url{https://trac.ffmpeg.org/admin/ticket/versions}.
-
-@item
-Announce the intent to do a release to the mailing list.
-
-@item
-Make sure all relevant security fixes have been backported. See
-@url{https://ffmpeg.org/security.html}.
-
-@item
-Ensure that the FATE regression suite still passes in the release
-branch on at least @strong{i386} and @strong{amd64}
-(cf. @ref{Regression tests}).
-
-@item
-Prepare the release tarballs in @code{bz2} and @code{gz} formats, and
-supplementary files that contain @code{gpg} signatures.
-
-@item
-Publish the tarballs at @url{http://ffmpeg.org/releases}. Create and
-push an annotated tag in the form @code{nX}, with @code{X}
-containing the version number.
-
-@item
-Propose and send a patch to the @strong{ffmpeg-devel} mailing list
-with a news entry for the website.
- -@item -Publish the news entry. - -@item -Send announcement to the mailing list. -@end enumerate - -@bye diff --git a/ffmpeg/doc/doxy-wrapper.sh b/ffmpeg/doc/doxy-wrapper.sh deleted file mode 100755 index a6c54dd..0000000 --- a/ffmpeg/doc/doxy-wrapper.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/sh - -SRC_PATH="${1}" -DOXYFILE="${2}" - -shift 2 - -doxygen - <<EOF -@INCLUDE = ${DOXYFILE} -INPUT = $@ -EOF diff --git a/ffmpeg/doc/doxy/doxy_stylesheet.css b/ffmpeg/doc/doxy/doxy_stylesheet.css deleted file mode 100644 index d6dadde..0000000 --- a/ffmpeg/doc/doxy/doxy_stylesheet.css +++ /dev/null @@ -1,2021 +0,0 @@ -/*! - * Bootstrap v2.1.1 - * - * Copyright 2012 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world @twitter by @mdo and @fat. - */ - -html { - font-size: 100%; - -webkit-text-size-adjust: 100%; - -ms-text-size-adjust: 100%; -} -a:focus { - outline: thin dotted #333; - outline: 5px auto -webkit-focus-ring-color; - outline-offset: -2px; -} -a:hover, -a:current { - outline: 0; -} -img { - /* Responsive images (ensure images don't scale beyond their parents) */ - - max-width: 100%; - /* Part 1: Set a maxium relative to the parent */ - - width: auto\9; - /* IE7-8 need help adjusting responsive images */ - - height: auto; - /* Part 2: Scale the height according to the width, otherwise you get stretching */ - - vertical-align: middle; - border: 0; - -ms-interpolation-mode: bicubic; -} -body { - margin: 0; - font-family: sans-serif; - font-size: 14px; - line-height: 20px; - color: #333333; - background-color: #ffffff; -} -a { - color: #0088cc; - text-decoration: none; -} -a:hover { - color: #005580; - text-decoration: underline; -} -.container { - width: 940px; -} - -.container { - margin-right: auto; - margin-left: auto; - *zoom: 1; -} - -.container:before, -.container:after { - display: table; - content: ""; - line-height: 0; -} -.container:after { - clear: both; -} -.container-fluid { - padding-right: 20px; - padding-left: 20px; - *zoom: 1; -} -small { - font-size: 85%; -} -strong { - font-weight: bold; -} -em { - font-style: italic; -} -cite { - font-style: normal; -} -.text-warning { - color: #c09853; -} -.text-error { - color: #b94a48; -} -.text-info { - color: #3a87ad; -} -.text-success { - color: #468847; -} -h1, -h2, -h3, -h4, -h5, -h6 { - margin: 10px 0; - font-family: inherit; - font-weight: bold; - line-height: 1; - color: inherit; - text-rendering: optimizelegibility; -} -h1 small, -h2 small, -h3 small, -h4 small, -h5 small, -h6 small { - font-weight: normal; - line-height: 1; - color: #999999; -} -h1 { - font-size: 30px; - line-height: 40px; -} -h2 { - font-size: 20px; - line-height: 40px; -} -h3 { - font-size: 18px; - line-height: 40px; -} -h4 { - font-size: 18px; - line-height: 20px; -} -h5 { - font-size: 14px; - line-height: 20px; -} -h6 { - font-size: 12px; - line-height: 20px; -} -ul, -ol { - padding: 0; - margin: 0 0 10px 25px; -} -ul ul, -ul ol, -ol ol, -ol ul { - margin-bottom: 0; -} -li { - line-height: 20px; -} -ul.unstyled, -ol.unstyled { - margin-left: 0; - list-style: none; -} -dl { - margin-bottom: 20px; -} -dt, -dd { - line-height: 20px; -} -dt { - font-weight: bold; -} -dd { - margin-left: 10px; -} -blockquote { - padding: 0 0 0 15px; - margin: 0 0 20px; - border-left: 5px solid #eeeeee; -} -blockquote p { - margin-bottom: 0; - font-size: 16px; - font-weight: 300; - line-height: 25px; -} -blockquote:before, -blockquote:after { - content: ""; -} 
-.fragment, -code, -pre { - padding: 0 3px 2px; - font-family: monospace; - font-size: 12px; - color: #333333; - -webkit-border-radius: 3px; - -moz-border-radius: 3px; - border-radius: 3px; -} -.fragment, -code { - padding: 2px 4px; - color: #d14; - background-color: #f7f7f9; - border: 1px solid #e1e1e8; -} -.fragment .line { - padding-left: 2em; - white-space: pre; -} -pre { - display: block; - padding: 9.5px; - margin: 0 0 10px; - font-size: 13px; - line-height: 20px; - word-break: break-all; - word-wrap: break-word; - white-space: pre-wrap; - background-color: #f5f5f5; - border: 1px solid #ccc; - border: 1px solid rgba(0, 0, 0, 0.15); - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; -} -pre code { - padding: 0; - color: inherit; - background-color: transparent; - border: 0; -} -.label, -.badge { - font-size: 11.844px; - font-weight: bold; - line-height: 14px; - color: #ffffff; - vertical-align: baseline; - white-space: nowrap; - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); - background-color: #999999; -} -.label { - padding: 1px 4px 2px; - -webkit-border-radius: 3px; - -moz-border-radius: 3px; - border-radius: 3px; -} -.badge { - padding: 1px 9px 2px; - -webkit-border-radius: 9px; - -moz-border-radius: 9px; - border-radius: 9px; -} - -.label a { - color:#ffffff; -} -a.label:hover, -a.badge:hover { - color: #ffffff; - text-decoration: none; - cursor: pointer; -} -.label-important, -.badge-important { - background-color: #b94a48; -} -.label-important[href], -.badge-important[href] { - background-color: #953b39; -} -.label-warning, -.badge-warning { - background-color: #f89406; -} -.label-warning[href], -.badge-warning[href] { - background-color: #c67605; -} -.label-success, -.badge-success { - background-color: #468847; -} -.label-success[href], -.badge-success[href] { - background-color: #356635; -} -.label-info, -.badge-info { - background-color: #3a87ad; -} -.label-info[href], -.badge-info[href] { - background-color: #2d6987; -} -.label-inverse, -.badge-inverse { - background-color: #333333; -} -.label-inverse[href], -.badge-inverse[href] { - background-color: #1a1a1a; -} -table { - max-width: 100%; - background-color: transparent; - border-collapse: collapse; - border-spacing: 0; -} - -table [class*=span], -.row-fluid table [class*=span] { - display: table-cell; - float: none; - margin-left: 0; -} -fieldset { - padding: 0; - margin: 0; - border: 0; -} -legend { - display: block; - width: 100%; - padding: 0; - margin-bottom: 20px; - font-size: 21px; - line-height: 40px; - color: #333333; - border: 0; - border-bottom: 1px solid #e5e5e5; -} -legend small { - font-size: 15px; - color: #999999; -} -label, -input, -button, -select, -textarea { - font-size: 14px; - font-weight: normal; - line-height: 20px; -} -input, -button, -select, -textarea { - font-family: sans-serif; -} -label { - display: block; - margin-bottom: 5px; -} - -.tablist { - margin-left: 0; - margin-bottom: 20px; - list-style: none; -} -.tablist > li > a { - display: block; -} -.tablist > li > a:hover { - text-decoration: none; - background-color: #eeeeee; -} -.tablist > .pull-right { - float: right; -} -.tablist-header { - display: block; - padding: 3px 15px; - font-size: 11px; - font-weight: bold; - line-height: 20px; - color: #999999; - text-shadow: 0 1px 0 rgba(255, 255, 255, 0.5); - text-transform: uppercase; -} -.tablist li + .tablist-header { - margin-top: 9px; -} -.tablist-list { - padding-left: 15px; - padding-right: 15px; - margin-bottom: 0; -} -.tablist-list > li > a, -.tablist-list 
.tablist-header { - margin-left: -15px; - margin-right: -15px; - text-shadow: 0 1px 0 rgba(255, 255, 255, 0.5); -} -.tablist-list > li > a { - padding: 3px 15px; -} -.tablist-list > .current > a, -.tablist-list > .current > a:hover { - color: #ffffff; - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2); - background-color: #0088cc; -} -.tablist-list [class^="icon-"] { - margin-right: 2px; -} -.tablist-list .divider { - *width: 100%; - height: 1px; - margin: 9px 1px; - *margin: -5px 0 5px; - overflow: hidden; - background-color: #e5e5e5; - border-bottom: 1px solid #ffffff; -} -.tablist-tabs, -.tablist { - *zoom: 1; -} -.tablist-tabs:before, -.tablist:before, -.tablist-tabs:after, -.tablist:after { - display: table; - content: ""; - line-height: 0; -} -.tablist-tabs:after, -.tablist:after { - clear: both; -} -.tablist-tabs > li, -.tablist > li { - float: left; -} -.tablist-tabs > li > a, -.tablist > li > a { - padding-right: 12px; - padding-left: 12px; - margin-right: 2px; - line-height: 14px; -} -.tablist-tabs { - border-bottom: 1px solid #ddd; -} -.tablist-tabs > li { - margin-bottom: -1px; -} -.tablist-tabs > li > a { - padding-top: 8px; - padding-bottom: 8px; - line-height: 20px; - border: 1px solid transparent; - -webkit-border-radius: 4px 4px 0 0; - -moz-border-radius: 4px 4px 0 0; - border-radius: 4px 4px 0 0; -} -.tablist-tabs > li > a:hover { - border-color: #eeeeee #eeeeee #dddddd; -} -.tablist-tabs > .current > a, -.tablist-tabs > .current > a:hover { - color: #555555; - background-color: #ffffff; - border: 1px solid #ddd; - border-bottom-color: transparent; - cursor: default; -} -.tablist > li > a { - padding-top: 8px; - padding-bottom: 8px; - margin-top: 2px; - margin-bottom: 2px; - -webkit-border-radius: 5px; - -moz-border-radius: 5px; - border-radius: 5px; -} -.tablist > .current > a, -.tablist > .current > a:hover { - color: #ffffff; - background-color: #0088cc; -} -.tablist-stacked > li { - float: none; -} -.tablist-stacked > li > a { - margin-right: 0; -} -.tablist-tabs.tablist-stacked { - border-bottom: 0; -} -.tablist-tabs.tablist-stacked > li > a { - border: 1px solid #ddd; - -webkit-border-radius: 0; - -moz-border-radius: 0; - border-radius: 0; -} -.tablist-tabs.tablist-stacked > li:first-child > a { - -webkit-border-top-right-radius: 4px; - -moz-border-radius-topright: 4px; - border-top-right-radius: 4px; - -webkit-border-top-left-radius: 4px; - -moz-border-radius-topleft: 4px; - border-top-left-radius: 4px; -} -.tablist-tabs.tablist-stacked > li:last-child > a { - -webkit-border-bottom-right-radius: 4px; - -moz-border-radius-bottomright: 4px; - border-bottom-right-radius: 4px; - -webkit-border-bottom-left-radius: 4px; - -moz-border-radius-bottomleft: 4px; - border-bottom-left-radius: 4px; -} -.tablist-tabs.tablist-stacked > li > a:hover { - border-color: #ddd; - z-index: 2; -} -.tablist.tablist-stacked > li > a { - margin-bottom: 3px; -} -.tablist.tablist-stacked > li:last-child > a { - margin-bottom: 1px; -} -.tablist-tabs .dropdown-menu { - -webkit-border-radius: 0 0 6px 6px; - -moz-border-radius: 0 0 6px 6px; - border-radius: 0 0 6px 6px; -} -.tablist .dropdown-menu { - -webkit-border-radius: 6px; - -moz-border-radius: 6px; - border-radius: 6px; -} -.tablist .dropdown-toggle .caret { - border-top-color: #0088cc; - border-bottom-color: #0088cc; - margin-top: 6px; -} -.tablist .dropdown-toggle:hover .caret { - border-top-color: #005580; - border-bottom-color: #005580; -} -/* move down carets for tabs */ -.tablist-tabs .dropdown-toggle .caret { - margin-top: 8px; -} -.tablist 
.current .dropdown-toggle .caret { - border-top-color: #fff; - border-bottom-color: #fff; -} -.tablist-tabs .current .dropdown-toggle .caret { - border-top-color: #555555; - border-bottom-color: #555555; -} -.tablist > .dropdown.current > a:hover { - cursor: pointer; -} -.tablist-tabs .open .dropdown-toggle, -.tablist .open .dropdown-toggle, -.tablist > li.dropdown.open.current > a:hover { - color: #ffffff; - background-color: #999999; - border-color: #999999; -} -.tablist li.dropdown.open .caret, -.tablist li.dropdown.open.current .caret, -.tablist li.dropdown.open a:hover .caret { - border-top-color: #ffffff; - border-bottom-color: #ffffff; - opacity: 1; - filter: alpha(opacity=100); -} -.tabs-stacked .open > a:hover { - border-color: #999999; -} -.tab-content > .tab-pane, -.pill-content > .pill-pane { - display: none; -} -.tab-content > .current, -.pill-content > .current { - display: block; -} -.tabs-below > .tablist-tabs { - border-top: 1px solid #ddd; -} -.tabs-below > .tablist-tabs > li { - margin-top: -1px; - margin-bottom: 0; -} -.tabs-below > .tablist-tabs > li > a { - -webkit-border-radius: 0 0 4px 4px; - -moz-border-radius: 0 0 4px 4px; - border-radius: 0 0 4px 4px; -} -.tabs-below > .tablist-tabs > li > a:hover { - border-bottom-color: transparent; - border-top-color: #ddd; -} -.tabs-below > .tablist-tabs > .current > a, -.tabs-below > .tablist-tabs > .current > a:hover { - border-color: transparent #ddd #ddd #ddd; -} -.tabs-left > .tablist-tabs > li, -.tabs-right > .tablist-tabs > li { - float: none; -} -.tabs-left > .tablist-tabs > li > a, -.tabs-right > .tablist-tabs > li > a { - min-width: 74px; - margin-right: 0; - margin-bottom: 3px; -} -.tabs-left > .tablist-tabs { - float: left; - margin-right: 19px; - border-right: 1px solid #ddd; -} -.tabs-left > .tablist-tabs > li > a { - margin-right: -1px; - -webkit-border-radius: 4px 0 0 4px; - -moz-border-radius: 4px 0 0 4px; - border-radius: 4px 0 0 4px; -} -.tabs-left > .tablist-tabs > li > a:hover { - border-color: #eeeeee #dddddd #eeeeee #eeeeee; -} -.tabs-left > .tablist-tabs .current > a, -.tabs-left > .tablist-tabs .current > a:hover { - border-color: #ddd transparent #ddd #ddd; - *border-right-color: #ffffff; -} -.tabs-right > .tablist-tabs { - float: right; - margin-left: 19px; - border-left: 1px solid #ddd; -} -.tabs-right > .tablist-tabs > li > a { - margin-left: -1px; - -webkit-border-radius: 0 4px 4px 0; - -moz-border-radius: 0 4px 4px 0; - border-radius: 0 4px 4px 0; -} -.tabs-right > .tablist-tabs > li > a:hover { - border-color: #eeeeee #eeeeee #eeeeee #dddddd; -} -.tabs-right > .tablist-tabs .current > a, -.tabs-right > .tablist-tabs .current > a:hover { - border-color: #ddd #ddd #ddd transparent; - *border-left-color: #ffffff; -} -.tablist > .disabled > a { - color: #999999; -} -.tablist > .disabled > a:hover { - text-decoration: none; - background-color: transparent; - cursor: default; -} -.tablistbar { - overflow: visible; - margin-bottom: 20px; - color: #ffffff; - *position: relative; - *z-index: 2; -} -.tablistbar-inner { - min-height: 40px; - padding-left: 20px; - padding-right: 20px; - background-color: #034c03; - background-image: -moz-linear-gradient(top, #024002, #045f04); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#024002), to(#045f04)); - background-image: -webkit-linear-gradient(top, #024002, #045f04); - background-image: -o-linear-gradient(top, #024002, #045f04); - background-image: linear-gradient(to bottom, #024002, #045f04); - background-repeat: repeat-x; - filter: 
progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff024002', endColorstr='#ff045f04', GradientType=0); - border: 1px solid #022402; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; - -webkit-box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065); - -moz-box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065); - box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065); - *zoom: 1; -} -.tablistbar-inner:before, -.tablistbar-inner:after { - display: table; - content: ""; - line-height: 0; -} -.tablistbar-inner:after { - clear: both; -} -.tablistbar .container { - width: auto; -} -.tablist-collapse.collapse { - height: auto; -} -.tablistbar .brand { - float: left; - display: block; - padding: 10px 20px 10px; - margin-left: -20px; - font-size: 20px; - font-weight: 200; - color: #ffffff; - text-shadow: 0 1px 0 #024002; -} -.tablistbar .brand:hover { - text-decoration: none; -} -.tablistbar-text { - margin-bottom: 0; - line-height: 40px; -} -.tablistbar-link { - color: #ffffff; -} -.tablistbar-link:hover { - color: #333333; -} -.tablistbar .tablist { - position: relative; - left: 0; - display: block; - float: left; - margin: 0 10px 0 0; -} -.tablistbar .tablist.pull-right { - float: right; - margin-right: 0; -} -.tablistbar .tablist > li { - float: left; -} -.tablistbar .tablist > li > a { - float: none; - padding: 10px 15px 10px; - color: #ffffff; - text-decoration: none; - text-shadow: 0 1px 0 #024002; -} -.tablistbar .tablist .dropdown-toggle .caret { - margin-top: 8px; -} -.tablistbar .tablist > li > a:focus, -.tablistbar .tablist > li > a:hover { - background-color: transparent; - color: white; - text-decoration: none; -} -.tablistbar .tablist > .current > a, -.tablistbar .tablist > .current > a:hover, -.tablistbar .tablist > .current > a:focus { - color: #555555; - text-decoration: none; - background-color: #034703; - -webkit-box-shadow: inset 0 3px 8px rgba(0, 0, 0, 0.125); - -moz-box-shadow: inset 0 3px 8px rgba(0, 0, 0, 0.125); - box-shadow: inset 0 3px 8px rgba(0, 0, 0, 0.125); -} -.tablistbar .btn-navbar { - display: none; - float: right; - padding: 7px 10px; - margin-left: 5px; - margin-right: 5px; - color: #ffffff; - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); - background-color: #023402; - background-image: -moz-linear-gradient(top, #012701, #034703); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#012701), to(#034703)); - background-image: -webkit-linear-gradient(top, #012701, #034703); - background-image: -o-linear-gradient(top, #012701, #034703); - background-image: linear-gradient(to bottom, #012701, #034703); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff012701', endColorstr='#ff034703', GradientType=0); - border-color: #034703 #034703 #000000; - border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25); - *background-color: #034703; - /* Darken IE7 buttons by default so they stand out more given they won't have borders */ - - filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.075); - -moz-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.075); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.075); -} -.tablistbar .tablist > li > .dropdown-menu:before { - content: ''; - display: inline-block; - border-left: 7px solid transparent; - border-right: 7px solid transparent; - border-bottom: 7px solid #ccc; - 
border-bottom-color: rgba(0, 0, 0, 0.2); - position: absolute; - top: -7px; - left: 9px; -} -.tablistbar .tablist > li > .dropdown-menu:after { - content: ''; - display: inline-block; - border-left: 6px solid transparent; - border-right: 6px solid transparent; - border-bottom: 6px solid #ffffff; - position: absolute; - top: -6px; - left: 10px; -} -.tablistbar .tablist li.dropdown.open > .dropdown-toggle, -.tablistbar .tablist li.dropdown.current > .dropdown-toggle, -.tablistbar .tablist li.dropdown.open.current > .dropdown-toggle { - background-color: #034703; - color: #555555; -} -.tablistbar .tablist li.dropdown > .dropdown-toggle .caret { - border-top-color: #ffffff; - border-bottom-color: #ffffff; -} -.tablistbar .tablist li.dropdown.open > .dropdown-toggle .caret, -.tablistbar .tablist li.dropdown.current > .dropdown-toggle .caret, -.tablistbar .tablist li.dropdown.open.current > .dropdown-toggle .caret { - border-top-color: #555555; - border-bottom-color: #555555; -} -.tablistbar .pull-right > li > .dropdown-menu, -.tablistbar .tablist > li > .dropdown-menu.pull-right { - left: auto; - right: 0; -} -.tablistbar .pull-right > li > .dropdown-menu:before, -.tablistbar .tablist > li > .dropdown-menu.pull-right:before { - left: auto; - right: 12px; -} -.tablistbar .pull-right > li > .dropdown-menu:after, -.tablistbar .tablist > li > .dropdown-menu.pull-right:after { - left: auto; - right: 13px; -} -.tablistbar .pull-right > li > .dropdown-menu .dropdown-menu, -.tablistbar .tablist > li > .dropdown-menu.pull-right .dropdown-menu { - left: auto; - right: 100%; - margin-left: 0; - margin-right: -1px; - -webkit-border-radius: 6px 0 6px 6px; - -moz-border-radius: 6px 0 6px 6px; - border-radius: 6px 0 6px 6px; -} -.breadcrumb { - padding: 8px 15px; - margin: 0 0 20px; - list-style: none; - background-color: #f5f5f5; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; -} -.breadcrumb li { - display: inline-block; - *display: inline; - /* IE7 inline-block hack */ - - *zoom: 1; - text-shadow: 0 1px 0 #ffffff; -} -.breadcrumb .divider { - padding: 0 5px; - color: #ccc; -} -.breadcrumb .current { - color: #999999; -} -.pagination-right { - text-align: right; -} -.fade { - opacity: 0; - -webkit-transition: opacity 0.15s linear; - -moz-transition: opacity 0.15s linear; - -o-transition: opacity 0.15s linear; - transition: opacity 0.15s linear; -} -.fade.in { - opacity: 1; -} -.collapse { - position: relative; - height: 0; - overflow: hidden; - -webkit-transition: height 0.35s ease; - -moz-transition: height 0.35s ease; - -o-transition: height 0.35s ease; - transition: height 0.35s ease; -} -.collapse.in { - height: auto; -} -.hidden { - display: none; - visibility: hidden; -} -.visible-phone { - display: none !important; -} -.visible-tablet { - display: none !important; -} -.hidden-desktop { - display: none !important; -} -.visible-desktop { - display: inherit !important; -} -@media (min-width: 768px) and (max-width: 979px) { - .hidden-desktop { - display: inherit !important; - } - .visible-desktop { - display: none !important ; - } - .visible-tablet { - display: inherit !important; - } - .hidden-tablet { - display: none !important; - } -} -@media (max-width: 767px) { - .hidden-desktop { - display: inherit !important; - } - .visible-desktop { - display: none !important; - } - .visible-phone { - display: inherit !important; - } - .hidden-phone { - display: none !important; - } -} -@media (max-width: 767px) { - body { - padding-left: 20px; - padding-right: 20px; - } - 
.container { - width: auto; - } - .row, - .thumbnails { - margin-left: 0; - } -} -@media (max-width: 480px) { - .tablist-collapse { - -webkit-transform: translate3d(0, 0, 0); - } - .page-header h1 small { - display: block; - line-height: 20px; - } -} -@media (min-width: 768px) and (max-width: 979px) { - .row { - margin-left: -20px; - *zoom: 1; - } - .row:before, - .row:after { - display: table; - content: ""; - line-height: 0; - } - .row:after { - clear: both; - } - [class*="span"] { - float: left; - min-height: 1px; - margin-left: 20px; - } - .container { - width: 724px; - } -} -@media (min-width: 1200px) { - .row { - margin-left: -30px; - *zoom: 1; - } - .row:before, - .row:after { - display: table; - content: ""; - line-height: 0; - } - .row:after { - clear: both; - } - [class*="span"] { - float: left; - min-height: 1px; - margin-left: 30px; - } - .container { - width: 1070px; - } -} -@media (max-width: 979px) { - body { - padding-top: 0; - } -} -@media (min-width: 980px) { - .tablist-collapse.collapse { - height: auto !important; - overflow: visible !important; - } -} -.tablistbar .brand { - padding: 5px; - margin-left: 0; -} -.tablistbar .brand img { - width: 30px; - vertical-align: middle; -} - -h1 small { - font-size: 18px; -} - -h1 small, -h2 small, -h3 small, -h4 small, -h5 small, -h6 small, -.page-header small { - line-height: 0.8; - font-weight: normal; - color: #999999; - display:block; - vertical-align: middle; -} - -.page-header h1, h1:first-child { - font-size: 40px; - padding-bottom: 5px; -} - -.page-header h1 { - border-bottom: 1px solid #999999; - padding-bottom: 9px; -} - -.page-header img { - height: 80px; - padding-bottom: 5px; -} - -.page-header small { - line-height: 1.1; - font-size: 18px; -} - -h2, -h3, -h4, -div.ah, -.title { - border-color: #D6E9C6; - color: #468847; - border-style: solid; - border-width: 0 0 1px; - padding-left: 0.5em; -} - - -.google { - color: white; -} - -.breadcrumb { - font-size: 11px; - padding-top: 2px; - padding-bottom: 2px; -} - -h1 a, -h2 a, -h3 a, -h4 a { - color: inherit; -} - -.tablistbar-inner a { - font-weight: bold; -} - -.list-2panes:before, -.list-2panes:after { - display: table; - content: ""; - line-height: 0; -} - -.list-2panes:after { - clear:both; -} - -.list-2panes li { - width: 470px; - width: 470px; - float: left; - margin-left: 30px; - min-height: 1px; -} -/* The standard CSS for doxygen */ - -/* @group Heading Levels */ - - -dt { - font-weight: bold; -} - -div.multicol { - -moz-column-gap: 1em; - -webkit-column-gap: 1em; - -moz-column-count: 3; - -webkit-column-count: 3; -} - -p.startli, p.startdd, p.starttd { - margin-top: 2px; -} - -p.endli { - margin-bottom: 0px; -} - -p.enddd { - margin-bottom: 4px; -} - -p.endtd { - margin-bottom: 2px; -} - -/* @end */ - -caption { - font-weight: bold; -} - -span.legend { - font-size: 70%; - text-align: center; -} - -h3.version { - font-size: 90%; - text-align: center; -} - -div.qindex, div.tablisttab{ - background-color: #EBF6EB; - border: 1px solid #A3D7A3; - text-align: center; -} - -div.qindex, div.tablistpath { - width: 100%; - line-height: 140%; -} - -div.tablisttab { - margin-right: 15px; -} - -/* @group Link Styling */ - -a { - color: #3D8C3D; - font-weight: normal; - text-decoration: none; -} - -.contents a:visited { - color: #46A246; -} - -a:hover { - text-decoration: underline; -} - -a.qindex { - font-weight: bold; -} - -a.qindexHL { - font-weight: bold; - background-color: #9CD49C; - color: #ffffff; - border: 1px double #86CA86; -} - -.contents a.qindexHL:visited { - 
color: #ffffff; -} - -a.el { - font-weight: bold; -} - -a.elRef { -} - -a.code { - color: #4665A2; -} - -a.codeRef { - color: #4665A2; -} - -/* @end */ - -dl.el { - margin-left: -1cm; -} - -.fragment { - font-family: monospace, fixed; - font-size: 105%; -} - -pre.fragment { - border: 1px solid #C4E5C4; - background-color: #FBFDFB; - padding: 4px 6px; - margin: 4px 8px 4px 2px; - overflow: auto; - word-wrap: break-word; - font-size: 9pt; - line-height: 125%; -} - -div.groupHeader { - margin-left: 16px; - margin-top: 12px; - font-weight: bold; -} - -div.groupText { - margin-left: 16px; - font-style: italic; -} - -div.contents { - margin-top: 10px; - margin-left: 8px; - margin-right: 8px; -} - -td.indexkey { - white-space: nowrap; - vertical-align: top; -} - - -tr.memlist { - background-color: #EEF7EE; -} - -p.formulaDsp { - text-align: center; -} - -img.formulaDsp { - -} - -img.formulaInl { - vertical-align: middle; -} - -div.center { - text-align: center; - margin-top: 0px; - margin-bottom: 0px; - padding: 0px; -} - -div.center img { - border: 0px; -} - -#footer { - margin: -10px 1em 0; - padding-top: 20px; - text-align: center; - font-size: small; -} - -address.footer { - background-color: #ffffff; - text-align: center; -} - -img.footer { - border: 0px; - vertical-align: middle; -} - -/* @group Code Colorization */ - -span.keyword { - color: #008000 -} - -span.keywordtype { - color: #604020 -} - -span.keywordflow { - color: #e08000 -} - -span.comment { - color: #800000 -} - -span.preprocessor { - color: #806020 -} - -span.stringliteral { - color: #002080 -} - -span.charliteral { - color: #008080 -} - -span.vhdldigit { - color: #ff00ff -} - -span.vhdlchar { - color: #000000 -} - -span.vhdlkeyword { - color: #700070 -} - -span.vhdllogic { - color: #ff0000 -} - -/* @end */ - -/* -.search { - color: #003399; - font-weight: bold; -} - -form.search { - margin-bottom: 0px; - margin-top: 0px; -} - -input.search { - font-size: 75%; - color: #000080; - font-weight: normal; - background-color: #e8eef2; -} -*/ - -td.tiny { - font-size: 75%; -} - -.dirtab { - padding: 4px; - border-collapse: collapse; - border: 1px solid #A3D7A3; -} - -th.dirtab { - background: #EBF6EB; - font-weight: bold; -} - -hr { - height: 0px; - border: none; - border-top: 1px solid #4AAA4A; -} - -hr.footer { - height: 1px; -} - -/* @group Member Descriptions */ - -table.memberdecls { - border-spacing: 0px; - padding: 0px; -} - -.mdescLeft, .mdescRight, -.memItemLeft, .memItemRight, -.memTemplItemLeft, .memTemplItemRight, .memTemplParams { - background-color: #F9FCF9; - border: none; - margin: 4px; - padding: 1px 0 0 8px; -} - -.mdescLeft, .mdescRight { - padding: 0px 8px 4px 8px; - color: #555; -} - -.memItemLeft, .memItemRight, .memTemplParams { - border-top: 1px solid #C4E5C4; -} - -.memItemLeft, .memTemplItemLeft { - white-space: nowrap; -} - -.memItemRight { - width: 100%; -} - -.memTemplParams { - color: #46A246; - white-space: nowrap; -} - -/* @end */ - -/* @group Member Details */ - -/* Styles for detailed member documentation */ - -.memtemplate { - font-size: 80%; - color: #46A246; - font-weight: normal; - margin-left: 9px; -} - -.memnav { - background-color: #EBF6EB; - border: 1px solid #A3D7A3; - text-align: center; - margin: 2px; - margin-right: 15px; - padding: 2px; -} - -.mempage { - width: 100%; -} - -.memitem { - padding: 0; - margin-bottom: 10px; - margin-right: 5px; -} - -.memname { - white-space: nowrap; - font-weight: bold; - margin-left: 6px; -} - -.memproto, dl.reflist dt { - border-top: 1px solid #A8D9A8; - 
border-left: 1px solid #A8D9A8; - border-right: 1px solid #A8D9A8; - padding: 6px 0px 6px 0px; - color: #255525; - font-weight: bold; - text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); - /* opera specific markup */ - box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); - border-top-right-radius: 8px; - border-top-left-radius: 8px; - /* firefox specific markup */ - -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; - -moz-border-radius-topright: 8px; - -moz-border-radius-topleft: 8px; - /* webkit specific markup */ - -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); - -webkit-border-top-right-radius: 8px; - -webkit-border-top-left-radius: 8px; - background-repeat:repeat-x; - background-color: #E2F2E2; - -} - -.memdoc, dl.reflist dd { - border-bottom: 1px solid #A8D9A8; - border-left: 1px solid #A8D9A8; - border-right: 1px solid #A8D9A8; - padding: 2px 5px; - background-color: #FBFDFB; - border-top-width: 0; - /* opera specific markup */ - border-bottom-left-radius: 8px; - border-bottom-right-radius: 8px; - box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); - /* firefox specific markup */ - -moz-border-radius-bottomleft: 8px; - -moz-border-radius-bottomright: 8px; - -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; - background-image: -moz-linear-gradient(center top, #FFFFFF 0%, #FFFFFF 60%, #F7FBF7 95%, #EEF7EE); - /* webkit specific markup */ - -webkit-border-bottom-left-radius: 8px; - -webkit-border-bottom-right-radius: 8px; - -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); - background-image: -webkit-gradient(linear,center top,center bottom,from(#FFFFFF), color-stop(0.6,#FFFFFF), color-stop(0.60,#FFFFFF), color-stop(0.95,#F7FBF7), to(#EEF7EE)); -} - -dl.reflist dt { - padding: 5px; -} - -dl.reflist dd { - margin: 0px 0px 10px 0px; - padding: 5px; -} - -.paramkey { - text-align: right; -} - -.paramtype { - white-space: nowrap; -} - -.paramname { - color: #602020; - white-space: nowrap; -} -.paramname em { - font-style: normal; -} - -.params, .retval, .exception, .tparams { - border-spacing: 6px 2px; -} - -.params .paramname, .retval .paramname { - font-weight: bold; - vertical-align: top; -} - -.params .paramtype { - font-style: italic; - vertical-align: top; -} - -.params .paramdir { - font-family: "courier new",courier,monospace; - vertical-align: top; -} - - - - -/* @end */ - -/* @group Directory (tree) */ - -/* for the tree view */ - -.ftvtree { - font-family: sans-serif; - margin: 0px; -} - -/* these are for tree view when used as main index */ - -.directory { - font-size: 9pt; - font-weight: bold; - margin: 5px; -} - -.directory h3 { - margin: 0px; - margin-top: 1em; - font-size: 11pt; -} - -/* -The following two styles can be used to replace the root node title -with an image of your choice. Simply uncomment the next two styles, -specify the name of your image and be sure to set 'height' to the -proper pixel height of your image. 
-*/ - -/* -.directory h3.swap { - height: 61px; - background-repeat: no-repeat; - background-image: url("yourimage.gif"); -} -.directory h3.swap span { - display: none; -} -*/ - -.directory > h3 { - margin-top: 0; -} - -.directory p { - margin: 0px; - white-space: nowrap; -} - -.directory div { - display: none; - margin: 0px; -} - -.directory img { - vertical-align: -30%; -} - -/* these are for tree view when not used as main index */ - -.directory-alt { - font-size: 100%; - font-weight: bold; -} - -.directory-alt h3 { - margin: 0px; - margin-top: 1em; - font-size: 11pt; -} - -.directory-alt > h3 { - margin-top: 0; -} - -.directory-alt p { - margin: 0px; - white-space: nowrap; -} - -.directory-alt div { - display: none; - margin: 0px; -} - -.directory-alt img { - vertical-align: -30%; -} - -/* @end */ - -div.dynheader { - margin-top: 8px; -} - -address { - font-style: normal; - color: #2A612A; -} - -table.doxtable { - border-collapse:collapse; -} - -table.doxtable td, table.doxtable th { - border: 1px solid #2D682D; - padding: 3px 7px 2px; -} - -table.doxtable th { - background-color: #377F37; - color: #FFFFFF; - font-size: 110%; - padding-bottom: 4px; - padding-top: 5px; - text-align:left; -} - -table.fieldtable { - width: 100%; - margin-bottom: 10px; - border: 1px solid #A8D9A8; - border-spacing: 0px; - -moz-border-radius: 4px; - -webkit-border-radius: 4px; - border-radius: 4px; - -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; - -webkit-box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); - box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); -} - -.fieldtable td, .fieldtable th { - padding: 3px 7px 2px; -} - -.fieldtable td.fieldtype, .fieldtable td.fieldname { - white-space: nowrap; - border-right: 1px solid #A8D9A8; - border-bottom: 1px solid #A8D9A8; - vertical-align: top; -} - -.fieldtable td.fielddoc { - border-bottom: 1px solid #A8D9A8; - width: 100%; -} - -.fieldtable tr:last-child td { - border-bottom: none; -} - -.fieldtable th { - background-repeat:repeat-x; - background-color: #E2F2E2; - font-size: 90%; - color: #255525; - padding-bottom: 4px; - padding-top: 5px; - text-align:left; - -moz-border-radius-topleft: 4px; - -moz-border-radius-topright: 4px; - -webkit-border-top-left-radius: 4px; - -webkit-border-top-right-radius: 4px; - border-top-left-radius: 4px; - border-top-right-radius: 4px; - border-bottom: 1px solid #A8D9A8; -} - - -.tabsearch { - top: 0px; - left: 10px; - height: 36px; - z-index: 101; - overflow: hidden; - font-size: 13px; -} - -.tablistpath ul -{ - font-size: 11px; - background-repeat:repeat-x; - height:30px; - line-height:30px; - color:#8ACC8A; - border:solid 1px #C2E4C2; - overflow:hidden; - margin:0px; - padding:0px; -} - -.tablistpath li -{ - list-style-type:none; - float:left; - padding-left:10px; - padding-right:15px; - background-repeat:no-repeat; - background-position:right; - color:#367C36; -} - -.tablistpath li.tablistelem a -{ - height:32px; - display:block; - text-decoration: none; - outline: none; -} - -.tablistpath li.tablistelem a:hover -{ - color:#68BD68; -} - -.tablistpath li.footer -{ - list-style-type:none; - float:right; - padding-left:10px; - padding-right:15px; - background-image:none; - background-repeat:no-repeat; - background-position:right; - color:#367C36; - font-size: 8pt; -} - - -div.summary -{ - margin-top: 12px; - text-align: center; -} - -div.summary a -{ - white-space: nowrap; -} - -div.ingroups -{ - margin-left: 5px; - font-size: 8pt; - padding-left: 5px; - width: 50%; - text-align: left; -} - -div.ingroups a -{ - white-space: nowrap; 
-} - -div.headertitle -{ - padding: 5px 5px 5px 7px; -} - -dl -{ - padding: 0 0 0 10px; -} - -dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug -{ - border-left:4px solid; - padding: 0 0 0 6px; -} - -dl.note -{ - border-color: #D0C000; -} - -dl.warning, dl.attention -{ - border-color: #FF0000; -} - -dl.pre, dl.post, dl.invariant -{ - border-color: #00D000; -} - -dl.deprecated -{ - border-color: #505050; -} - -dl.todo -{ - border-color: #00C0E0; -} - -dl.test -{ - border-color: #3030E0; -} - -dl.bug -{ - border-color: #C08050; -} - -#projectlogo -{ - text-align: center; - vertical-align: bottom; - border-collapse: separate; -} - -#projectlogo img -{ - border: 0px none; -} - -#projectname -{ - font: 300% Tahoma, Arial,sans-serif; - margin: 0px; - padding: 2px 0px; -} - -#projectbrief -{ - font: 120% Tahoma, Arial,sans-serif; - margin: 0px; - padding: 0px; -} - -#projectnumber -{ - font: 50% Tahoma, Arial,sans-serif; - margin: 0px; - padding: 0px; -} - -#titlearea -{ - padding: 0px; - margin: 0px; - width: 100%; - border-bottom: 1px solid #53B453; -} - -.image -{ - text-align: center; -} - -.dotgraph -{ - text-align: center; -} - -.mscgraph -{ - text-align: center; -} - -.caption -{ - font-weight: bold; -} - -div.zoom -{ - border: 1px solid #90CE90; -} - -dl.citelist { - margin-bottom:50px; -} - -dl.citelist dt { - color:#337533; - float:left; - font-weight:bold; - margin-right:10px; - padding:5px; -} - -dl.citelist dd { - margin:2px 0; - padding:5px 0; -} - -@media print -{ - #top { display: none; } - #side-nav { display: none; } - #nav-path { display: none; } - body { overflow:visible; } - h1, h2, h3, h4, h5, h6 { page-break-after: avoid; } - .summary { display: none; } - .memitem { page-break-inside: avoid; } - #doc-content - { - margin-left:0 !important; - height:auto !important; - width:auto !important; - overflow:inherit; - display:inline; - } - pre.fragment - { - overflow: visible; - text-wrap: unrestricted; - white-space: -moz-pre-wrap; /* Moz */ - white-space: -pre-wrap; /* Opera 4-6 */ - white-space: -o-pre-wrap; /* Opera 7 */ - white-space: pre-wrap; /* CSS3 */ - word-wrap: break-word; /* IE 5.5+ */ - } -} - -#proj_desc { - font-size: 1.2em; -} diff --git a/ffmpeg/doc/doxy/footer.html b/ffmpeg/doc/doxy/footer.html deleted file mode 100644 index 101e6fe..0000000 --- a/ffmpeg/doc/doxy/footer.html +++ /dev/null @@ -1,9 +0,0 @@ - - <footer class="footer pagination-right"> - <span class="label label-info"> - Generated on $datetime for $projectname by <a href="http://www.doxygen.org/index.html">doxygen</a> $doxygenversion - </span> - </footer> -</div> -</body> -</html> diff --git a/ffmpeg/doc/doxy/header.html b/ffmpeg/doc/doxy/header.html deleted file mode 100644 index 312990c..0000000 --- a/ffmpeg/doc/doxy/header.html +++ /dev/null @@ -1,16 +0,0 @@ -<!DOCTYPE html> -<html> -<head> -<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/> -<meta http-equiv="X-UA-Compatible" content="IE=9"/> -<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME--> -<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME--> -<link href="$relpath$doxy_stylesheet.css" rel="stylesheet" type="text/css" /> -<!--Header replace --> - -</head> - -<div class="container"> - -<!--Header replace --> -<div class="menu"> diff --git a/ffmpeg/doc/encoders.texi b/ffmpeg/doc/encoders.texi deleted file mode 100644 index ea5b3e4..0000000 --- a/ffmpeg/doc/encoders.texi +++ /dev/null @@ -1,2020 +0,0 @@ -@chapter Encoders 
-@c man begin ENCODERS - -Encoders are configured elements in FFmpeg which allow the encoding of -multimedia streams. - -When you configure your FFmpeg build, all the supported native encoders -are enabled by default. Encoders requiring an external library must be enabled -manually via the corresponding @code{--enable-lib} option. You can list all -available encoders using the configure option @code{--list-encoders}. - -You can disable all the encoders with the configure option -@code{--disable-encoders} and selectively enable / disable single encoders -with the options @code{--enable-encoder=@var{ENCODER}} / -@code{--disable-encoder=@var{ENCODER}}. - -The option @code{-codecs} of the ff* tools will display the list of -enabled encoders. - -@c man end ENCODERS - -@chapter Audio Encoders -@c man begin AUDIO ENCODERS - -A description of some of the currently available audio encoders -follows. - -@anchor{aacenc} -@section aac - -Advanced Audio Coding (AAC) encoder. - -This encoder is an experimental FFmpeg-native AAC encoder. Currently only the -low complexity (AAC-LC) profile is supported. To use this encoder, you must set -@option{strict} option to @samp{experimental} or lower. - -As this encoder is experimental, unexpected behavior may exist from time to -time. For a more stable AAC encoder, see @ref{libvo-aacenc}. However, be warned -that it has a worse quality reported by some users. - -@c todo @ref{libaacplus} -See also @ref{libfdk-aac-enc,,libfdk_aac} and @ref{libfaac}. - -@subsection Options - -@table @option -@item b -Set bit rate in bits/s. Setting this automatically activates constant bit rate -(CBR) mode. - -@item q -Set quality for variable bit rate (VBR) mode. This option is valid only using -the @command{ffmpeg} command-line tool. For library interface users, use -@option{global_quality}. - -@item stereo_mode -Set stereo encoding mode. Possible values: - -@table @samp -@item auto -Automatically selected by the encoder. - -@item ms_off -Disable middle/side encoding. This is the default. - -@item ms_force -Force middle/side encoding. -@end table - -@item aac_coder -Set AAC encoder coding method. Possible values: - -@table @samp -@item faac -FAAC-inspired method. - -This method is a simplified reimplementation of the method used in FAAC, which -sets thresholds proportional to the band energies, and then decreases all the -thresholds with quantizer steps to find the appropriate quantization with -distortion below threshold band by band. - -The quality of this method is comparable to the two loop searching method -descibed below, but somewhat a little better and slower. - -@item anmr -Average noise to mask ratio (ANMR) trellis-based solution. - -This has a theoretic best quality out of all the coding methods, but at the -cost of the slowest speed. - -@item twoloop -Two loop searching (TLS) method. - -This method first sets quantizers depending on band thresholds and then tries -to find an optimal combination by adding or subtracting a specific value from -all quantizers and adjusting some individual quantizer a little. - -This method produces similar quality with the FAAC method and is the default. - -@item fast -Constant quantizer method. - -This method sets a constant quantizer for all bands. This is the fastest of all -the methods, yet produces the worst quality. - -@end table - -@end table - -@section ac3 and ac3_fixed - -AC-3 audio encoders. - -These encoders implement part of ATSC A/52:2010 and ETSI TS 102 366, as well as -the undocumented RealAudio 3 (a.k.a. dnet). 
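-
-As a brief illustration (the bitrate and file names here are arbitrary, not a
-recommendation), a stereo AC-3 encode with @command{ffmpeg} could look like:
-@example
-ffmpeg -i input.wav -c:a ac3 -b:a 192k output.ac3
-@end example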
- -The @var{ac3} encoder uses floating-point math, while the @var{ac3_fixed} -encoder only uses fixed-point integer math. This does not mean that one is -always faster, just that one or the other may be better suited to a -particular system. The floating-point encoder will generally produce better -quality audio for a given bitrate. The @var{ac3_fixed} encoder is not the -default codec for any of the output formats, so it must be specified explicitly -using the option @code{-acodec ac3_fixed} in order to use it. - -@subsection AC-3 Metadata - -The AC-3 metadata options are used to set parameters that describe the audio, -but in most cases do not affect the audio encoding itself. Some of the options -do directly affect or influence the decoding and playback of the resulting -bitstream, while others are just for informational purposes. A few of the -options will add bits to the output stream that could otherwise be used for -audio data, and will thus affect the quality of the output. Those will be -indicated accordingly with a note in the option list below. - -These parameters are described in detail in several publicly-available -documents. -@itemize -@item @uref{http://www.atsc.org/cms/standards/a_52-2010.pdf,A/52:2010 - Digital Audio Compression (AC-3) (E-AC-3) Standard} -@item @uref{http://www.atsc.org/cms/standards/a_54a_with_corr_1.pdf,A/54 - Guide to the Use of the ATSC Digital Television Standard} -@item @uref{http://www.dolby.com/uploadedFiles/zz-_Shared_Assets/English_PDFs/Professional/18_Metadata.Guide.pdf,Dolby Metadata Guide} -@item @uref{http://www.dolby.com/uploadedFiles/zz-_Shared_Assets/English_PDFs/Professional/46_DDEncodingGuidelines.pdf,Dolby Digital Professional Encoding Guidelines} -@end itemize - -@subsubsection Metadata Control Options - -@table @option - -@item -per_frame_metadata @var{boolean} -Allow Per-Frame Metadata. Specifies if the encoder should check for changing -metadata for each frame. -@table @option -@item 0 -The metadata values set at initialization will be used for every frame in the -stream. (default) -@item 1 -Metadata values can be changed before encoding each frame. -@end table - -@end table - -@subsubsection Downmix Levels - -@table @option - -@item -center_mixlev @var{level} -Center Mix Level. The amount of gain the decoder should apply to the center -channel when downmixing to stereo. This field will only be written to the -bitstream if a center channel is present. The value is specified as a scale -factor. There are 3 valid values: -@table @option -@item 0.707 -Apply -3dB gain -@item 0.595 -Apply -4.5dB gain (default) -@item 0.500 -Apply -6dB gain -@end table - -@item -surround_mixlev @var{level} -Surround Mix Level. The amount of gain the decoder should apply to the surround -channel(s) when downmixing to stereo. This field will only be written to the -bitstream if one or more surround channels are present. The value is specified -as a scale factor. There are 3 valid values: -@table @option -@item 0.707 -Apply -3dB gain -@item 0.500 -Apply -6dB gain (default) -@item 0.000 -Silence Surround Channel(s) -@end table - -@end table - -@subsubsection Audio Production Information -Audio Production Information is optional information describing the mixing -environment. Either none or both of the fields are written to the bitstream. - -@table @option - -@item -mixing_level @var{number} -Mixing Level. Specifies peak sound pressure level (SPL) in the production -environment when the mix was mastered. 
Valid values are 80 to 111, or -1 for -unknown or not indicated. The default value is -1, but that value cannot be -used if the Audio Production Information is written to the bitstream. Therefore, -if the @code{room_type} option is not the default value, the @code{mixing_level} -option must not be -1. - -@item -room_type @var{type} -Room Type. Describes the equalization used during the final mixing session at -the studio or on the dubbing stage. A large room is a dubbing stage with the -industry standard X-curve equalization; a small room has flat equalization. -This field will not be written to the bitstream if both the @code{mixing_level} -option and the @code{room_type} option have the default values. -@table @option -@item 0 -@itemx notindicated -Not Indicated (default) -@item 1 -@itemx large -Large Room -@item 2 -@itemx small -Small Room -@end table - -@end table - -@subsubsection Other Metadata Options - -@table @option - -@item -copyright @var{boolean} -Copyright Indicator. Specifies whether a copyright exists for this audio. -@table @option -@item 0 -@itemx off -No Copyright Exists (default) -@item 1 -@itemx on -Copyright Exists -@end table - -@item -dialnorm @var{value} -Dialogue Normalization. Indicates how far the average dialogue level of the -program is below digital 100% full scale (0 dBFS). This parameter determines a -level shift during audio reproduction that sets the average volume of the -dialogue to a preset level. The goal is to match volume level between program -sources. A value of -31dB will result in no volume level change, relative to -the source volume, during audio reproduction. Valid values are whole numbers in -the range -31 to -1, with -31 being the default. - -@item -dsur_mode @var{mode} -Dolby Surround Mode. Specifies whether the stereo signal uses Dolby Surround -(Pro Logic). This field will only be written to the bitstream if the audio -stream is stereo. Using this option does @b{NOT} mean the encoder will actually -apply Dolby Surround processing. -@table @option -@item 0 -@itemx notindicated -Not Indicated (default) -@item 1 -@itemx off -Not Dolby Surround Encoded -@item 2 -@itemx on -Dolby Surround Encoded -@end table - -@item -original @var{boolean} -Original Bit Stream Indicator. Specifies whether this audio is from the -original source and not a copy. -@table @option -@item 0 -@itemx off -Not Original Source -@item 1 -@itemx on -Original Source (default) -@end table - -@end table - -@subsection Extended Bitstream Information -The extended bitstream options are part of the Alternate Bit Stream Syntax as -specified in Annex D of the A/52:2010 standard. It is grouped into 2 parts. -If any one parameter in a group is specified, all values in that group will be -written to the bitstream. Default values are used for those that are written -but have not been specified. If the mixing levels are written, the decoder -will use these values instead of the ones specified in the @code{center_mixlev} -and @code{surround_mixlev} options if it supports the Alternate Bit Stream -Syntax. - -@subsubsection Extended Bitstream Information - Part 1 - -@table @option - -@item -dmix_mode @var{mode} -Preferred Stereo Downmix Mode. Allows the user to select either Lt/Rt -(Dolby Surround) or Lo/Ro (normal stereo) as the preferred stereo downmix mode. 
-@table @option -@item 0 -@itemx notindicated -Not Indicated (default) -@item 1 -@itemx ltrt -Lt/Rt Downmix Preferred -@item 2 -@itemx loro -Lo/Ro Downmix Preferred -@end table - -@item -ltrt_cmixlev @var{level} -Lt/Rt Center Mix Level. The amount of gain the decoder should apply to the -center channel when downmixing to stereo in Lt/Rt mode. -@table @option -@item 1.414 -Apply +3dB gain -@item 1.189 -Apply +1.5dB gain -@item 1.000 -Apply 0dB gain -@item 0.841 -Apply -1.5dB gain -@item 0.707 -Apply -3.0dB gain -@item 0.595 -Apply -4.5dB gain (default) -@item 0.500 -Apply -6.0dB gain -@item 0.000 -Silence Center Channel -@end table - -@item -ltrt_surmixlev @var{level} -Lt/Rt Surround Mix Level. The amount of gain the decoder should apply to the -surround channel(s) when downmixing to stereo in Lt/Rt mode. -@table @option -@item 0.841 -Apply -1.5dB gain -@item 0.707 -Apply -3.0dB gain -@item 0.595 -Apply -4.5dB gain -@item 0.500 -Apply -6.0dB gain (default) -@item 0.000 -Silence Surround Channel(s) -@end table - -@item -loro_cmixlev @var{level} -Lo/Ro Center Mix Level. The amount of gain the decoder should apply to the -center channel when downmixing to stereo in Lo/Ro mode. -@table @option -@item 1.414 -Apply +3dB gain -@item 1.189 -Apply +1.5dB gain -@item 1.000 -Apply 0dB gain -@item 0.841 -Apply -1.5dB gain -@item 0.707 -Apply -3.0dB gain -@item 0.595 -Apply -4.5dB gain (default) -@item 0.500 -Apply -6.0dB gain -@item 0.000 -Silence Center Channel -@end table - -@item -loro_surmixlev @var{level} -Lo/Ro Surround Mix Level. The amount of gain the decoder should apply to the -surround channel(s) when downmixing to stereo in Lo/Ro mode. -@table @option -@item 0.841 -Apply -1.5dB gain -@item 0.707 -Apply -3.0dB gain -@item 0.595 -Apply -4.5dB gain -@item 0.500 -Apply -6.0dB gain (default) -@item 0.000 -Silence Surround Channel(s) -@end table - -@end table - -@subsubsection Extended Bitstream Information - Part 2 - -@table @option - -@item -dsurex_mode @var{mode} -Dolby Surround EX Mode. Indicates whether the stream uses Dolby Surround EX -(7.1 matrixed to 5.1). Using this option does @b{NOT} mean the encoder will actually -apply Dolby Surround EX processing. -@table @option -@item 0 -@itemx notindicated -Not Indicated (default) -@item 1 -@itemx on -Dolby Surround EX Off -@item 2 -@itemx off -Dolby Surround EX On -@end table - -@item -dheadphone_mode @var{mode} -Dolby Headphone Mode. Indicates whether the stream uses Dolby Headphone -encoding (multi-channel matrixed to 2.0 for use with headphones). Using this -option does @b{NOT} mean the encoder will actually apply Dolby Headphone -processing. -@table @option -@item 0 -@itemx notindicated -Not Indicated (default) -@item 1 -@itemx on -Dolby Headphone Off -@item 2 -@itemx off -Dolby Headphone On -@end table - -@item -ad_conv_type @var{type} -A/D Converter Type. Indicates whether the audio has passed through HDCD A/D -conversion. -@table @option -@item 0 -@itemx standard -Standard A/D Converter (default) -@item 1 -@itemx hdcd -HDCD A/D Converter -@end table - -@end table - -@subsection Other AC-3 Encoding Options - -@table @option - -@item -stereo_rematrixing @var{boolean} -Stereo Rematrixing. Enables/Disables use of rematrixing for stereo input. This -is an optional AC-3 feature that increases quality by selectively encoding -the left/right channels as mid/side. This option is enabled by default, and it -is highly recommended that it be left as enabled except for testing purposes. 
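-
-For testing purposes only, it could be switched off explicitly; the following
-sketch (the bitrate is chosen arbitrarily) shows how:
-@example
-ffmpeg -i input.wav -c:a ac3 -b:a 192k -stereo_rematrixing 0 output.ac3
-@end example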
- -@end table - -@subsection Floating-Point-Only AC-3 Encoding Options - -These options are only valid for the floating-point encoder and do not exist -for the fixed-point encoder due to the corresponding features not being -implemented in fixed-point. - -@table @option - -@item -channel_coupling @var{boolean} -Enables/Disables use of channel coupling, which is an optional AC-3 feature -that increases quality by combining high frequency information from multiple -channels into a single channel. The per-channel high frequency information is -sent with less accuracy in both the frequency and time domains. This allows -more bits to be used for lower frequencies while preserving enough information -to reconstruct the high frequencies. This option is enabled by default for the -floating-point encoder and should generally be left as enabled except for -testing purposes or to increase encoding speed. -@table @option -@item -1 -@itemx auto -Selected by Encoder (default) -@item 0 -@itemx off -Disable Channel Coupling -@item 1 -@itemx on -Enable Channel Coupling -@end table - -@item -cpl_start_band @var{number} -Coupling Start Band. Sets the channel coupling start band, from 1 to 15. If a -value higher than the bandwidth is used, it will be reduced to 1 less than the -coupling end band. If @var{auto} is used, the start band will be determined by -the encoder based on the bit rate, sample rate, and channel layout. This option -has no effect if channel coupling is disabled. -@table @option -@item -1 -@itemx auto -Selected by Encoder (default) -@end table - -@end table - -@anchor{libfaac} -@section libfaac - -libfaac AAC (Advanced Audio Coding) encoder wrapper. - -Requires the presence of the libfaac headers and library during -configuration. You need to explicitly configure the build with -@code{--enable-libfaac --enable-nonfree}. - -This encoder is considered to be of higher quality with respect to the -@ref{aacenc,,the native experimental FFmpeg AAC encoder}. - -For more information see the libfaac project at -@url{http://www.audiocoding.com/faac.html/}. - -@subsection Options - -The following shared FFmpeg codec options are recognized. - -The following options are supported by the libfaac wrapper. The -@command{faac}-equivalent of the options are listed in parentheses. - -@table @option -@item b (@emph{-b}) -Set bit rate in bits/s for ABR (Average Bit Rate) mode. If the bit rate -is not explicitly specified, it is automatically set to a suitable -value depending on the selected profile. @command{faac} bitrate is -expressed in kilobits/s. - -Note that libfaac does not support CBR (Constant Bit Rate) but only -ABR (Average Bit Rate). - -If VBR mode is enabled this option is ignored. - -@item ar (@emph{-R}) -Set audio sampling rate (in Hz). - -@item ac (@emph{-c}) -Set the number of audio channels. - -@item cutoff (@emph{-C}) -Set cutoff frequency. If not specified (or explicitly set to 0) it -will use a value automatically computed by the library. Default value -is 0. - -@item profile -Set audio profile. - -The following profiles are recognized: -@table @samp -@item aac_main -Main AAC (Main) - -@item aac_low -Low Complexity AAC (LC) - -@item aac_ssr -Scalable Sample Rate (SSR) - -@item aac_ltp -Long Term Prediction (LTP) -@end table - -If not specified it is set to @samp{aac_low}. - -@item flags +qscale -Set constant quality VBR (Variable Bit Rate) mode. - -@item global_quality -Set quality in VBR mode as an integer number of lambda units. 
- -Only relevant when VBR mode is enabled with @code{flags +qscale}. The -value is converted to QP units by dividing it by @code{FF_QP2LAMBDA}, -and used to set the quality value used by libfaac. A reasonable range -for the option value in QP units is [10-500], the higher the value the -higher the quality. - -@item q (@emph{-q}) -Enable VBR mode when set to a non-negative value, and set constant -quality value as a double floating point value in QP units. - -The value sets the quality value used by libfaac. A reasonable range -for the option value is [10-500], the higher the value the higher the -quality. - -This option is valid only using the @command{ffmpeg} command-line -tool. For library interface users, use @option{global_quality}. -@end table - -@subsection Examples - -@itemize -@item -Use @command{ffmpeg} to convert an audio file to ABR 128 kbps AAC in an M4A (MP4) -container: -@example -ffmpeg -i input.wav -codec:a libfaac -b:a 128k -output.m4a -@end example - -@item -Use @command{ffmpeg} to convert an audio file to VBR AAC, using the -LTP AAC profile: -@example -ffmpeg -i input.wav -c:a libfaac -profile:a aac_ltp -q:a 100 output.m4a -@end example -@end itemize - -@anchor{libfdk-aac-enc} -@section libfdk_aac - -libfdk-aac AAC (Advanced Audio Coding) encoder wrapper. - -The libfdk-aac library is based on the Fraunhofer FDK AAC code from -the Android project. - -Requires the presence of the libfdk-aac headers and library during -configuration. You need to explicitly configure the build with -@code{--enable-libfdk-aac}. The library is also incompatible with GPL, -so if you allow the use of GPL, you should configure with -@code{--enable-gpl --enable-nonfree --enable-libfdk-aac}. - -This encoder is considered to be of higher quality with respect to -both @ref{aacenc,,the native experimental FFmpeg AAC encoder} and -@ref{libfaac}. - -VBR encoding, enabled through the @option{vbr} or @option{flags -+qscale} options, is experimental and only works with some -combinations of parameters. - -For more information see the fdk-aac project at -@url{http://sourceforge.net/p/opencore-amr/fdk-aac/}. - -@subsection Options - -The following options are mapped on the shared FFmpeg codec options. - -@table @option -@item b -Set bit rate in bits/s. If the bitrate is not explicitly specified, it -is automatically set to a suitable value depending on the selected -profile. - -In case VBR mode is enabled the option is ignored. - -@item ar -Set audio sampling rate (in Hz). - -@item channels -Set the number of audio channels. - -@item flags +qscale -Enable fixed quality, VBR (Variable Bit Rate) mode. -Note that VBR is implicitly enabled when the @option{vbr} value is -positive. - -@item cutoff -Set cutoff frequency. If not specified (or explicitly set to 0) it -will use a value automatically computed by the library. Default value -is 0. - -@item profile -Set audio profile. - -The following profiles are recognized: -@table @samp -@item aac_low -Low Complexity AAC (LC) - -@item aac_he -High Efficiency AAC (HE-AAC) - -@item aac_he_v2 -High Efficiency AAC version 2 (HE-AACv2) - -@item aac_ld -Low Delay AAC (LD) - -@item aac_eld -Enhanced Low Delay AAC (ELD) -@end table - -If not specified it is set to @samp{aac_low}. -@end table - -The following are private options of the libfdk_aac encoder. - -@table @option -@item afterburner -Enable afterburner feature if set to 1, disabled if set to 0. This -improves the quality but also the required processing power. - -Default value is 1. 
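-
-For instance (bitrate and file names are illustrative, and a build configured
-with @code{--enable-libfdk-aac} is assumed), afterburner could be disabled to
-trade a little quality for encoding speed:
-@example
-ffmpeg -i input.wav -c:a libfdk_aac -b:a 128k -afterburner 0 output.m4a
-@end example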
- -@item eld_sbr -Enable SBR (Spectral Band Replication) for ELD if set to 1, disabled -if set to 0. - -Default value is 0. - -@item signaling -Set SBR/PS signaling style. - -It can assume one of the following values: -@table @samp -@item default -choose signaling implicitly (explicit hierarchical by default, -implicit if global header is disabled) - -@item implicit -implicit backwards compatible signaling - -@item explicit_sbr -explicit SBR, implicit PS signaling - -@item explicit_hierarchical -explicit hierarchical signaling -@end table - -Default value is @samp{default}. - -@item latm -Output LATM/LOAS encapsulated data if set to 1, disabled if set to 0. - -Default value is 0. - -@item header_period -Set StreamMuxConfig and PCE repetition period (in frames) for sending -in-band configuration buffers within LATM/LOAS transport layer. - -Must be a 16-bits non-negative integer. - -Default value is 0. - -@item vbr -Set VBR mode, from 1 to 5. 1 is lowest quality (though still pretty -good) and 5 is highest quality. A value of 0 will disable VBR, and CBR -(Constant Bit Rate) is enabled. - -Currently only the @samp{aac_low} profile supports VBR encoding. - -VBR modes 1-5 correspond to roughly the following average bit rates: - -@table @samp -@item 1 -32 kbps/channel -@item 2 -40 kbps/channel -@item 3 -48-56 kbps/channel -@item 4 -64 kbps/channel -@item 5 -about 80-96 kbps/channel -@end table - -Default value is 0. -@end table - -@subsection Examples - -@itemize -@item -Use @command{ffmpeg} to convert an audio file to VBR AAC in an M4A (MP4) -container: -@example -ffmpeg -i input.wav -codec:a libfdk_aac -vbr 3 output.m4a -@end example - -@item -Use @command{ffmpeg} to convert an audio file to CBR 64k kbps AAC, using the -High-Efficiency AAC profile: -@example -ffmpeg -i input.wav -c:a libfdk_aac -profile:a aac_he -b:a 64k output.m4a -@end example -@end itemize - -@anchor{libmp3lame} -@section libmp3lame - -LAME (Lame Ain't an MP3 Encoder) MP3 encoder wrapper. - -Requires the presence of the libmp3lame headers and library during -configuration. You need to explicitly configure the build with -@code{--enable-libmp3lame}. - -See @ref{libshine} for a fixed-point MP3 encoder, although with a -lower quality. - -@subsection Options - -The following options are supported by the libmp3lame wrapper. The -@command{lame}-equivalent of the options are listed in parentheses. - -@table @option -@item b (@emph{-b}) -Set bitrate expressed in bits/s for CBR or ABR. LAME @code{bitrate} is -expressed in kilobits/s. - -@item q (@emph{-V}) -Set constant quality setting for VBR. This option is valid only -using the @command{ffmpeg} command-line tool. For library interface -users, use @option{global_quality}. - -@item compression_level (@emph{-q}) -Set algorithm quality. Valid arguments are integers in the 0-9 range, -with 0 meaning highest quality but slowest, and 9 meaning fastest -while producing the worst quality. - -@item reservoir -Enable use of bit reservoir when set to 1. Default value is 1. LAME -has this enabled by default, but can be overriden by use -@option{--nores} option. - -@item joint_stereo (@emph{-m j}) -Enable the encoder to use (on a frame by frame basis) either L/R -stereo or mid/side stereo. Default value is 1. - -@item abr (@emph{--abr}) -Enable the encoder to use ABR when set to 1. The @command{lame} -@option{--abr} sets the target bitrate, while this options only -tells FFmpeg to use ABR still relies on @option{b} to set bitrate. 
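-
-For example (the target bitrate is arbitrary), ABR encoding could be requested
-with:
-@example
-ffmpeg -i input.wav -c:a libmp3lame -abr 1 -b:a 192k output.mp3
-@end example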
- -@end table - -@section libopencore-amrnb - -OpenCORE Adaptive Multi-Rate Narrowband encoder. - -Requires the presence of the libopencore-amrnb headers and library during -configuration. You need to explicitly configure the build with -@code{--enable-libopencore-amrnb --enable-version3}. - -This is a mono-only encoder. Officially it only supports 8000Hz sample rate, -but you can override it by setting @option{strict} to @samp{unofficial} or -lower. - -@subsection Options - -@table @option - -@item b -Set bitrate in bits per second. Only the following bitrates are supported, -otherwise libavcodec will round to the nearest valid bitrate. - -@table @option -@item 4750 -@item 5150 -@item 5900 -@item 6700 -@item 7400 -@item 7950 -@item 10200 -@item 12200 -@end table - -@item dtx -Allow discontinuous transmission (generate comfort noise) when set to 1. The -default value is 0 (disabled). - -@end table - -@anchor{libshine} -@section libshine - -Shine Fixed-Point MP3 encoder wrapper. - -Shine is a fixed-point MP3 encoder. It has a far better performance on -platforms without an FPU, e.g. armel CPUs, and some phones and tablets. -However, as it is more targeted on performance than quality, it is not on par -with LAME and other production-grade encoders quality-wise. Also, according to -the project's homepage, this encoder may not be free of bugs as the code was -written a long time ago and the project was dead for at least 5 years. - -This encoder only supports stereo and mono input. This is also CBR-only. - -The original project (last updated in early 2007) is at -@url{http://sourceforge.net/projects/libshine-fxp/}. We only support the -updated fork by the Savonet/Liquidsoap project at @url{https://github.com/savonet/shine}. - -Requires the presence of the libshine headers and library during -configuration. You need to explicitly configure the build with -@code{--enable-libshine}. - -See also @ref{libmp3lame}. - -@subsection Options - -The following options are supported by the libshine wrapper. The -@command{shineenc}-equivalent of the options are listed in parentheses. - -@table @option -@item b (@emph{-b}) -Set bitrate expressed in bits/s for CBR. @command{shineenc} @option{-b} option -is expressed in kilobits/s. - -@end table - -@section libtwolame - -TwoLAME MP2 encoder wrapper. - -Requires the presence of the libtwolame headers and library during -configuration. You need to explicitly configure the build with -@code{--enable-libtwolame}. - -@subsection Options - -The following options are supported by the libtwolame wrapper. The -@command{twolame}-equivalent options follow the FFmpeg ones and are in -parentheses. - -@table @option -@item b (@emph{-b}) -Set bitrate expressed in bits/s for CBR. @command{twolame} @option{b} -option is expressed in kilobits/s. Default value is 128k. - -@item q (@emph{-V}) -Set quality for experimental VBR support. Maximum value range is -from -50 to 50, useful range is from -10 to 10. The higher the -value, the better the quality. This option is valid only using the -@command{ffmpeg} command-line tool. For library interface users, -use @option{global_quality}. - -@item mode (@emph{--mode}) -Set the mode of the resulting audio. Possible values: - -@table @samp -@item auto -Choose mode automatically based on the input. This is the default. -@item stereo -Stereo -@item joint_stereo -Joint stereo -@item dual_channel -Dual channel -@item mono -Mono -@end table - -@item psymodel (@emph{--psyc-mode}) -Set psychoacoustic model to use in encoding. 
The argument must be -an integer between -1 and 4, inclusive. The higher the value, the -better the quality. The default value is 3. - -@item energy_levels (@emph{--energy}) -Enable energy levels extensions when set to 1. The default value is -0 (disabled). - -@item error_protection (@emph{--protect}) -Enable CRC error protection when set to 1. The default value is 0 -(disabled). - -@item copyright (@emph{--copyright}) -Set MPEG audio copyright flag when set to 1. The default value is 0 -(disabled). - -@item original (@emph{--original}) -Set MPEG audio original flag when set to 1. The default value is 0 -(disabled). - -@end table - -@anchor{libvo-aacenc} -@section libvo-aacenc - -VisualOn AAC encoder. - -Requires the presence of the libvo-aacenc headers and library during -configuration. You need to explicitly configure the build with -@code{--enable-libvo-aacenc --enable-version3}. - -This encoder is considered to be worse than the -@ref{aacenc,,native experimental FFmpeg AAC encoder}, according to -multiple sources. - -@subsection Options - -The VisualOn AAC encoder only support encoding AAC-LC and up to 2 -channels. It is also CBR-only. - -@table @option - -@item b -Set bit rate in bits/s. - -@end table - -@section libvo-amrwbenc - -VisualOn Adaptive Multi-Rate Wideband encoder. - -Requires the presence of the libvo-amrwbenc headers and library during -configuration. You need to explicitly configure the build with -@code{--enable-libvo-amrwbenc --enable-version3}. - -This is a mono-only encoder. Officially it only supports 16000Hz sample -rate, but you can override it by setting @option{strict} to -@samp{unofficial} or lower. - -@subsection Options - -@table @option - -@item b -Set bitrate in bits/s. Only the following bitrates are supported, otherwise -libavcodec will round to the nearest valid bitrate. - -@table @samp -@item 6600 -@item 8850 -@item 12650 -@item 14250 -@item 15850 -@item 18250 -@item 19850 -@item 23050 -@item 23850 -@end table - -@item dtx -Allow discontinuous transmission (generate comfort noise) when set to 1. The -default value is 0 (disabled). - -@end table - -@section libopus - -libopus Opus Interactive Audio Codec encoder wrapper. - -Requires the presence of the libopus headers and library during -configuration. You need to explicitly configure the build with -@code{--enable-libopus}. - -@subsection Option Mapping - -Most libopus options are modeled after the @command{opusenc} utility from -opus-tools. The following is an option mapping chart describing options -supported by the libopus wrapper, and their @command{opusenc}-equivalent -in parentheses. - -@table @option - -@item b (@emph{bitrate}) -Set the bit rate in bits/s. FFmpeg's @option{b} option is -expressed in bits/s, while @command{opusenc}'s @option{bitrate} in -kilobits/s. - -@item vbr (@emph{vbr}, @emph{hard-cbr}, and @emph{cvbr}) -Set VBR mode. The FFmpeg @option{vbr} option has the following -valid arguments, with the their @command{opusenc} equivalent options -in parentheses: - -@table @samp -@item off (@emph{hard-cbr}) -Use constant bit rate encoding. - -@item on (@emph{vbr}) -Use variable bit rate encoding (the default). - -@item constrained (@emph{cvbr}) -Use constrained variable bit rate encoding. -@end table - -@item compression_level (@emph{comp}) -Set encoding algorithm complexity. Valid options are integers in -the 0-10 range. 0 gives the fastest encodes but lower quality, while 10 -gives the highest quality but slowest encoding. The default is 10. 
- -@item frame_duration (@emph{framesize}) -Set maximum frame size, or duration of a frame in milliseconds. The -argument must be exactly the following: 2.5, 5, 10, 20, 40, 60. Smaller -frame sizes achieve lower latency but less quality at a given bitrate. -Sizes greater than 20ms are only interesting at fairly low bitrates. -The default is 20ms. - -@item packet_loss (@emph{expect-loss}) -Set expected packet loss percentage. The default is 0. - -@item application (N.A.) -Set intended application type. Valid options are listed below: - -@table @samp -@item voip -Favor improved speech intelligibility. -@item audio -Favor faithfulness to the input (the default). -@item lowdelay -Restrict to only the lowest delay modes. -@end table - -@item cutoff (N.A.) -Set cutoff bandwidth in Hz. The argument must be exactly one of the -following: 4000, 6000, 8000, 12000, or 20000, corresponding to -narrowband, mediumband, wideband, super wideband, and fullband -respectively. The default is 0 (cutoff disabled). - -@end table - -@section libvorbis - -libvorbis encoder wrapper. - -Requires the presence of the libvorbisenc headers and library during -configuration. You need to explicitly configure the build with -@code{--enable-libvorbis}. - -@subsection Options - -The following options are supported by the libvorbis wrapper. The -@command{oggenc}-equivalent of the options are listed in parentheses. - -To get a more accurate and extensive documentation of the libvorbis -options, consult the libvorbisenc's and @command{oggenc}'s documentations. -See @url{http://xiph.org/vorbis/}, -@url{http://wiki.xiph.org/Vorbis-tools}, and oggenc(1). - -@table @option -@item b (@emph{-b}) -Set bitrate expressed in bits/s for ABR. @command{oggenc} @option{-b} is -expressed in kilobits/s. - -@item q (@emph{-q}) -Set constant quality setting for VBR. The value should be a float -number in the range of -1.0 to 10.0. The higher the value, the better -the quality. The default value is @samp{3.0}. - -This option is valid only using the @command{ffmpeg} command-line tool. -For library interface users, use @option{global_quality}. - -@item cutoff (@emph{--advanced-encode-option lowpass_frequency=N}) -Set cutoff bandwidth in Hz, a value of 0 disables cutoff. @command{oggenc}'s -related option is expressed in kHz. The default value is @samp{0} (cutoff -disabled). - -@item minrate (@emph{-m}) -Set minimum bitrate expressed in bits/s. @command{oggenc} @option{-m} is -expressed in kilobits/s. - -@item maxrate (@emph{-M}) -Set maximum bitrate expressed in bits/s. @command{oggenc} @option{-M} is -expressed in kilobits/s. This only has effect on ABR mode. - -@item iblock (@emph{--advanced-encode-option impulse_noisetune=N}) -Set noise floor bias for impulse blocks. The value is a float number from --15.0 to 0.0. A negative bias instructs the encoder to pay special attention -to the crispness of transients in the encoded audio. The tradeoff for better -transient response is a higher bitrate. - -@end table - -@section libwavpack - -A wrapper providing WavPack encoding through libwavpack. - -Only lossless mode using 32-bit integer samples is supported currently. -The @option{compression_level} option can be used to control speed vs. -compression tradeoff, with the values mapped to libwavpack as follows: - -@table @option - -@item 0 -Fast mode - corresponding to the wavpack @option{-f} option. - -@item 1 -Normal (default) settings. - -@item 2 -High quality - corresponding to the wavpack @option{-h} option. 
- -@item 3 -Very high quality - corresponding to the wavpack @option{-hh} option. - -@item 4-8 -Same as 3, but with extra processing enabled - corresponding to the wavpack -@option{-x} option. I.e. 4 is the same as @option{-x2} and 8 is the same as -@option{-x6}. - -@end table - -@c man end AUDIO ENCODERS - -@chapter Video Encoders -@c man begin VIDEO ENCODERS - -A description of some of the currently available video encoders -follows. - -@section libtheora - -libtheora Theora encoder wrapper. - -Requires the presence of the libtheora headers and library during -configuration. You need to explicitly configure the build with -@code{--enable-libtheora}. - -For more informations about the libtheora project see -@url{http://www.theora.org/}. - -@subsection Options - -The following global options are mapped to internal libtheora options -which affect the quality and the bitrate of the encoded stream. - -@table @option -@item b -Set the video bitrate in bit/s for CBR (Constant Bit Rate) mode. In -case VBR (Variable Bit Rate) mode is enabled this option is ignored. - -@item flags -Used to enable constant quality mode (VBR) encoding through the -@option{qscale} flag, and to enable the @code{pass1} and @code{pass2} -modes. - -@item g -Set the GOP size. - -@item global_quality -Set the global quality as an integer in lambda units. - -Only relevant when VBR mode is enabled with @code{flags +qscale}. The -value is converted to QP units by dividing it by @code{FF_QP2LAMBDA}, -clipped in the [0 - 10] range, and then multiplied by 6.3 to get a -value in the native libtheora range [0-63]. A higher value corresponds -to a higher quality. - -@item q -Enable VBR mode when set to a non-negative value, and set constant -quality value as a double floating point value in QP units. - -The value is clipped in the [0-10] range, and then multiplied by 6.3 -to get a value in the native libtheora range [0-63]. - -This option is valid only using the @command{ffmpeg} command-line -tool. For library interface users, use @option{global_quality}. -@end table - -@subsection Examples - -@itemize -@item -Set maximum constant quality (VBR) encoding with @command{ffmpeg}: -@example -ffmpeg -i INPUT -codec:v libtheora -q:v 10 OUTPUT.ogg -@end example - -@item -Use @command{ffmpeg} to convert a CBR 1000 kbps Theora video stream: -@example -ffmpeg -i INPUT -codec:v libtheora -b:v 1000k OUTPUT.ogg -@end example -@end itemize - -@section libvpx - -VP8 format supported through libvpx. - -Requires the presence of the libvpx headers and library during configuration. -You need to explicitly configure the build with @code{--enable-libvpx}. - -@subsection Options - -Mapping from FFmpeg to libvpx options with conversion notes in parentheses. 
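-
-As a rough, hedged example (bitrate and CRF values are arbitrary, and a build
-with libvpx and libvorbis enabled is assumed), a typical VP8 encode could be:
-@example
-ffmpeg -i INPUT -c:v libvpx -b:v 1M -crf 10 -c:a libvorbis OUTPUT.webm
-@end example
-The individual option mappings are listed in the table below.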
- -@table @option - -@item threads -g_threads - -@item profile -g_profile - -@item vb -rc_target_bitrate - -@item g -kf_max_dist - -@item keyint_min -kf_min_dist - -@item qmin -rc_min_quantizer - -@item qmax -rc_max_quantizer - -@item bufsize, vb -rc_buf_sz -@code{(bufsize * 1000 / vb)} - -rc_buf_optimal_sz -@code{(bufsize * 1000 / vb * 5 / 6)} - -@item rc_init_occupancy, vb -rc_buf_initial_sz -@code{(rc_init_occupancy * 1000 / vb)} - -@item rc_buffer_aggressivity -rc_undershoot_pct - -@item skip_threshold -rc_dropframe_thresh - -@item qcomp -rc_2pass_vbr_bias_pct - -@item maxrate, vb -rc_2pass_vbr_maxsection_pct -@code{(maxrate * 100 / vb)} - -@item minrate, vb -rc_2pass_vbr_minsection_pct -@code{(minrate * 100 / vb)} - -@item minrate, maxrate, vb -@code{VPX_CBR} -@code{(minrate == maxrate == vb)} - -@item crf -@code{VPX_CQ}, @code{VP8E_SET_CQ_LEVEL} - -@item quality -@table @option -@item @var{best} -@code{VPX_DL_BEST_QUALITY} -@item @var{good} -@code{VPX_DL_GOOD_QUALITY} -@item @var{realtime} -@code{VPX_DL_REALTIME} -@end table - -@item speed -@code{VP8E_SET_CPUUSED} - -@item nr -@code{VP8E_SET_NOISE_SENSITIVITY} - -@item mb_threshold -@code{VP8E_SET_STATIC_THRESHOLD} - -@item slices -@code{VP8E_SET_TOKEN_PARTITIONS} - -@item max-intra-rate -@code{VP8E_SET_MAX_INTRA_BITRATE_PCT} - -@item force_key_frames -@code{VPX_EFLAG_FORCE_KF} - -@item Alternate reference frame related -@table @option -@item vp8flags altref -@code{VP8E_SET_ENABLEAUTOALTREF} -@item @var{arnr_max_frames} -@code{VP8E_SET_ARNR_MAXFRAMES} -@item @var{arnr_type} -@code{VP8E_SET_ARNR_TYPE} -@item @var{arnr_strength} -@code{VP8E_SET_ARNR_STRENGTH} -@item @var{rc_lookahead} -g_lag_in_frames -@end table - -@item vp8flags error_resilient -g_error_resilient - -@end table - -For more information about libvpx see: -@url{http://www.webmproject.org/} - - -@section libwebp - -libwebp WebP Image encoder wrapper - -libwebp is Google's official encoder for WebP images. It can encode in either -lossy or lossless mode. Lossy images are essentially a wrapper around a VP8 -frame. Lossless images are a separate codec developed by Google. - -@subsection Pixel Format - -Currently, libwebp only supports YUV420 for lossy and RGB for lossless due -to limitations of the format and libwebp. Alpha is supported for either mode. -Because of API limitations, if RGB is passed in when encoding lossy or YUV is -passed in for encoding lossless, the pixel format will automatically be -converted using functions from libwebp. This is not ideal and is done only for -convenience. - -@subsection Options - -@table @option - -@item -lossless @var{boolean} -Enables/Disables use of lossless mode. Default is 0. - -@item -compression_level @var{integer} -For lossy, this is a quality/speed tradeoff. Higher values give better quality -for a given size at the cost of increased encoding time. For lossless, this is -a size/speed tradeoff. Higher values give smaller size at the cost of increased -encoding time. More specifically, it controls the number of extra algorithms -and compression tools used, and varies the combination of these tools. This -maps to the @var{method} option in libwebp. The valid range is 0 to 6. -Default is 4. - -@item -qscale @var{float} -For lossy encoding, this controls image quality, 0 to 100. For lossless -encoding, this controls the effort and time spent at compressing more. The -default value is 75. Note that for usage via libavcodec, this option is called -@var{global_quality} and must be multiplied by @var{FF_QP2LAMBDA}. 
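-
-As a sketch (the quality value and file names are arbitrary, and a build with
-libwebp enabled is assumed), a lossy encode at quality 75 could be requested
-with:
-@example
-ffmpeg -i input.png -c:v libwebp -lossless 0 -qscale 75 output.webp
-@end example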
- -@item -preset @var{type} -Configuration preset. This does some automatic settings based on the general -type of the image. -@table @option -@item none -Do not use a preset. -@item default -Use the encoder default. -@item picture -Digital picture, like portrait, inner shot -@item photo -Outdoor photograph, with natural lighting -@item drawing -Hand or line drawing, with high-contrast details -@item icon -Small-sized colorful images -@item text -Text-like -@end table - -@end table - -@section libx264 - -x264 H.264/MPEG-4 AVC encoder wrapper. - -This encoder requires the presence of the libx264 headers and library -during configuration. You need to explicitly configure the build with -@code{--enable-libx264}. - -libx264 supports an impressive number of features, including 8x8 and -4x4 adaptive spatial transform, adaptive B-frame placement, CAVLC/CABAC -entropy coding, interlacing (MBAFF), lossless mode, psy optimizations -for detail retention (adaptive quantization, psy-RD, psy-trellis). - -Many libx264 encoder options are mapped to FFmpeg global codec -options, while unique encoder options are provided through private -options. Additionally the @option{x264opts} and @option{x264-params} -private options allows to pass a list of key=value tuples as accepted -by the libx264 @code{x264_param_parse} function. - -The x264 project website is at -@url{http://www.videolan.org/developers/x264.html}. - -@subsection Options - -The following options are supported by the libx264 wrapper. The -@command{x264}-equivalent options or values are listed in parentheses -for easy migration. - -To reduce the duplication of documentation, only the private options -and some others requiring special attention are documented here. For -the documentation of the undocumented generic options, see -@ref{codec-options,,the Codec Options chapter}. - -To get a more accurate and extensive documentation of the libx264 -options, invoke the command @command{x264 --full-help} or consult -the libx264 documentation. - -@table @option -@item b (@emph{bitrate}) -Set bitrate in bits/s. Note that FFmpeg's @option{b} option is -expressed in bits/s, while @command{x264}'s @option{bitrate} is in -kilobits/s. - -@item bf (@emph{bframes}) - -@item g (@emph{keyint}) - -@item qmax (@emph{qpmax}) - -@item qmin (@emph{qpmin}) - -@item qdiff (@emph{qpstep}) - -@item qblur (@emph{qblur}) - -@item qcomp (@emph{qcomp}) - -@item refs (@emph{ref}) - -@item sc_threshold (@emph{scenecut}) - -@item trellis (@emph{trellis}) - -@item nr (@emph{nr}) - -@item me_range (@emph{merange}) - -@item me_method (@emph{me}) -Set motion estimation method. Possible values in the decreasing order -of speed: - -@table @samp -@item dia (@emph{dia}) -@item epzs (@emph{dia}) -Diamond search with radius 1 (fastest). @samp{epzs} is an alias for -@samp{dia}. -@item hex (@emph{hex}) -Hexagonal search with radius 2. -@item umh (@emph{umh}) -Uneven multi-hexagon search. -@item esa (@emph{esa}) -Exhaustive search. -@item tesa (@emph{tesa}) -Hadamard exhaustive search (slowest). -@end table - -@item subq (@emph{subme}) - -@item b_strategy (@emph{b-adapt}) - -@item keyint_min (@emph{min-keyint}) - -@item coder -Set entropy encoder. Possible values: - -@table @samp -@item ac -Enable CABAC. - -@item vlc -Enable CAVLC and disable CABAC. It generates the same effect as -@command{x264}'s @option{--no-cabac} option. -@end table - -@item cmp -Set full pixel motion estimation comparation algorithm. Possible values: - -@table @samp -@item chroma -Enable chroma in motion estimation. 
- -@item sad -Ignore chroma in motion estimation. It generates the same effect as -@command{x264}'s @option{--no-chroma-me} option. -@end table - -@item threads (@emph{threads}) - -@item thread_type -Set multithreading technique. Possible values: - -@table @samp -@item slice -Slice-based multithreading. It generates the same effect as -@command{x264}'s @option{--sliced-threads} option. -@item frame -Frame-based multithreading. -@end table - -@item flags -Set encoding flags. It can be used to disable closed GOP and enable -open GOP by setting it to @code{-cgop}. The result is similar to -the behavior of @command{x264}'s @option{--open-gop} option. - -@item rc_init_occupancy (@emph{vbv-init}) - -@item preset (@emph{preset}) -Set the encoding preset. - -@item tune (@emph{tune}) -Set tuning of the encoding params. - -@item profile (@emph{profile}) -Set profile restrictions. - -@item fastfirstpass -Enable fast settings when encoding first pass, when set to 1. When set -to 0, it has the same effect of @command{x264}'s -@option{--slow-firstpass} option. - -@item crf (@emph{crf}) -Set the quality for constant quality mode. - -@item crf_max (@emph{crf-max}) -In CRF mode, prevents VBV from lowering quality beyond this point. - -@item qp (@emph{qp}) -Set constant quantization rate control method parameter. - -@item aq-mode (@emph{aq-mode}) -Set AQ method. Possible values: - -@table @samp -@item none (@emph{0}) -Disabled. - -@item variance (@emph{1}) -Variance AQ (complexity mask). - -@item autovariance (@emph{2}) -Auto-variance AQ (experimental). -@end table - -@item aq-strength (@emph{aq-strength}) -Set AQ strength, reduce blocking and blurring in flat and textured areas. - -@item psy -Use psychovisual optimizations when set to 1. When set to 0, it has the -same effect as @command{x264}'s @option{--no-psy} option. - -@item psy-rd (@emph{psy-rd}) -Set strength of psychovisual optimization, in -@var{psy-rd}:@var{psy-trellis} format. - -@item rc-lookahead (@emph{rc-lookahead}) -Set number of frames to look ahead for frametype and ratecontrol. - -@item weightb -Enable weighted prediction for B-frames when set to 1. When set to 0, -it has the same effect as @command{x264}'s @option{--no-weightb} option. - -@item weightp (@emph{weightp}) -Set weighted prediction method for P-frames. Possible values: - -@table @samp -@item none (@emph{0}) -Disabled -@item simple (@emph{1}) -Enable only weighted refs -@item smart (@emph{2}) -Enable both weighted refs and duplicates -@end table - -@item ssim (@emph{ssim}) -Enable calculation and printing SSIM stats after the encoding. - -@item intra-refresh (@emph{intra-refresh}) -Enable the use of Periodic Intra Refresh instead of IDR frames when set -to 1. - -@item bluray-compat (@emph{bluray-compat}) -Configure the encoder to be compatible with the bluray standard. -It is a shorthand for setting "bluray-compat=1 force-cfr=1". - -@item b-bias (@emph{b-bias}) -Set the influence on how often B-frames are used. - -@item b-pyramid (@emph{b-pyramid}) -Set method for keeping of some B-frames as references. Possible values: - -@table @samp -@item none (@emph{none}) -Disabled. -@item strict (@emph{strict}) -Strictly hierarchical pyramid. -@item normal (@emph{normal}) -Non-strict (not Blu-ray compatible). -@end table - -@item mixed-refs -Enable the use of one reference per partition, as opposed to one -reference per macroblock when set to 1. When set to 0, it has the -same effect as @command{x264}'s @option{--no-mixed-refs} option. 
- -@item 8x8dct -Enable adaptive spatial transform (high profile 8x8 transform) -when set to 1. When set to 0, it has the same effect as -@command{x264}'s @option{--no-8x8dct} option. - -@item fast-pskip -Enable early SKIP detection on P-frames when set to 1. When set -to 0, it has the same effect as @command{x264}'s -@option{--no-fast-pskip} option. - -@item aud (@emph{aud}) -Enable use of access unit delimiters when set to 1. - -@item mbtree -Enable use macroblock tree ratecontrol when set to 1. When set -to 0, it has the same effect as @command{x264}'s -@option{--no-mbtree} option. - -@item deblock (@emph{deblock}) -Set loop filter parameters, in @var{alpha}:@var{beta} form. - -@item cplxblur (@emph{cplxblur}) -Set fluctuations reduction in QP (before curve compression). - -@item partitions (@emph{partitions}) -Set partitions to consider as a comma-separated list of. Possible -values in the list: - -@table @samp -@item p8x8 -8x8 P-frame partition. -@item p4x4 -4x4 P-frame partition. -@item b8x8 -4x4 B-frame partition. -@item i8x8 -8x8 I-frame partition. -@item i4x4 -4x4 I-frame partition. -(Enabling @samp{p4x4} requires @samp{p8x8} to be enabled. Enabling -@samp{i8x8} requires adaptive spatial transform (@option{8x8dct} -option) to be enabled.) -@item none (@emph{none}) -Do not consider any partitions. -@item all (@emph{all}) -Consider every partition. -@end table - -@item direct-pred (@emph{direct}) -Set direct MV prediction mode. Possible values: - -@table @samp -@item none (@emph{none}) -Disable MV prediction. -@item spatial (@emph{spatial}) -Enable spatial predicting. -@item temporal (@emph{temporal}) -Enable temporal predicting. -@item auto (@emph{auto}) -Automatically decided. -@end table - -@item slice-max-size (@emph{slice-max-size}) -Set the limit of the size of each slice in bytes. If not specified -but RTP payload size (@option{ps}) is specified, that is used. - -@item stats (@emph{stats}) -Set the file name for multi-pass stats. - -@item nal-hrd (@emph{nal-hrd}) -Set signal HRD information (requires @option{vbv-bufsize} to be set). -Possible values: - -@table @samp -@item none (@emph{none}) -Disable HRD information signaling. -@item vbr (@emph{vbr}) -Variable bit rate. -@item cbr (@emph{cbr}) -Constant bit rate (not allowed in MP4 container). -@end table - -@item x264opts (N.A.) -Set any x264 option, see @command{x264 --fullhelp} for a list. - -Argument is a list of @var{key}=@var{value} couples separated by -":". In @var{filter} and @var{psy-rd} options that use ":" as a separator -themselves, use "," instead. They accept it as well since long ago but this -is kept undocumented for some reason. - -For example to specify libx264 encoding options with @command{ffmpeg}: -@example -ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv -@end example - -@item x264-params (N.A.) -Override the x264 configuration using a :-separated list of key=value -parameters. - -This option is functionally the same as the @option{x264opts}, but is -duplicated for compability with the Libav fork. - -For example to specify libx264 encoding options with @command{ffmpeg}: -@example -ffmpeg -i INPUT -c:v libx264 -x264-params level=30:bframes=0:weightp=0:\ -cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:\ -no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT -@end example -@end table - -Encoding ffpresets for common usages are provided so they can be used with the -general presets system (e.g. passing the @option{pre} option). 
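-
-As a further illustration (the preset and CRF values are arbitrary, not
-recommendations), a simple single-pass constant-quality encode using the
-encoder's own @option{preset} and @option{crf} options could be:
-@example
-ffmpeg -i INPUT -c:v libx264 -preset slow -crf 22 -c:a copy OUTPUT.mkv
-@end example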
- -@section libxvid - -Xvid MPEG-4 Part 2 encoder wrapper. - -This encoder requires the presence of the libxvidcore headers and library -during configuration. You need to explicitly configure the build with -@code{--enable-libxvid --enable-gpl}. - -The native @code{mpeg4} encoder supports the MPEG-4 Part 2 format, so -users can encode to this format without this library. - -@subsection Options - -The following options are supported by the libxvid wrapper. Some of -the following options are listed but are not documented, and -correspond to shared codec options. See @ref{codec-options,,the Codec -Options chapter} for their documentation. The other shared options -which are not listed have no effect for the libxvid encoder. - -@table @option -@item b - -@item g - -@item qmin - -@item qmax - -@item mpeg_quant - -@item threads - -@item bf - -@item b_qfactor - -@item b_qoffset - -@item flags -Set specific encoding flags. Possible values: - -@table @samp - -@item mv4 -Use four motion vector by macroblock. - -@item aic -Enable high quality AC prediction. - -@item gray -Only encode grayscale. - -@item gmc -Enable the use of global motion compensation (GMC). - -@item qpel -Enable quarter-pixel motion compensation. - -@item cgop -Enable closed GOP. - -@item global_header -Place global headers in extradata instead of every keyframe. - -@end table - -@item trellis - -@item me_method -Set motion estimation method. Possible values in decreasing order of -speed and increasing order of quality: - -@table @samp -@item zero -Use no motion estimation (default). - -@item phods -@item x1 -@item log -Enable advanced diamond zonal search for 16x16 blocks and half-pixel -refinement for 16x16 blocks. @samp{x1} and @samp{log} are aliases for -@samp{phods}. - -@item epzs -Enable all of the things described above, plus advanced diamond zonal -search for 8x8 blocks, half-pixel refinement for 8x8 blocks, and motion -estimation on chroma planes. - -@item full -Enable all of the things described above, plus extended 16x16 and 8x8 -blocks search. -@end table - -@item mbd -Set macroblock decision algorithm. Possible values in the increasing -order of quality: - -@table @samp -@item simple -Use macroblock comparing function algorithm (default). - -@item bits -Enable rate distortion-based half pixel and quarter pixel refinement for -16x16 blocks. - -@item rd -Enable all of the things described above, plus rate distortion-based -half pixel and quarter pixel refinement for 8x8 blocks, and rate -distortion-based search using square pattern. -@end table - -@item lumi_aq -Enable lumi masking adaptive quantization when set to 1. Default is 0 -(disabled). - -@item variance_aq -Enable variance adaptive quantization when set to 1. Default is 0 -(disabled). - -When combined with @option{lumi_aq}, the resulting quality will not -be better than any of the two specified individually. In other -words, the resulting quality will be the worse one of the two -effects. - -@item ssim -Set structural similarity (SSIM) displaying method. Possible values: - -@table @samp -@item off -Disable displaying of SSIM information. - -@item avg -Output average SSIM at the end of encoding to stdout. The format of -showing the average SSIM is: - -@example -Average SSIM: %f -@end example - -For users who are not familiar with C, %f means a float number, or -a decimal (e.g. 0.939232). - -@item frame -Output both per-frame SSIM data during encoding and average SSIM at -the end of encoding to stdout. 
-
-@section png
-
-PNG image encoder.
-
-@subsection Private options
-
-@table @option
-@item dpi @var{integer}
-Set the physical density of pixels, in dots per inch. Unset by default.
-@item dpm @var{integer}
-Set the physical density of pixels, in dots per meter. Unset by default.
-@end table
-
-@section ProRes
-
-Apple ProRes encoder.
-
-FFmpeg contains two ProRes encoders, @code{prores-aw} and @code{prores-ks}.
-The encoder to use can be chosen with the @code{-vcodec} option.
-
-@subsection Private Options for prores-ks
-
-@table @option
-@item profile @var{integer}
-Select the ProRes profile to encode:
-@table @samp
-@item proxy
-@item lt
-@item standard
-@item hq
-@item 4444
-@end table
-
-@item quant_mat @var{integer}
-Select the quantization matrix:
-@table @samp
-@item auto
-@item default
-@item proxy
-@item lt
-@item standard
-@item hq
-@end table
-If set to @var{auto}, the matrix matching the profile will be picked.
-If not set, the matrix providing the highest quality, @var{default}, will be
-picked.
-
-@item bits_per_mb @var{integer}
-How many bits to allot for coding one macroblock. Different profiles use
-between 200 and 2400 bits per macroblock; the maximum is 8000.
-
-@item mbs_per_slice @var{integer}
-Number of macroblocks in each slice (1-8); the default value (8)
-should be good in almost all situations.
-
-@item vendor @var{string}
-Override the 4-byte vendor ID.
-A custom vendor ID like @var{apl0} would claim the stream was produced by
-the Apple encoder.
-
-@item alpha_bits @var{integer}
-Specify the number of bits for the alpha component.
-Possible values are @var{0}, @var{8} and @var{16}.
-Use @var{0} to disable alpha plane coding.
-
-@end table
-
-@subsection Speed considerations
-
-In the default mode of operation the encoder has to honor frame size
-constraints (i.e. not produce frames bigger than requested) while still
-making the output picture as good as possible.
-A frame containing a lot of small details is harder to compress, and the
-encoder will spend more time searching for appropriate quantizers for each
-slice.
-
-Setting a higher @option{bits_per_mb} limit will improve the speed.
-
-For the fastest encoding speed set the @option{qscale} parameter (4 is the
-recommended value) and do not set a size constraint.
-
-@c man end VIDEO ENCODERS
diff --git a/ffmpeg/doc/errno.txt b/ffmpeg/doc/errno.txt deleted file mode 100644 index 31cab26..0000000 --- a/ffmpeg/doc/errno.txt +++ /dev/null @@ -1,174 +0,0 @@
-The following table lists most error codes found in various operating
-systems supported by FFmpeg.
-
- OS
-Code Std F LBMWwb Text (YMMV)
-
-E2BIG POSIX ++++++ Argument list too long
-EACCES POSIX ++++++ Permission denied
-EADDRINUSE POSIX +++..+ Address in use
-EADDRNOTAVAIL POSIX +++..+ Cannot assign requested address
-EADV +..... Advertise error
-EAFNOSUPPORT POSIX +++..+ Address family not supported
-EAGAIN POSIX + ++++++ Resource temporarily unavailable
-EALREADY POSIX +++..+ Operation already in progress
-EAUTH .++... Authentication error
-EBADARCH ..+... Bad CPU type in executable
-EBADE +..... Invalid exchange
-EBADEXEC ..+...
Bad executable -EBADF POSIX ++++++ Bad file descriptor -EBADFD +..... File descriptor in bad state -EBADMACHO ..+... Malformed Macho file -EBADMSG POSIX ++4... Bad message -EBADR +..... Invalid request descriptor -EBADRPC .++... RPC struct is bad -EBADRQC +..... Invalid request code -EBADSLT +..... Invalid slot -EBFONT +..... Bad font file format -EBUSY POSIX - ++++++ Device or resource busy -ECANCELED POSIX +++... Operation canceled -ECHILD POSIX ++++++ No child processes -ECHRNG +..... Channel number out of range -ECOMM +..... Communication error on send -ECONNABORTED POSIX +++..+ Software caused connection abort -ECONNREFUSED POSIX - +++ss+ Connection refused -ECONNRESET POSIX +++..+ Connection reset -EDEADLK POSIX ++++++ Resource deadlock avoided -EDEADLOCK +..++. File locking deadlock error -EDESTADDRREQ POSIX +++... Destination address required -EDEVERR ..+... Device error -EDOM C89 - ++++++ Numerical argument out of domain -EDOOFUS .F.... Programming error -EDOTDOT +..... RFS specific error -EDQUOT POSIX +++... Disc quota exceeded -EEXIST POSIX ++++++ File exists -EFAULT POSIX - ++++++ Bad address -EFBIG POSIX - ++++++ File too large -EFTYPE .++... Inappropriate file type or format -EHOSTDOWN +++... Host is down -EHOSTUNREACH POSIX +++..+ No route to host -EHWPOISON +..... Memory page has hardware error -EIDRM POSIX +++... Identifier removed -EILSEQ C99 ++++++ Illegal byte sequence -EINPROGRESS POSIX - +++ss+ Operation in progress -EINTR POSIX - ++++++ Interrupted system call -EINVAL POSIX + ++++++ Invalid argument -EIO POSIX + ++++++ I/O error -EISCONN POSIX +++..+ Socket is already connected -EISDIR POSIX ++++++ Is a directory -EISNAM +..... Is a named type file -EKEYEXPIRED +..... Key has expired -EKEYREJECTED +..... Key was rejected by service -EKEYREVOKED +..... Key has been revoked -EL2HLT +..... Level 2 halted -EL2NSYNC +..... Level 2 not synchronized -EL3HLT +..... Level 3 halted -EL3RST +..... Level 3 reset -ELIBACC +..... Can not access a needed shared library -ELIBBAD +..... Accessing a corrupted shared library -ELIBEXEC +..... Cannot exec a shared library directly -ELIBMAX +..... Too many shared libraries -ELIBSCN +..... .lib section in a.out corrupted -ELNRNG +..... Link number out of range -ELOOP POSIX +++..+ Too many levels of symbolic links -EMEDIUMTYPE +..... Wrong medium type -EMFILE POSIX ++++++ Too many open files -EMLINK POSIX ++++++ Too many links -EMSGSIZE POSIX +++..+ Message too long -EMULTIHOP POSIX ++4... Multihop attempted -ENAMETOOLONG POSIX - ++++++ Filen ame too long -ENAVAIL +..... No XENIX semaphores available -ENEEDAUTH .++... Need authenticator -ENETDOWN POSIX +++..+ Network is down -ENETRESET SUSv3 +++..+ Network dropped connection on reset -ENETUNREACH POSIX +++..+ Network unreachable -ENFILE POSIX ++++++ Too many open files in system -ENOANO +..... No anode -ENOATTR .++... Attribute not found -ENOBUFS POSIX - +++..+ No buffer space available -ENOCSI +..... No CSI structure available -ENODATA XSR +N4... No message available -ENODEV POSIX - ++++++ No such device -ENOENT POSIX - ++++++ No such file or directory -ENOEXEC POSIX ++++++ Exec format error -ENOFILE ...++. No such file or directory -ENOKEY +..... Required key not available -ENOLCK POSIX ++++++ No locks available -ENOLINK POSIX ++4... Link has been severed -ENOMEDIUM +..... No medium found -ENOMEM POSIX ++++++ Not enough space -ENOMSG POSIX +++..+ No message of desired type -ENONET +..... Machine is not on the network -ENOPKG +..... 
Package not installed -ENOPROTOOPT POSIX +++..+ Protocol not available -ENOSPC POSIX ++++++ No space left on device -ENOSR XSR +N4... No STREAM resources -ENOSTR XSR +N4... Not a STREAM -ENOSYS POSIX + ++++++ Function not implemented -ENOTBLK +++... Block device required -ENOTCONN POSIX +++..+ Socket is not connected -ENOTDIR POSIX ++++++ Not a directory -ENOTEMPTY POSIX ++++++ Directory not empty -ENOTNAM +..... Not a XENIX named type file -ENOTRECOVERABLE SUSv4 - +..... State not recoverable -ENOTSOCK POSIX +++..+ Socket operation on non-socket -ENOTSUP POSIX +++... Operation not supported -ENOTTY POSIX ++++++ Inappropriate I/O control operation -ENOTUNIQ +..... Name not unique on network -ENXIO POSIX ++++++ No such device or address -EOPNOTSUPP POSIX +++..+ Operation not supported (on socket) -EOVERFLOW POSIX +++..+ Value too large to be stored in data type -EOWNERDEAD SUSv4 +..... Owner died -EPERM POSIX - ++++++ Operation not permitted -EPFNOSUPPORT +++..+ Protocol family not supported -EPIPE POSIX - ++++++ Broken pipe -EPROCLIM .++... Too many processes -EPROCUNAVAIL .++... Bad procedure for program -EPROGMISMATCH .++... Program version wrong -EPROGUNAVAIL .++... RPC prog. not avail -EPROTO POSIX ++4... Protocol error -EPROTONOSUPPORT POSIX - +++ss+ Protocol not supported -EPROTOTYPE POSIX +++..+ Protocol wrong type for socket -EPWROFF ..+... Device power is off -ERANGE C89 - ++++++ Result too large -EREMCHG +..... Remote address changed -EREMOTE +++... Object is remote -EREMOTEIO +..... Remote I/O error -ERESTART +..... Interrupted system call should be restarted -ERFKILL +..... Operation not possible due to RF-kill -EROFS POSIX ++++++ Read-only file system -ERPCMISMATCH .++... RPC version wrong -ESHLIBVERS ..+... Shared library version mismatch -ESHUTDOWN +++..+ Cannot send after socket shutdown -ESOCKTNOSUPPORT +++... Socket type not supported -ESPIPE POSIX ++++++ Illegal seek -ESRCH POSIX ++++++ No such process -ESRMNT +..... Srmount error -ESTALE POSIX +++..+ Stale NFS file handle -ESTRPIPE +..... Streams pipe error -ETIME XSR +N4... Stream ioctl timeout -ETIMEDOUT POSIX - +++ss+ Connection timed out -ETOOMANYREFS +++... Too many references: cannot splice -ETXTBSY POSIX +++... Text file busy -EUCLEAN +..... Structure needs cleaning -EUNATCH +..... Protocol driver not attached -EUSERS +++... Too many users -EWOULDBLOCK POSIX +++..+ Operation would block -EXDEV POSIX ++++++ Cross-device link -EXFULL +..... 
Exchange full - -Notations: - -F: used in FFmpeg (-: a few times, +: a lot) - -SUSv3: Single Unix Specification, version 3 -SUSv4: Single Unix Specification, version 4 -XSR: XSI STREAMS (obsolete) - -OS: availability on some supported operating systems -L: GNU/Linux -B: BSD (F: FreeBSD, N: NetBSD) -M: MacOS X -W: Microsoft Windows (s: emulated with winsock, see libavformat/network.h) -w: Mingw32 (3.17) and Mingw64 (2.0.1) -b: BeOS diff --git a/ffmpeg/doc/examples/Makefile b/ffmpeg/doc/examples/Makefile deleted file mode 100644 index f085532..0000000 --- a/ffmpeg/doc/examples/Makefile +++ /dev/null @@ -1,39 +0,0 @@ -# use pkg-config for getting CFLAGS and LDLIBS -FFMPEG_LIBS= libavdevice \ - libavformat \ - libavfilter \ - libavcodec \ - libswresample \ - libswscale \ - libavutil \ - -CFLAGS += -Wall -g -CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS) -LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS) - -EXAMPLES= decoding_encoding \ - demuxing_decoding \ - filtering_video \ - filtering_audio \ - metadata \ - muxing \ - resampling_audio \ - scaling_video \ - transcode_aac \ - -OBJS=$(addsuffix .o,$(EXAMPLES)) - -# the following examples make explicit use of the math library -decoding_encoding: LDLIBS += -lm -muxing: LDLIBS += -lm -resampling_audio: LDLIBS += -lm - -.phony: all clean-test clean - -all: $(OBJS) $(EXAMPLES) - -clean-test: - $(RM) test*.pgm test.h264 test.mp2 test.sw test.mpg - -clean: clean-test - $(RM) $(EXAMPLES) $(OBJS) diff --git a/ffmpeg/doc/examples/README b/ffmpeg/doc/examples/README deleted file mode 100644 index c1ce619..0000000 --- a/ffmpeg/doc/examples/README +++ /dev/null @@ -1,23 +0,0 @@ -FFmpeg examples README ----------------------- - -Both following use cases rely on pkg-config and make, thus make sure -that you have them installed and working on your system. - - -Method 1: build the installed examples in a generic read/write user directory - -Copy to a read/write user directory and just use "make", it will link -to the libraries on your system, assuming the PKG_CONFIG_PATH is -correctly configured. - -Method 2: build the examples in-tree - -Assuming you are in the source FFmpeg checkout directory, you need to build -FFmpeg (no need to make install in any prefix). Then just run "make examples". -This will build the examples using the FFmpeg build system. You can clean those -examples using "make examplesclean" - -If you want to try the dedicated Makefile examples (to emulate the first -method), go into doc/examples and run a command such as -PKG_CONFIG_PATH=pc-uninstalled make. diff --git a/ffmpeg/doc/examples/decoding_encoding.c b/ffmpeg/doc/examples/decoding_encoding.c deleted file mode 100644 index 08e8b92..0000000 --- a/ffmpeg/doc/examples/decoding_encoding.c +++ /dev/null @@ -1,653 +0,0 @@ -/* - * Copyright (c) 2001 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -/** - * @file - * libavcodec API use example. - * - * Note that libavcodec only handles codecs (mpeg, mpeg4, etc...), - * not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...). See library 'libavformat' for the - * format handling - * @example doc/examples/decoding_encoding.c - */ - -#include <math.h> - -#include <libavutil/opt.h> -#include <libavcodec/avcodec.h> -#include <libavutil/channel_layout.h> -#include <libavutil/common.h> -#include <libavutil/imgutils.h> -#include <libavutil/mathematics.h> -#include <libavutil/samplefmt.h> - -#define INBUF_SIZE 4096 -#define AUDIO_INBUF_SIZE 20480 -#define AUDIO_REFILL_THRESH 4096 - -/* check that a given sample format is supported by the encoder */ -static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt) -{ - const enum AVSampleFormat *p = codec->sample_fmts; - - while (*p != AV_SAMPLE_FMT_NONE) { - if (*p == sample_fmt) - return 1; - p++; - } - return 0; -} - -/* just pick the highest supported samplerate */ -static int select_sample_rate(AVCodec *codec) -{ - const int *p; - int best_samplerate = 0; - - if (!codec->supported_samplerates) - return 44100; - - p = codec->supported_samplerates; - while (*p) { - best_samplerate = FFMAX(*p, best_samplerate); - p++; - } - return best_samplerate; -} - -/* select layout with the highest channel count */ -static int select_channel_layout(AVCodec *codec) -{ - const uint64_t *p; - uint64_t best_ch_layout = 0; - int best_nb_channels = 0; - - if (!codec->channel_layouts) - return AV_CH_LAYOUT_STEREO; - - p = codec->channel_layouts; - while (*p) { - int nb_channels = av_get_channel_layout_nb_channels(*p); - - if (nb_channels > best_nb_channels) { - best_ch_layout = *p; - best_nb_channels = nb_channels; - } - p++; - } - return best_ch_layout; -} - -/* - * Audio encoding example - */ -static void audio_encode_example(const char *filename) -{ - AVCodec *codec; - AVCodecContext *c= NULL; - AVFrame *frame; - AVPacket pkt; - int i, j, k, ret, got_output; - int buffer_size; - FILE *f; - uint16_t *samples; - float t, tincr; - - printf("Encode audio file %s\n", filename); - - /* find the MP2 encoder */ - codec = avcodec_find_encoder(AV_CODEC_ID_MP2); - if (!codec) { - fprintf(stderr, "Codec not found\n"); - exit(1); - } - - c = avcodec_alloc_context3(codec); - if (!c) { - fprintf(stderr, "Could not allocate audio codec context\n"); - exit(1); - } - - /* put sample parameters */ - c->bit_rate = 64000; - - /* check that the encoder supports s16 pcm input */ - c->sample_fmt = AV_SAMPLE_FMT_S16; - if (!check_sample_fmt(codec, c->sample_fmt)) { - fprintf(stderr, "Encoder does not support sample format %s", - av_get_sample_fmt_name(c->sample_fmt)); - exit(1); - } - - /* select other audio parameters supported by the encoder */ - c->sample_rate = select_sample_rate(codec); - c->channel_layout = select_channel_layout(codec); - c->channels = av_get_channel_layout_nb_channels(c->channel_layout); - - /* open it */ - if (avcodec_open2(c, codec, NULL) < 0) { - fprintf(stderr, "Could not open 
codec\n"); - exit(1); - } - - f = fopen(filename, "wb"); - if (!f) { - fprintf(stderr, "Could not open %s\n", filename); - exit(1); - } - - /* frame containing input raw audio */ - frame = av_frame_alloc(); - if (!frame) { - fprintf(stderr, "Could not allocate audio frame\n"); - exit(1); - } - - frame->nb_samples = c->frame_size; - frame->format = c->sample_fmt; - frame->channel_layout = c->channel_layout; - - /* the codec gives us the frame size, in samples, - * we calculate the size of the samples buffer in bytes */ - buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size, - c->sample_fmt, 0); - if (buffer_size < 0) { - fprintf(stderr, "Could not get sample buffer size\n"); - exit(1); - } - samples = av_malloc(buffer_size); - if (!samples) { - fprintf(stderr, "Could not allocate %d bytes for samples buffer\n", - buffer_size); - exit(1); - } - /* setup the data pointers in the AVFrame */ - ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt, - (const uint8_t*)samples, buffer_size, 0); - if (ret < 0) { - fprintf(stderr, "Could not setup audio frame\n"); - exit(1); - } - - /* encode a single tone sound */ - t = 0; - tincr = 2 * M_PI * 440.0 / c->sample_rate; - for(i=0;i<200;i++) { - av_init_packet(&pkt); - pkt.data = NULL; // packet data will be allocated by the encoder - pkt.size = 0; - - for (j = 0; j < c->frame_size; j++) { - samples[2*j] = (int)(sin(t) * 10000); - - for (k = 1; k < c->channels; k++) - samples[2*j + k] = samples[2*j]; - t += tincr; - } - /* encode the samples */ - ret = avcodec_encode_audio2(c, &pkt, frame, &got_output); - if (ret < 0) { - fprintf(stderr, "Error encoding audio frame\n"); - exit(1); - } - if (got_output) { - fwrite(pkt.data, 1, pkt.size, f); - av_free_packet(&pkt); - } - } - - /* get the delayed frames */ - for (got_output = 1; got_output; i++) { - ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output); - if (ret < 0) { - fprintf(stderr, "Error encoding frame\n"); - exit(1); - } - - if (got_output) { - fwrite(pkt.data, 1, pkt.size, f); - av_free_packet(&pkt); - } - } - fclose(f); - - av_freep(&samples); - av_frame_free(&frame); - avcodec_close(c); - av_free(c); -} - -/* - * Audio decoding. 
- */ -static void audio_decode_example(const char *outfilename, const char *filename) -{ - AVCodec *codec; - AVCodecContext *c= NULL; - int len; - FILE *f, *outfile; - uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE]; - AVPacket avpkt; - AVFrame *decoded_frame = NULL; - - av_init_packet(&avpkt); - - printf("Decode audio file %s to %s\n", filename, outfilename); - - /* find the mpeg audio decoder */ - codec = avcodec_find_decoder(AV_CODEC_ID_MP2); - if (!codec) { - fprintf(stderr, "Codec not found\n"); - exit(1); - } - - c = avcodec_alloc_context3(codec); - if (!c) { - fprintf(stderr, "Could not allocate audio codec context\n"); - exit(1); - } - - /* open it */ - if (avcodec_open2(c, codec, NULL) < 0) { - fprintf(stderr, "Could not open codec\n"); - exit(1); - } - - f = fopen(filename, "rb"); - if (!f) { - fprintf(stderr, "Could not open %s\n", filename); - exit(1); - } - outfile = fopen(outfilename, "wb"); - if (!outfile) { - av_free(c); - exit(1); - } - - /* decode until eof */ - avpkt.data = inbuf; - avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f); - - while (avpkt.size > 0) { - int got_frame = 0; - - if (!decoded_frame) { - if (!(decoded_frame = av_frame_alloc())) { - fprintf(stderr, "Could not allocate audio frame\n"); - exit(1); - } - } - - len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt); - if (len < 0) { - fprintf(stderr, "Error while decoding\n"); - exit(1); - } - if (got_frame) { - /* if a frame has been decoded, output it */ - int data_size = av_samples_get_buffer_size(NULL, c->channels, - decoded_frame->nb_samples, - c->sample_fmt, 1); - fwrite(decoded_frame->data[0], 1, data_size, outfile); - } - avpkt.size -= len; - avpkt.data += len; - avpkt.dts = - avpkt.pts = AV_NOPTS_VALUE; - if (avpkt.size < AUDIO_REFILL_THRESH) { - /* Refill the input buffer, to avoid trying to decode - * incomplete frames. Instead of this, one could also use - * a parser, or use a proper container format through - * libavformat. 
*/ - memmove(inbuf, avpkt.data, avpkt.size); - avpkt.data = inbuf; - len = fread(avpkt.data + avpkt.size, 1, - AUDIO_INBUF_SIZE - avpkt.size, f); - if (len > 0) - avpkt.size += len; - } - } - - fclose(outfile); - fclose(f); - - avcodec_close(c); - av_free(c); - av_frame_free(&decoded_frame); -} - -/* - * Video encoding example - */ -static void video_encode_example(const char *filename, int codec_id) -{ - AVCodec *codec; - AVCodecContext *c= NULL; - int i, ret, x, y, got_output; - FILE *f; - AVFrame *frame; - AVPacket pkt; - uint8_t endcode[] = { 0, 0, 1, 0xb7 }; - - printf("Encode video file %s\n", filename); - - /* find the mpeg1 video encoder */ - codec = avcodec_find_encoder(codec_id); - if (!codec) { - fprintf(stderr, "Codec not found\n"); - exit(1); - } - - c = avcodec_alloc_context3(codec); - if (!c) { - fprintf(stderr, "Could not allocate video codec context\n"); - exit(1); - } - - /* put sample parameters */ - c->bit_rate = 400000; - /* resolution must be a multiple of two */ - c->width = 352; - c->height = 288; - /* frames per second */ - c->time_base= (AVRational){1,25}; - c->gop_size = 10; /* emit one intra frame every ten frames */ - c->max_b_frames=1; - c->pix_fmt = AV_PIX_FMT_YUV420P; - - if(codec_id == AV_CODEC_ID_H264) - av_opt_set(c->priv_data, "preset", "slow", 0); - - /* open it */ - if (avcodec_open2(c, codec, NULL) < 0) { - fprintf(stderr, "Could not open codec\n"); - exit(1); - } - - f = fopen(filename, "wb"); - if (!f) { - fprintf(stderr, "Could not open %s\n", filename); - exit(1); - } - - frame = av_frame_alloc(); - if (!frame) { - fprintf(stderr, "Could not allocate video frame\n"); - exit(1); - } - frame->format = c->pix_fmt; - frame->width = c->width; - frame->height = c->height; - - /* the image can be allocated by any means and av_image_alloc() is - * just the most convenient way if av_malloc() is to be used */ - ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height, - c->pix_fmt, 32); - if (ret < 0) { - fprintf(stderr, "Could not allocate raw picture buffer\n"); - exit(1); - } - - /* encode 1 second of video */ - for(i=0;i<25;i++) { - av_init_packet(&pkt); - pkt.data = NULL; // packet data will be allocated by the encoder - pkt.size = 0; - - fflush(stdout); - /* prepare a dummy image */ - /* Y */ - for(y=0;y<c->height;y++) { - for(x=0;x<c->width;x++) { - frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3; - } - } - - /* Cb and Cr */ - for(y=0;y<c->height/2;y++) { - for(x=0;x<c->width/2;x++) { - frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2; - frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5; - } - } - - frame->pts = i; - - /* encode the image */ - ret = avcodec_encode_video2(c, &pkt, frame, &got_output); - if (ret < 0) { - fprintf(stderr, "Error encoding frame\n"); - exit(1); - } - - if (got_output) { - printf("Write frame %3d (size=%5d)\n", i, pkt.size); - fwrite(pkt.data, 1, pkt.size, f); - av_free_packet(&pkt); - } - } - - /* get the delayed frames */ - for (got_output = 1; got_output; i++) { - fflush(stdout); - - ret = avcodec_encode_video2(c, &pkt, NULL, &got_output); - if (ret < 0) { - fprintf(stderr, "Error encoding frame\n"); - exit(1); - } - - if (got_output) { - printf("Write frame %3d (size=%5d)\n", i, pkt.size); - fwrite(pkt.data, 1, pkt.size, f); - av_free_packet(&pkt); - } - } - - /* add sequence end code to have a real mpeg file */ - fwrite(endcode, 1, sizeof(endcode), f); - fclose(f); - - avcodec_close(c); - av_free(c); - av_freep(&frame->data[0]); - av_frame_free(&frame); - printf("\n"); -} 
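
/*
 * Editorial sketch (not part of the original example; the helper name is
 * illustrative only): the encode loop and the "delayed frames" loop above
 * share the same submit/write/unref pattern, which can be factored out.
 * Calling this helper with picture == NULL drains the encoder; keep calling
 * it until it returns 0 (no more buffered output).
 */
static int encode_and_write(AVCodecContext *enc, AVFrame *picture, FILE *out)
{
    AVPacket pkt;
    int got_output, ret;

    av_init_packet(&pkt);
    pkt.data = NULL; /* packet data will be allocated by the encoder */
    pkt.size = 0;

    ret = avcodec_encode_video2(enc, &pkt, picture, &got_output);
    if (ret < 0)
        return ret;

    if (got_output) {
        fwrite(pkt.data, 1, pkt.size, out);
        av_free_packet(&pkt);
    }
    return got_output; /* 1 if a packet was written, 0 otherwise */
}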
- -/* - * Video decoding example - */ - -static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize, - char *filename) -{ - FILE *f; - int i; - - f=fopen(filename,"w"); - fprintf(f,"P5\n%d %d\n%d\n",xsize,ysize,255); - for(i=0;i<ysize;i++) - fwrite(buf + i * wrap,1,xsize,f); - fclose(f); -} - -static int decode_write_frame(const char *outfilename, AVCodecContext *avctx, - AVFrame *frame, int *frame_count, AVPacket *pkt, int last) -{ - int len, got_frame; - char buf[1024]; - - len = avcodec_decode_video2(avctx, frame, &got_frame, pkt); - if (len < 0) { - fprintf(stderr, "Error while decoding frame %d\n", *frame_count); - return len; - } - if (got_frame) { - printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count); - fflush(stdout); - - /* the picture is allocated by the decoder, no need to free it */ - snprintf(buf, sizeof(buf), outfilename, *frame_count); - pgm_save(frame->data[0], frame->linesize[0], - avctx->width, avctx->height, buf); - (*frame_count)++; - } - if (pkt->data) { - pkt->size -= len; - pkt->data += len; - } - return 0; -} - -static void video_decode_example(const char *outfilename, const char *filename) -{ - AVCodec *codec; - AVCodecContext *c= NULL; - int frame_count; - FILE *f; - AVFrame *frame; - uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE]; - AVPacket avpkt; - - av_init_packet(&avpkt); - - /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */ - memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE); - - printf("Decode video file %s to %s\n", filename, outfilename); - - /* find the mpeg1 video decoder */ - codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO); - if (!codec) { - fprintf(stderr, "Codec not found\n"); - exit(1); - } - - c = avcodec_alloc_context3(codec); - if (!c) { - fprintf(stderr, "Could not allocate video codec context\n"); - exit(1); - } - - if(codec->capabilities&CODEC_CAP_TRUNCATED) - c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */ - - /* For some codecs, such as msmpeg4 and mpeg4, width and height - MUST be initialized there because this information is not - available in the bitstream. */ - - /* open it */ - if (avcodec_open2(c, codec, NULL) < 0) { - fprintf(stderr, "Could not open codec\n"); - exit(1); - } - - f = fopen(filename, "rb"); - if (!f) { - fprintf(stderr, "Could not open %s\n", filename); - exit(1); - } - - frame = av_frame_alloc(); - if (!frame) { - fprintf(stderr, "Could not allocate video frame\n"); - exit(1); - } - - frame_count = 0; - for(;;) { - avpkt.size = fread(inbuf, 1, INBUF_SIZE, f); - if (avpkt.size == 0) - break; - - /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio) - and this is the only method to use them because you cannot - know the compressed data size before analysing it. - - BUT some other codecs (msmpeg4, mpeg4) are inherently frame - based, so you must call them with all the data for one - frame exactly. You must also initialize 'width' and - 'height' before initializing them. */ - - /* NOTE2: some codecs allow the raw parameters (frame size, - sample rate) to be changed at any frame. We handle this, so - you should also take care of it */ - - /* here, we use a stream based decoder (mpeg1video), so we - feed decoder and see if it could decode a frame */ - avpkt.data = inbuf; - while (avpkt.size > 0) - if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0) - exit(1); - } - - /* some codecs, such as MPEG, transmit the I and P frame with a - latency of one frame. 
You must do the following to have a - chance to get the last frame of the video */ - avpkt.data = NULL; - avpkt.size = 0; - decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1); - - fclose(f); - - avcodec_close(c); - av_free(c); - av_frame_free(&frame); - printf("\n"); -} - -int main(int argc, char **argv) -{ - const char *output_type; - - /* register all the codecs */ - avcodec_register_all(); - - if (argc < 2) { - printf("usage: %s output_type\n" - "API example program to decode/encode a media stream with libavcodec.\n" - "This program generates a synthetic stream and encodes it to a file\n" - "named test.h264, test.mp2 or test.mpg depending on output_type.\n" - "The encoded stream is then decoded and written to a raw data output.\n" - "output_type must be choosen between 'h264', 'mp2', 'mpg'.\n", - argv[0]); - return 1; - } - output_type = argv[1]; - - if (!strcmp(output_type, "h264")) { - video_encode_example("test.h264", AV_CODEC_ID_H264); - } else if (!strcmp(output_type, "mp2")) { - audio_encode_example("test.mp2"); - audio_decode_example("test.sw", "test.mp2"); - } else if (!strcmp(output_type, "mpg")) { - video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO); - video_decode_example("test%02d.pgm", "test.mpg"); - } else { - fprintf(stderr, "Invalid output type '%s', choose between 'h264', 'mp2', or 'mpg'\n", - output_type); - return 1; - } - - return 0; -} diff --git a/ffmpeg/doc/examples/filtering_audio.c b/ffmpeg/doc/examples/filtering_audio.c deleted file mode 100644 index 1d66ca3..0000000 --- a/ffmpeg/doc/examples/filtering_audio.c +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Copyright (c) 2010 Nicolas George - * Copyright (c) 2011 Stefano Sabatini - * Copyright (c) 2012 Clément Bœsch - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -/** - * @file - * API example for audio decoding and filtering - * @example doc/examples/filtering_audio.c - */ - -#include <unistd.h> - -#include <libavcodec/avcodec.h> -#include <libavformat/avformat.h> -#include <libavfilter/avfiltergraph.h> -#include <libavfilter/avcodec.h> -#include <libavfilter/buffersink.h> -#include <libavfilter/buffersrc.h> -#include <libavutil/opt.h> - -static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono"; -static const char *player = "ffplay -f s16le -ar 8000 -ac 1 -"; - -static AVFormatContext *fmt_ctx; -static AVCodecContext *dec_ctx; -AVFilterContext *buffersink_ctx; -AVFilterContext *buffersrc_ctx; -AVFilterGraph *filter_graph; -static int audio_stream_index = -1; - -static int open_input_file(const char *filename) -{ - int ret; - AVCodec *dec; - - if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n"); - return ret; - } - - if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n"); - return ret; - } - - /* select the audio stream */ - ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot find a audio stream in the input file\n"); - return ret; - } - audio_stream_index = ret; - dec_ctx = fmt_ctx->streams[audio_stream_index]->codec; - av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0); - - /* init the audio decoder */ - if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n"); - return ret; - } - - return 0; -} - -static int init_filters(const char *filters_descr) -{ - char args[512]; - int ret = 0; - AVFilter *abuffersrc = avfilter_get_by_name("abuffer"); - AVFilter *abuffersink = avfilter_get_by_name("abuffersink"); - AVFilterInOut *outputs = avfilter_inout_alloc(); - AVFilterInOut *inputs = avfilter_inout_alloc(); - static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 }; - static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 }; - static const int out_sample_rates[] = { 8000, -1 }; - const AVFilterLink *outlink; - AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base; - - filter_graph = avfilter_graph_alloc(); - if (!outputs || !inputs || !filter_graph) { - ret = AVERROR(ENOMEM); - goto end; - } - - /* buffer audio source: the decoded frames from the decoder will be inserted here. */ - if (!dec_ctx->channel_layout) - dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels); - snprintf(args, sizeof(args), - "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64, - time_base.num, time_base.den, dec_ctx->sample_rate, - av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout); - ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in", - args, NULL, filter_graph); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n"); - goto end; - } - - /* buffer audio sink: to terminate the filter chain. 
*/ - ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out", - NULL, NULL, filter_graph); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n"); - goto end; - } - - ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1, - AV_OPT_SEARCH_CHILDREN); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n"); - goto end; - } - - ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1, - AV_OPT_SEARCH_CHILDREN); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n"); - goto end; - } - - ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1, - AV_OPT_SEARCH_CHILDREN); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n"); - goto end; - } - - /* Endpoints for the filter graph. */ - outputs->name = av_strdup("in"); - outputs->filter_ctx = buffersrc_ctx; - outputs->pad_idx = 0; - outputs->next = NULL; - - inputs->name = av_strdup("out"); - inputs->filter_ctx = buffersink_ctx; - inputs->pad_idx = 0; - inputs->next = NULL; - - if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr, - &inputs, &outputs, NULL)) < 0) - goto end; - - if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) - goto end; - - /* Print summary of the sink buffer - * Note: args buffer is reused to store channel layout string */ - outlink = buffersink_ctx->inputs[0]; - av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout); - av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n", - (int)outlink->sample_rate, - (char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"), - args); - -end: - avfilter_inout_free(&inputs); - avfilter_inout_free(&outputs); - - return ret; -} - -static void print_frame(const AVFrame *frame) -{ - const int n = frame->nb_samples * av_get_channel_layout_nb_channels(av_frame_get_channel_layout(frame)); - const uint16_t *p = (uint16_t*)frame->data[0]; - const uint16_t *p_end = p + n; - - while (p < p_end) { - fputc(*p & 0xff, stdout); - fputc(*p>>8 & 0xff, stdout); - p++; - } - fflush(stdout); -} - -int main(int argc, char **argv) -{ - int ret; - AVPacket packet0, packet; - AVFrame *frame = av_frame_alloc(); - AVFrame *filt_frame = av_frame_alloc(); - int got_frame; - - if (!frame || !filt_frame) { - perror("Could not allocate frame"); - exit(1); - } - if (argc != 2) { - fprintf(stderr, "Usage: %s file | %s\n", argv[0], player); - exit(1); - } - - avcodec_register_all(); - av_register_all(); - avfilter_register_all(); - - if ((ret = open_input_file(argv[1])) < 0) - goto end; - if ((ret = init_filters(filter_descr)) < 0) - goto end; - - /* read all packets */ - packet0.data = NULL; - packet.data = NULL; - while (1) { - if (!packet0.data) { - if ((ret = av_read_frame(fmt_ctx, &packet)) < 0) - break; - packet0 = packet; - } - - if (packet.stream_index == audio_stream_index) { - avcodec_get_frame_defaults(frame); - got_frame = 0; - ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &packet); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n"); - continue; - } - packet.size -= ret; - packet.data += ret; - - if (got_frame) { - /* push the audio data from decoded frame into the filtergraph */ - if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0) < 0) { - av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n"); - break; - } - - /* pull filtered audio from the filtergraph */ - while (1) { - ret = 
av_buffersink_get_frame(buffersink_ctx, filt_frame); - if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) - break; - if (ret < 0) - goto end; - print_frame(filt_frame); - av_frame_unref(filt_frame); - } - } - - if (packet.size <= 0) - av_free_packet(&packet0); - } else { - /* discard non-wanted packets */ - av_free_packet(&packet0); - } - } -end: - avfilter_graph_free(&filter_graph); - avcodec_close(dec_ctx); - avformat_close_input(&fmt_ctx); - av_frame_free(&frame); - av_frame_free(&filt_frame); - - if (ret < 0 && ret != AVERROR_EOF) { - fprintf(stderr, "Error occurred: %s\n", av_err2str(ret)); - exit(1); - } - - exit(0); -} diff --git a/ffmpeg/doc/examples/filtering_video.c b/ffmpeg/doc/examples/filtering_video.c deleted file mode 100644 index 790c641..0000000 --- a/ffmpeg/doc/examples/filtering_video.c +++ /dev/null @@ -1,263 +0,0 @@ -/* - * Copyright (c) 2010 Nicolas George - * Copyright (c) 2011 Stefano Sabatini - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -/** - * @file - * API example for decoding and filtering - * @example doc/examples/filtering_video.c - */ - -#define _XOPEN_SOURCE 600 /* for usleep */ -#include <unistd.h> - -#include <libavcodec/avcodec.h> -#include <libavformat/avformat.h> -#include <libavfilter/avfiltergraph.h> -#include <libavfilter/avcodec.h> -#include <libavfilter/buffersink.h> -#include <libavfilter/buffersrc.h> -#include <libavutil/opt.h> - -const char *filter_descr = "scale=78:24"; - -static AVFormatContext *fmt_ctx; -static AVCodecContext *dec_ctx; -AVFilterContext *buffersink_ctx; -AVFilterContext *buffersrc_ctx; -AVFilterGraph *filter_graph; -static int video_stream_index = -1; -static int64_t last_pts = AV_NOPTS_VALUE; - -static int open_input_file(const char *filename) -{ - int ret; - AVCodec *dec; - - if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n"); - return ret; - } - - if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n"); - return ret; - } - - /* select the video stream */ - ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n"); - return ret; - } - video_stream_index = ret; - dec_ctx = fmt_ctx->streams[video_stream_index]->codec; - av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0); - - /* init the video decoder */ - if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n"); - return ret; - } - - return 0; -} - -static int init_filters(const char *filters_descr) -{ - char args[512]; - int ret = 0; - AVFilter *buffersrc = avfilter_get_by_name("buffer"); - AVFilter *buffersink = avfilter_get_by_name("buffersink"); - AVFilterInOut *outputs = avfilter_inout_alloc(); - AVFilterInOut *inputs = avfilter_inout_alloc(); - enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE }; - - filter_graph = avfilter_graph_alloc(); - if (!outputs || !inputs || !filter_graph) { - ret = AVERROR(ENOMEM); - goto end; - } - - /* buffer video source: the decoded frames from the decoder will be inserted here. */ - snprintf(args, sizeof(args), - "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", - dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, - dec_ctx->time_base.num, dec_ctx->time_base.den, - dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den); - - ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", - args, NULL, filter_graph); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n"); - goto end; - } - - /* buffer video sink: to terminate the filter chain. */ - ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", - NULL, NULL, filter_graph); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n"); - goto end; - } - - ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts, - AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n"); - goto end; - } - - /* Endpoints for the filter graph. 
*/ - outputs->name = av_strdup("in"); - outputs->filter_ctx = buffersrc_ctx; - outputs->pad_idx = 0; - outputs->next = NULL; - - inputs->name = av_strdup("out"); - inputs->filter_ctx = buffersink_ctx; - inputs->pad_idx = 0; - inputs->next = NULL; - - if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr, - &inputs, &outputs, NULL)) < 0) - goto end; - - if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) - goto end; - -end: - avfilter_inout_free(&inputs); - avfilter_inout_free(&outputs); - - return ret; -} - -static void display_frame(const AVFrame *frame, AVRational time_base) -{ - int x, y; - uint8_t *p0, *p; - int64_t delay; - - if (frame->pts != AV_NOPTS_VALUE) { - if (last_pts != AV_NOPTS_VALUE) { - /* sleep roughly the right amount of time; - * usleep is in microseconds, just like AV_TIME_BASE. */ - delay = av_rescale_q(frame->pts - last_pts, - time_base, AV_TIME_BASE_Q); - if (delay > 0 && delay < 1000000) - usleep(delay); - } - last_pts = frame->pts; - } - - /* Trivial ASCII grayscale display. */ - p0 = frame->data[0]; - puts("\033c"); - for (y = 0; y < frame->height; y++) { - p = p0; - for (x = 0; x < frame->width; x++) - putchar(" .-+#"[*(p++) / 52]); - putchar('\n'); - p0 += frame->linesize[0]; - } - fflush(stdout); -} - -int main(int argc, char **argv) -{ - int ret; - AVPacket packet; - AVFrame *frame = av_frame_alloc(); - AVFrame *filt_frame = av_frame_alloc(); - int got_frame; - - if (!frame || !filt_frame) { - perror("Could not allocate frame"); - exit(1); - } - if (argc != 2) { - fprintf(stderr, "Usage: %s file\n", argv[0]); - exit(1); - } - - avcodec_register_all(); - av_register_all(); - avfilter_register_all(); - - if ((ret = open_input_file(argv[1])) < 0) - goto end; - if ((ret = init_filters(filter_descr)) < 0) - goto end; - - /* read all packets */ - while (1) { - if ((ret = av_read_frame(fmt_ctx, &packet)) < 0) - break; - - if (packet.stream_index == video_stream_index) { - avcodec_get_frame_defaults(frame); - got_frame = 0; - ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Error decoding video\n"); - break; - } - - if (got_frame) { - frame->pts = av_frame_get_best_effort_timestamp(frame); - - /* push the decoded frame into the filtergraph */ - if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) { - av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); - break; - } - - /* pull filtered frames from the filtergraph */ - while (1) { - ret = av_buffersink_get_frame(buffersink_ctx, filt_frame); - if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) - break; - if (ret < 0) - goto end; - display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base); - av_frame_unref(filt_frame); - } - av_frame_unref(frame); - } - } - av_free_packet(&packet); - } -end: - avfilter_graph_free(&filter_graph); - avcodec_close(dec_ctx); - avformat_close_input(&fmt_ctx); - av_frame_free(&frame); - av_frame_free(&filt_frame); - - if (ret < 0 && ret != AVERROR_EOF) { - fprintf(stderr, "Error occurred: %s\n", av_err2str(ret)); - exit(1); - } - - exit(0); -} diff --git a/ffmpeg/doc/examples/metadata.c b/ffmpeg/doc/examples/metadata.c deleted file mode 100644 index 9c1bcd7..0000000 --- a/ffmpeg/doc/examples/metadata.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2011 Reinhard Tartler - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the 
Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -/** - * @file - * Shows how the metadata API can be used in application programs. - * @example doc/examples/metadata.c - */ - -#include <stdio.h> - -#include <libavformat/avformat.h> -#include <libavutil/dict.h> - -int main (int argc, char **argv) -{ - AVFormatContext *fmt_ctx = NULL; - AVDictionaryEntry *tag = NULL; - int ret; - - if (argc != 2) { - printf("usage: %s <input_file>\n" - "example program to demonstrate the use of the libavformat metadata API.\n" - "\n", argv[0]); - return 1; - } - - av_register_all(); - if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL))) - return ret; - - while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) - printf("%s=%s\n", tag->key, tag->value); - - avformat_close_input(&fmt_ctx); - return 0; -} diff --git a/ffmpeg/doc/examples/muxing.c b/ffmpeg/doc/examples/muxing.c deleted file mode 100644 index 4cd3f65..0000000 --- a/ffmpeg/doc/examples/muxing.c +++ /dev/null @@ -1,571 +0,0 @@ -/* - * Copyright (c) 2003 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -/** - * @file - * libavformat API example. - * - * Output a media file in any supported libavformat format. - * The default codecs are used. 
- * @example doc/examples/muxing.c - */ - -#include <stdlib.h> -#include <stdio.h> -#include <string.h> -#include <math.h> - -#include <libavutil/opt.h> -#include <libavutil/mathematics.h> -#include <libavformat/avformat.h> -#include <libswscale/swscale.h> -#include <libswresample/swresample.h> - -/* 5 seconds stream duration */ -#define STREAM_DURATION 200.0 -#define STREAM_FRAME_RATE 25 /* 25 images/s */ -#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE)) -#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */ - -static int sws_flags = SWS_BICUBIC; - -/* Add an output stream. */ -static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, - enum AVCodecID codec_id) -{ - AVCodecContext *c; - AVStream *st; - - /* find the encoder */ - *codec = avcodec_find_encoder(codec_id); - if (!(*codec)) { - fprintf(stderr, "Could not find encoder for '%s'\n", - avcodec_get_name(codec_id)); - exit(1); - } - - st = avformat_new_stream(oc, *codec); - if (!st) { - fprintf(stderr, "Could not allocate stream\n"); - exit(1); - } - st->id = oc->nb_streams-1; - c = st->codec; - - switch ((*codec)->type) { - case AVMEDIA_TYPE_AUDIO: - c->sample_fmt = AV_SAMPLE_FMT_FLTP; - c->bit_rate = 64000; - c->sample_rate = 44100; - c->channels = 2; - break; - - case AVMEDIA_TYPE_VIDEO: - c->codec_id = codec_id; - - c->bit_rate = 400000; - /* Resolution must be a multiple of two. */ - c->width = 352; - c->height = 288; - /* timebase: This is the fundamental unit of time (in seconds) in terms - * of which frame timestamps are represented. For fixed-fps content, - * timebase should be 1/framerate and timestamp increments should be - * identical to 1. */ - c->time_base.den = STREAM_FRAME_RATE; - c->time_base.num = 1; - c->gop_size = 12; /* emit one intra frame every twelve frames at most */ - c->pix_fmt = STREAM_PIX_FMT; - if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) { - /* just for testing, we also add B frames */ - c->max_b_frames = 2; - } - if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) { - /* Needed to avoid using macroblocks in which some coeffs overflow. - * This does not happen with normal video, it just happens here as - * the motion of the chroma plane does not match the luma plane. */ - c->mb_decision = 2; - } - break; - - default: - break; - } - - /* Some formats want stream headers to be separate. */ - if (oc->oformat->flags & AVFMT_GLOBALHEADER) - c->flags |= CODEC_FLAG_GLOBAL_HEADER; - - return st; -} - -/**************************************************************/ -/* audio output */ - -static float t, tincr, tincr2; - -static uint8_t **src_samples_data; -static int src_samples_linesize; -static int src_nb_samples; - -static int max_dst_nb_samples; -uint8_t **dst_samples_data; -int dst_samples_linesize; -int dst_samples_size; - -struct SwrContext *swr_ctx = NULL; - -static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st) -{ - AVCodecContext *c; - int ret; - - c = st->codec; - - /* open it */ - ret = avcodec_open2(c, codec, NULL); - if (ret < 0) { - fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret)); - exit(1); - } - - /* init signal generator */ - t = 0; - tincr = 2 * M_PI * 110.0 / c->sample_rate; - /* increment frequency by 110 Hz per second */ - tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate; - - src_nb_samples = c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ? 
- 10000 : c->frame_size; - - ret = av_samples_alloc_array_and_samples(&src_samples_data, &src_samples_linesize, c->channels, - src_nb_samples, AV_SAMPLE_FMT_S16, 0); - if (ret < 0) { - fprintf(stderr, "Could not allocate source samples\n"); - exit(1); - } - - /* compute the number of converted samples: buffering is avoided - * ensuring that the output buffer will contain at least all the - * converted input samples */ - max_dst_nb_samples = src_nb_samples; - - /* create resampler context */ - if (c->sample_fmt != AV_SAMPLE_FMT_S16) { - swr_ctx = swr_alloc(); - if (!swr_ctx) { - fprintf(stderr, "Could not allocate resampler context\n"); - exit(1); - } - - /* set options */ - av_opt_set_int (swr_ctx, "in_channel_count", c->channels, 0); - av_opt_set_int (swr_ctx, "in_sample_rate", c->sample_rate, 0); - av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); - av_opt_set_int (swr_ctx, "out_channel_count", c->channels, 0); - av_opt_set_int (swr_ctx, "out_sample_rate", c->sample_rate, 0); - av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", c->sample_fmt, 0); - - /* initialize the resampling context */ - if ((ret = swr_init(swr_ctx)) < 0) { - fprintf(stderr, "Failed to initialize the resampling context\n"); - exit(1); - } - - ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize, c->channels, - max_dst_nb_samples, c->sample_fmt, 0); - if (ret < 0) { - fprintf(stderr, "Could not allocate destination samples\n"); - exit(1); - } - } else { - dst_samples_data = src_samples_data; - } - dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, max_dst_nb_samples, - c->sample_fmt, 0); -} - -/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and - * 'nb_channels' channels. */ -static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels) -{ - int j, i, v; - int16_t *q; - - q = samples; - for (j = 0; j < frame_size; j++) { - v = (int)(sin(t) * 10000); - for (i = 0; i < nb_channels; i++) - *q++ = v; - t += tincr; - tincr += tincr2; - } -} - -static void write_audio_frame(AVFormatContext *oc, AVStream *st) -{ - AVCodecContext *c; - AVPacket pkt = { 0 }; // data and size must be 0; - AVFrame *frame = av_frame_alloc(); - int got_packet, ret, dst_nb_samples; - - av_init_packet(&pkt); - c = st->codec; - - get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels); - - /* convert samples from native format to destination codec format, using the resampler */ - if (swr_ctx) { - /* compute destination number of samples */ - dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples, - c->sample_rate, c->sample_rate, AV_ROUND_UP); - if (dst_nb_samples > max_dst_nb_samples) { - av_free(dst_samples_data[0]); - ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels, - dst_nb_samples, c->sample_fmt, 0); - if (ret < 0) - exit(1); - max_dst_nb_samples = dst_nb_samples; - dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples, - c->sample_fmt, 0); - } - - /* convert to destination format */ - ret = swr_convert(swr_ctx, - dst_samples_data, dst_nb_samples, - (const uint8_t **)src_samples_data, src_nb_samples); - if (ret < 0) { - fprintf(stderr, "Error while converting\n"); - exit(1); - } - } else { - dst_nb_samples = src_nb_samples; - } - - frame->nb_samples = dst_nb_samples; - avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt, - dst_samples_data[0], dst_samples_size, 0); - - ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet); - if (ret < 0) 
{ - fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret)); - exit(1); - } - - if (!got_packet) - goto freeframe; - - pkt.stream_index = st->index; - - /* Write the compressed frame to the media file. */ - ret = av_interleaved_write_frame(oc, &pkt); - if (ret != 0) { - fprintf(stderr, "Error while writing audio frame: %s\n", - av_err2str(ret)); - exit(1); - } -freeframe: - av_frame_free(&frame); -} - -static void close_audio(AVFormatContext *oc, AVStream *st) -{ - avcodec_close(st->codec); - if (dst_samples_data != src_samples_data) { - av_free(dst_samples_data[0]); - av_free(dst_samples_data); - } - av_free(src_samples_data[0]); - av_free(src_samples_data); -} - -/**************************************************************/ -/* video output */ - -static AVFrame *frame; -static AVPicture src_picture, dst_picture; -static int frame_count; - -static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st) -{ - int ret; - AVCodecContext *c = st->codec; - - /* open the codec */ - ret = avcodec_open2(c, codec, NULL); - if (ret < 0) { - fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret)); - exit(1); - } - - /* allocate and init a re-usable frame */ - frame = av_frame_alloc(); - if (!frame) { - fprintf(stderr, "Could not allocate video frame\n"); - exit(1); - } - - /* Allocate the encoded raw picture. */ - ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height); - if (ret < 0) { - fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret)); - exit(1); - } - - /* If the output format is not YUV420P, then a temporary YUV420P - * picture is needed too. It is then converted to the required - * output format. */ - if (c->pix_fmt != AV_PIX_FMT_YUV420P) { - ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height); - if (ret < 0) { - fprintf(stderr, "Could not allocate temporary picture: %s\n", - av_err2str(ret)); - exit(1); - } - } - - /* copy data and linesize picture pointers to frame */ - *((AVPicture *)frame) = dst_picture; -} - -/* Prepare a dummy image. */ -static void fill_yuv_image(AVPicture *pict, int frame_index, - int width, int height) -{ - int x, y, i; - - i = frame_index; - - /* Y */ - for (y = 0; y < height; y++) - for (x = 0; x < width; x++) - pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3; - - /* Cb and Cr */ - for (y = 0; y < height / 2; y++) { - for (x = 0; x < width / 2; x++) { - pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2; - pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5; - } - } -} - -static void write_video_frame(AVFormatContext *oc, AVStream *st) -{ - int ret; - static struct SwsContext *sws_ctx; - AVCodecContext *c = st->codec; - - if (frame_count >= STREAM_NB_FRAMES) { - /* No more frames to compress. The codec has a latency of a few - * frames if using B-frames, so we get the last frames by - * passing the same picture again. 
*/ - } else { - if (c->pix_fmt != AV_PIX_FMT_YUV420P) { - /* as we only generate a YUV420P picture, we must convert it - * to the codec pixel format if needed */ - if (!sws_ctx) { - sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P, - c->width, c->height, c->pix_fmt, - sws_flags, NULL, NULL, NULL); - if (!sws_ctx) { - fprintf(stderr, - "Could not initialize the conversion context\n"); - exit(1); - } - } - fill_yuv_image(&src_picture, frame_count, c->width, c->height); - sws_scale(sws_ctx, - (const uint8_t * const *)src_picture.data, src_picture.linesize, - 0, c->height, dst_picture.data, dst_picture.linesize); - } else { - fill_yuv_image(&dst_picture, frame_count, c->width, c->height); - } - } - - if (oc->oformat->flags & AVFMT_RAWPICTURE) { - /* Raw video case - directly store the picture in the packet */ - AVPacket pkt; - av_init_packet(&pkt); - - pkt.flags |= AV_PKT_FLAG_KEY; - pkt.stream_index = st->index; - pkt.data = dst_picture.data[0]; - pkt.size = sizeof(AVPicture); - - ret = av_interleaved_write_frame(oc, &pkt); - } else { - AVPacket pkt = { 0 }; - int got_packet; - av_init_packet(&pkt); - - /* encode the image */ - ret = avcodec_encode_video2(c, &pkt, frame, &got_packet); - if (ret < 0) { - fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret)); - exit(1); - } - /* If size is zero, it means the image was buffered. */ - - if (!ret && got_packet && pkt.size) { - pkt.stream_index = st->index; - - /* Write the compressed frame to the media file. */ - ret = av_interleaved_write_frame(oc, &pkt); - } else { - ret = 0; - } - } - if (ret != 0) { - fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret)); - exit(1); - } - frame_count++; -} - -static void close_video(AVFormatContext *oc, AVStream *st) -{ - avcodec_close(st->codec); - av_free(src_picture.data[0]); - av_free(dst_picture.data[0]); - av_free(frame); -} - -/**************************************************************/ -/* media file output */ - -int main(int argc, char **argv) -{ - const char *filename; - AVOutputFormat *fmt; - AVFormatContext *oc; - AVStream *audio_st, *video_st; - AVCodec *audio_codec, *video_codec; - double audio_time, video_time; - int ret; - - /* Initialize libavcodec, and register all codecs and formats. */ - av_register_all(); - - if (argc != 2) { - printf("usage: %s output_file\n" - "API example program to output a media file with libavformat.\n" - "This program generates a synthetic audio and video stream, encodes and\n" - "muxes them into a file named output_file.\n" - "The output format is automatically guessed according to the file extension.\n" - "Raw images can also be output by using '%%d' in the filename.\n" - "\n", argv[0]); - return 1; - } - - filename = argv[1]; - - /* allocate the output media context */ - avformat_alloc_output_context2(&oc, NULL, NULL, filename); - if (!oc) { - printf("Could not deduce output format from file extension: using MPEG.\n"); - avformat_alloc_output_context2(&oc, NULL, "mpeg", filename); - } - if (!oc) { - return 1; - } - fmt = oc->oformat; - - /* Add the audio and video streams using the default format codecs - * and initialize the codecs. 
*/ - video_st = NULL; - audio_st = NULL; - - if (fmt->video_codec != AV_CODEC_ID_NONE) { - video_st = add_stream(oc, &video_codec, fmt->video_codec); - } - if (fmt->audio_codec != AV_CODEC_ID_NONE) { - audio_st = add_stream(oc, &audio_codec, fmt->audio_codec); - } - - /* Now that all the parameters are set, we can open the audio and - * video codecs and allocate the necessary encode buffers. */ - if (video_st) - open_video(oc, video_codec, video_st); - if (audio_st) - open_audio(oc, audio_codec, audio_st); - - av_dump_format(oc, 0, filename, 1); - - /* open the output file, if needed */ - if (!(fmt->flags & AVFMT_NOFILE)) { - ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE); - if (ret < 0) { - fprintf(stderr, "Could not open '%s': %s\n", filename, - av_err2str(ret)); - return 1; - } - } - - /* Write the stream header, if any. */ - ret = avformat_write_header(oc, NULL); - if (ret < 0) { - fprintf(stderr, "Error occurred when opening output file: %s\n", - av_err2str(ret)); - return 1; - } - - if (frame) - frame->pts = 0; - for (;;) { - /* Compute current audio and video time. */ - audio_time = audio_st ? audio_st->pts.val * av_q2d(audio_st->time_base) : 0.0; - video_time = video_st ? video_st->pts.val * av_q2d(video_st->time_base) : 0.0; - - if ((!audio_st || audio_time >= STREAM_DURATION) && - (!video_st || video_time >= STREAM_DURATION)) - break; - - /* write interleaved audio and video frames */ - if (!video_st || (video_st && audio_st && audio_time < video_time)) { - write_audio_frame(oc, audio_st); - } else { - write_video_frame(oc, video_st); - frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base); - } - } - - /* Write the trailer, if any. The trailer must be written before you - * close the CodecContexts open when you wrote the header; otherwise - * av_write_trailer() may try to use memory that was freed on - * av_codec_close(). */ - av_write_trailer(oc); - - /* Close each codec. */ - if (video_st) - close_video(oc, video_st); - if (audio_st) - close_audio(oc, audio_st); - - if (!(fmt->flags & AVFMT_NOFILE)) - /* Close the output file. */ - avio_close(oc->pb); - - /* free the stream */ - avformat_free_context(oc); - - return 0; -} diff --git a/ffmpeg/doc/examples/pc-uninstalled/libavcodec.pc b/ffmpeg/doc/examples/pc-uninstalled/libavcodec.pc deleted file mode 100644 index a87ded7..0000000 --- a/ffmpeg/doc/examples/pc-uninstalled/libavcodec.pc +++ /dev/null @@ -1,12 +0,0 @@ -prefix= -exec_prefix= -libdir=${pcfiledir}/../../../libavcodec -includedir=${pcfiledir}/../../.. - -Name: libavcodec -Description: FFmpeg codec library -Version: 55.46.100 -Requires: libavutil = 52.59.100 -Conflicts: -Libs: -L${libdir} -Wl,-rpath,${libdir} -lavcodec -Cflags: -I${includedir} diff --git a/ffmpeg/doc/examples/pc-uninstalled/libavdevice.pc b/ffmpeg/doc/examples/pc-uninstalled/libavdevice.pc deleted file mode 100644 index 7f05a29..0000000 --- a/ffmpeg/doc/examples/pc-uninstalled/libavdevice.pc +++ /dev/null @@ -1,12 +0,0 @@ -prefix= -exec_prefix= -libdir=${pcfiledir}/../../../libavdevice -includedir=${pcfiledir}/../../.. 
- -Name: libavdevice -Description: FFmpeg device handling library -Version: 55.5.102 -Requires: libavfilter = 4.0.103, libavformat = 55.22.100 -Conflicts: -Libs: -L${libdir} -Wl,-rpath,${libdir} -lavdevice -Cflags: -I${includedir} diff --git a/ffmpeg/doc/examples/pc-uninstalled/libavfilter.pc b/ffmpeg/doc/examples/pc-uninstalled/libavfilter.pc deleted file mode 100644 index b42f95d..0000000 --- a/ffmpeg/doc/examples/pc-uninstalled/libavfilter.pc +++ /dev/null @@ -1,12 +0,0 @@ -prefix= -exec_prefix= -libdir=${pcfiledir}/../../../libavfilter -includedir=${pcfiledir}/../../.. - -Name: libavfilter -Description: FFmpeg audio/video filtering library -Version: 4.0.103 -Requires: libpostproc = 52.3.100, libswresample = 0.17.104, libswscale = 2.5.101, libavformat = 55.22.100, libavcodec = 55.46.100, libavutil = 52.59.100 -Conflicts: -Libs: -L${libdir} -Wl,-rpath,${libdir} -lavfilter -Cflags: -I${includedir} diff --git a/ffmpeg/doc/examples/pc-uninstalled/libavformat.pc b/ffmpeg/doc/examples/pc-uninstalled/libavformat.pc deleted file mode 100644 index 8bab324..0000000 --- a/ffmpeg/doc/examples/pc-uninstalled/libavformat.pc +++ /dev/null @@ -1,12 +0,0 @@ -prefix= -exec_prefix= -libdir=${pcfiledir}/../../../libavformat -includedir=${pcfiledir}/../../.. - -Name: libavformat -Description: FFmpeg container format library -Version: 55.22.100 -Requires: libavcodec = 55.46.100 -Conflicts: -Libs: -L${libdir} -Wl,-rpath,${libdir} -lavformat -Cflags: -I${includedir} diff --git a/ffmpeg/doc/examples/pc-uninstalled/libavutil.pc b/ffmpeg/doc/examples/pc-uninstalled/libavutil.pc deleted file mode 100644 index 85df0f0..0000000 --- a/ffmpeg/doc/examples/pc-uninstalled/libavutil.pc +++ /dev/null @@ -1,12 +0,0 @@ -prefix= -exec_prefix= -libdir=${pcfiledir}/../../../libavutil -includedir=${pcfiledir}/../../.. - -Name: libavutil -Description: FFmpeg utility library -Version: 52.59.100 -Requires: -Conflicts: -Libs: -L${libdir} -Wl,-rpath,${libdir} -lavutil -Cflags: -I${includedir} diff --git a/ffmpeg/doc/examples/pc-uninstalled/libpostproc.pc b/ffmpeg/doc/examples/pc-uninstalled/libpostproc.pc deleted file mode 100644 index 94da503..0000000 --- a/ffmpeg/doc/examples/pc-uninstalled/libpostproc.pc +++ /dev/null @@ -1,12 +0,0 @@ -prefix= -exec_prefix= -libdir=${pcfiledir}/../../../libpostproc -includedir=${pcfiledir}/../../.. - -Name: libpostproc -Description: FFmpeg postprocessing library -Version: 52.3.100 -Requires: libavutil = 52.59.100 -Conflicts: -Libs: -L${libdir} -Wl,-rpath,${libdir} -lpostproc -Cflags: -I${includedir} diff --git a/ffmpeg/doc/examples/pc-uninstalled/libswresample.pc b/ffmpeg/doc/examples/pc-uninstalled/libswresample.pc deleted file mode 100644 index 45bfa4a..0000000 --- a/ffmpeg/doc/examples/pc-uninstalled/libswresample.pc +++ /dev/null @@ -1,12 +0,0 @@ -prefix= -exec_prefix= -libdir=${pcfiledir}/../../../libswresample -includedir=${pcfiledir}/../../.. - -Name: libswresample -Description: FFmpeg audio resampling library -Version: 0.17.104 -Requires: libavutil = 52.59.100 -Conflicts: -Libs: -L${libdir} -Wl,-rpath,${libdir} -lswresample -Cflags: -I${includedir} diff --git a/ffmpeg/doc/examples/pc-uninstalled/libswscale.pc b/ffmpeg/doc/examples/pc-uninstalled/libswscale.pc deleted file mode 100644 index 8693580..0000000 --- a/ffmpeg/doc/examples/pc-uninstalled/libswscale.pc +++ /dev/null @@ -1,12 +0,0 @@ -prefix= -exec_prefix= -libdir=${pcfiledir}/../../../libswscale -includedir=${pcfiledir}/../../.. 
- -Name: libswscale -Description: FFmpeg image rescaling library -Version: 2.5.101 -Requires: libavutil = 52.59.100 -Conflicts: -Libs: -L${libdir} -Wl,-rpath,${libdir} -lswscale -Cflags: -I${includedir} diff --git a/ffmpeg/doc/examples/resampling_audio.c b/ffmpeg/doc/examples/resampling_audio.c deleted file mode 100644 index a15e042..0000000 --- a/ffmpeg/doc/examples/resampling_audio.c +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Copyright (c) 2012 Stefano Sabatini - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -/** - * @example doc/examples/resampling_audio.c - * libswresample API use example. - */ - -#include <libavutil/opt.h> -#include <libavutil/channel_layout.h> -#include <libavutil/samplefmt.h> -#include <libswresample/swresample.h> - -static int get_format_from_sample_fmt(const char **fmt, - enum AVSampleFormat sample_fmt) -{ - int i; - struct sample_fmt_entry { - enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le; - } sample_fmt_entries[] = { - { AV_SAMPLE_FMT_U8, "u8", "u8" }, - { AV_SAMPLE_FMT_S16, "s16be", "s16le" }, - { AV_SAMPLE_FMT_S32, "s32be", "s32le" }, - { AV_SAMPLE_FMT_FLT, "f32be", "f32le" }, - { AV_SAMPLE_FMT_DBL, "f64be", "f64le" }, - }; - *fmt = NULL; - - for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) { - struct sample_fmt_entry *entry = &sample_fmt_entries[i]; - if (sample_fmt == entry->sample_fmt) { - *fmt = AV_NE(entry->fmt_be, entry->fmt_le); - return 0; - } - } - - fprintf(stderr, - "Sample format %s not supported as output format\n", - av_get_sample_fmt_name(sample_fmt)); - return AVERROR(EINVAL); -} - -/** - * Fill dst buffer with nb_samples, generated starting from t. 
- */ -static void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t) -{ - int i, j; - double tincr = 1.0 / sample_rate, *dstp = dst; - const double c = 2 * M_PI * 440.0; - - /* generate sin tone with 440Hz frequency and duplicated channels */ - for (i = 0; i < nb_samples; i++) { - *dstp = sin(c * *t); - for (j = 1; j < nb_channels; j++) - dstp[j] = dstp[0]; - dstp += nb_channels; - *t += tincr; - } -} - -int main(int argc, char **argv) -{ - int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND; - int src_rate = 48000, dst_rate = 44100; - uint8_t **src_data = NULL, **dst_data = NULL; - int src_nb_channels = 0, dst_nb_channels = 0; - int src_linesize, dst_linesize; - int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples; - enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16; - const char *dst_filename = NULL; - FILE *dst_file; - int dst_bufsize; - const char *fmt; - struct SwrContext *swr_ctx; - double t; - int ret; - - if (argc != 2) { - fprintf(stderr, "Usage: %s output_file\n" - "API example program to show how to resample an audio stream with libswresample.\n" - "This program generates a series of audio frames, resamples them to a specified " - "output format and rate and saves them to an output file named output_file.\n", - argv[0]); - exit(1); - } - dst_filename = argv[1]; - - dst_file = fopen(dst_filename, "wb"); - if (!dst_file) { - fprintf(stderr, "Could not open destination file %s\n", dst_filename); - exit(1); - } - - /* create resampler context */ - swr_ctx = swr_alloc(); - if (!swr_ctx) { - fprintf(stderr, "Could not allocate resampler context\n"); - ret = AVERROR(ENOMEM); - goto end; - } - - /* set options */ - av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0); - av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0); - av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0); - - av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0); - av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0); - av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0); - - /* initialize the resampling context */ - if ((ret = swr_init(swr_ctx)) < 0) { - fprintf(stderr, "Failed to initialize the resampling context\n"); - goto end; - } - - /* allocate source and destination samples buffers */ - - src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout); - ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels, - src_nb_samples, src_sample_fmt, 0); - if (ret < 0) { - fprintf(stderr, "Could not allocate source samples\n"); - goto end; - } - - /* compute the number of converted samples: buffering is avoided - * ensuring that the output buffer will contain at least all the - * converted input samples */ - max_dst_nb_samples = dst_nb_samples = - av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP); - - /* buffer is going to be directly written to a rawaudio file, no alignment */ - dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout); - ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels, - dst_nb_samples, dst_sample_fmt, 0); - if (ret < 0) { - fprintf(stderr, "Could not allocate destination samples\n"); - goto end; - } - - t = 0; - do { - /* generate synthetic audio */ - fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t); - - /* compute destination number of samples */ - dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, 
src_rate) + - src_nb_samples, dst_rate, src_rate, AV_ROUND_UP); - if (dst_nb_samples > max_dst_nb_samples) { - av_free(dst_data[0]); - ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels, - dst_nb_samples, dst_sample_fmt, 1); - if (ret < 0) - break; - max_dst_nb_samples = dst_nb_samples; - } - - /* convert to destination format */ - ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples); - if (ret < 0) { - fprintf(stderr, "Error while converting\n"); - goto end; - } - dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels, - ret, dst_sample_fmt, 1); - if (dst_bufsize < 0) { - fprintf(stderr, "Could not get sample buffer size\n"); - goto end; - } - printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret); - fwrite(dst_data[0], 1, dst_bufsize, dst_file); - } while (t < 10); - - if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0) - goto end; - fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n" - "ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n", - fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename); - -end: - if (dst_file) - fclose(dst_file); - - if (src_data) - av_freep(&src_data[0]); - av_freep(&src_data); - - if (dst_data) - av_freep(&dst_data[0]); - av_freep(&dst_data); - - swr_free(&swr_ctx); - return ret < 0; -} diff --git a/ffmpeg/doc/examples/scaling_video.c b/ffmpeg/doc/examples/scaling_video.c deleted file mode 100644 index be2c510..0000000 --- a/ffmpeg/doc/examples/scaling_video.c +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (c) 2012 Stefano Sabatini - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -/** - * @file - * libswscale API use example. 
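- * Generates a sequence of synthetic 320x240 YUV420P pictures, rescales them
- * with sws_scale() to the size given on the command line (RGB24 output) and
- * writes the raw result to the output file.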
- * @example doc/examples/scaling_video.c - */ - -#include <libavutil/imgutils.h> -#include <libavutil/parseutils.h> -#include <libswscale/swscale.h> - -static void fill_yuv_image(uint8_t *data[4], int linesize[4], - int width, int height, int frame_index) -{ - int x, y; - - /* Y */ - for (y = 0; y < height; y++) - for (x = 0; x < width; x++) - data[0][y * linesize[0] + x] = x + y + frame_index * 3; - - /* Cb and Cr */ - for (y = 0; y < height / 2; y++) { - for (x = 0; x < width / 2; x++) { - data[1][y * linesize[1] + x] = 128 + y + frame_index * 2; - data[2][y * linesize[2] + x] = 64 + x + frame_index * 5; - } - } -} - -int main(int argc, char **argv) -{ - uint8_t *src_data[4], *dst_data[4]; - int src_linesize[4], dst_linesize[4]; - int src_w = 320, src_h = 240, dst_w, dst_h; - enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_RGB24; - const char *dst_size = NULL; - const char *dst_filename = NULL; - FILE *dst_file; - int dst_bufsize; - struct SwsContext *sws_ctx; - int i, ret; - - if (argc != 3) { - fprintf(stderr, "Usage: %s output_file output_size\n" - "API example program to show how to scale an image with libswscale.\n" - "This program generates a series of pictures, rescales them to the given " - "output_size and saves them to an output file named output_file\n." - "\n", argv[0]); - exit(1); - } - dst_filename = argv[1]; - dst_size = argv[2]; - - if (av_parse_video_size(&dst_w, &dst_h, dst_size) < 0) { - fprintf(stderr, - "Invalid size '%s', must be in the form WxH or a valid size abbreviation\n", - dst_size); - exit(1); - } - - dst_file = fopen(dst_filename, "wb"); - if (!dst_file) { - fprintf(stderr, "Could not open destination file %s\n", dst_filename); - exit(1); - } - - /* create scaling context */ - sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt, - dst_w, dst_h, dst_pix_fmt, - SWS_BILINEAR, NULL, NULL, NULL); - if (!sws_ctx) { - fprintf(stderr, - "Impossible to create scale context for the conversion " - "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n", - av_get_pix_fmt_name(src_pix_fmt), src_w, src_h, - av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h); - ret = AVERROR(EINVAL); - goto end; - } - - /* allocate source and destination image buffers */ - if ((ret = av_image_alloc(src_data, src_linesize, - src_w, src_h, src_pix_fmt, 16)) < 0) { - fprintf(stderr, "Could not allocate source image\n"); - goto end; - } - - /* buffer is going to be written to rawvideo file, no alignment */ - if ((ret = av_image_alloc(dst_data, dst_linesize, - dst_w, dst_h, dst_pix_fmt, 1)) < 0) { - fprintf(stderr, "Could not allocate destination image\n"); - goto end; - } - dst_bufsize = ret; - - for (i = 0; i < 100; i++) { - /* generate synthetic video */ - fill_yuv_image(src_data, src_linesize, src_w, src_h, i); - - /* convert to destination format */ - sws_scale(sws_ctx, (const uint8_t * const*)src_data, - src_linesize, 0, src_h, dst_data, dst_linesize); - - /* write scaled image to file */ - fwrite(dst_data[0], 1, dst_bufsize, dst_file); - } - - fprintf(stderr, "Scaling succeeded. 
Play the output file with the command:\n" - "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n", - av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename); - -end: - if (dst_file) - fclose(dst_file); - av_freep(&src_data[0]); - av_freep(&dst_data[0]); - sws_freeContext(sws_ctx); - return ret < 0; -} diff --git a/ffmpeg/doc/faq.texi b/ffmpeg/doc/faq.texi deleted file mode 100644 index c47d9d9..0000000 --- a/ffmpeg/doc/faq.texi +++ /dev/null @@ -1,556 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle FFmpeg FAQ -@titlepage -@center @titlefont{FFmpeg FAQ} -@end titlepage - -@top - -@contents - -@chapter General Questions - -@section Why doesn't FFmpeg support feature [xyz]? - -Because no one has taken on that task yet. FFmpeg development is -driven by the tasks that are important to the individual developers. -If there is a feature that is important to you, the best way to get -it implemented is to undertake the task yourself or sponsor a developer. - -@section FFmpeg does not support codec XXX. Can you include a Windows DLL loader to support it? - -No. Windows DLLs are not portable, bloated and often slow. -Moreover FFmpeg strives to support all codecs natively. -A DLL loader is not conducive to that goal. - -@section I cannot read this file although this format seems to be supported by ffmpeg. - -Even if ffmpeg can read the container format, it may not support all its -codecs. Please consult the supported codec list in the ffmpeg -documentation. - -@section Which codecs are supported by Windows? - -Windows does not support standard formats like MPEG very well, unless you -install some additional codecs. - -The following list of video codecs should work on most Windows systems: -@table @option -@item msmpeg4v2 -.avi/.asf -@item msmpeg4 -.asf only -@item wmv1 -.asf only -@item wmv2 -.asf only -@item mpeg4 -Only if you have some MPEG-4 codec like ffdshow or Xvid installed. -@item mpeg1video -.mpg only -@end table -Note, ASF files often have .wmv or .wma extensions in Windows. It should also -be mentioned that Microsoft claims a patent on the ASF format, and may sue -or threaten users who create ASF files with non-Microsoft software. It is -strongly advised to avoid ASF where possible. - -The following list of audio codecs should work on most Windows systems: -@table @option -@item adpcm_ima_wav -@item adpcm_ms -@item pcm_s16le -always -@item libmp3lame -If some MP3 codec like LAME is installed. -@end table - - -@chapter Compilation - -@section @code{error: can't find a register in class 'GENERAL_REGS' while reloading 'asm'} - -This is a bug in gcc. Do not report it to us. Instead, please report it to -the gcc developers. Note that we will not add workarounds for gcc bugs. - -Also note that (some of) the gcc developers believe this is not a bug or -not a bug they should fix: -@url{http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11203}. -Then again, some of them do not know the difference between an undecidable -problem and an NP-hard problem... - -@section I have installed this library with my distro's package manager. Why does @command{configure} not see it? - -Distributions usually split libraries in several packages. The main package -contains the files necessary to run programs using the library. The -development package contains the files necessary to build programs using the -library. Sometimes, docs and/or data are in a separate package too. - -To build FFmpeg, you need to install the development package. It is usually -called @file{libfoo-dev} or @file{libfoo-devel}. 
You can remove it after the -build is finished, but be sure to keep the main package. - -@chapter Usage - -@section ffmpeg does not work; what is wrong? - -Try a @code{make distclean} in the ffmpeg source directory before the build. -If this does not help see -(@url{http://ffmpeg.org/bugreports.html}). - -@section How do I encode single pictures into movies? - -First, rename your pictures to follow a numerical sequence. -For example, img1.jpg, img2.jpg, img3.jpg,... -Then you may run: - -@example -ffmpeg -f image2 -i img%d.jpg /tmp/a.mpg -@end example - -Notice that @samp{%d} is replaced by the image number. - -@file{img%03d.jpg} means the sequence @file{img001.jpg}, @file{img002.jpg}, etc. - -Use the @option{-start_number} option to declare a starting number for -the sequence. This is useful if your sequence does not start with -@file{img001.jpg} but is still in a numerical order. The following -example will start with @file{img100.jpg}: - -@example -ffmpeg -f image2 -start_number 100 -i img%d.jpg /tmp/a.mpg -@end example - -If you have large number of pictures to rename, you can use the -following command to ease the burden. The command, using the bourne -shell syntax, symbolically links all files in the current directory -that match @code{*jpg} to the @file{/tmp} directory in the sequence of -@file{img001.jpg}, @file{img002.jpg} and so on. - -@example -x=1; for i in *jpg; do counter=$(printf %03d $x); ln -s "$i" /tmp/img"$counter".jpg; x=$(($x+1)); done -@end example - -If you want to sequence them by oldest modified first, substitute -@code{$(ls -r -t *jpg)} in place of @code{*jpg}. - -Then run: - -@example -ffmpeg -f image2 -i /tmp/img%03d.jpg /tmp/a.mpg -@end example - -The same logic is used for any image format that ffmpeg reads. - -You can also use @command{cat} to pipe images to ffmpeg: - -@example -cat *.jpg | ffmpeg -f image2pipe -c:v mjpeg -i - output.mpg -@end example - -@section How do I encode movie to single pictures? - -Use: - -@example -ffmpeg -i movie.mpg movie%d.jpg -@end example - -The @file{movie.mpg} used as input will be converted to -@file{movie1.jpg}, @file{movie2.jpg}, etc... - -Instead of relying on file format self-recognition, you may also use -@table @option -@item -c:v ppm -@item -c:v png -@item -c:v mjpeg -@end table -to force the encoding. - -Applying that to the previous example: -@example -ffmpeg -i movie.mpg -f image2 -c:v mjpeg menu%d.jpg -@end example - -Beware that there is no "jpeg" codec. Use "mjpeg" instead. - -@section Why do I see a slight quality degradation with multithreaded MPEG* encoding? - -For multithreaded MPEG* encoding, the encoded slices must be independent, -otherwise thread n would practically have to wait for n-1 to finish, so it's -quite logical that there is a small reduction of quality. This is not a bug. - -@section How can I read from the standard input or write to the standard output? - -Use @file{-} as file name. - -@section -f jpeg doesn't work. - -Try '-f image2 test%d.jpg'. - -@section Why can I not change the frame rate? - -Some codecs, like MPEG-1/2, only allow a small number of fixed frame rates. -Choose a different codec with the -c:v command line option. - -@section How do I encode Xvid or DivX video with ffmpeg? - -Both Xvid and DivX (version 4+) are implementations of the ISO MPEG-4 -standard (note that there are many other coding formats that use this -same standard). Thus, use '-c:v mpeg4' to encode in these formats. The -default fourcc stored in an MPEG-4-coded file will be 'FMP4'. 
If you want -a different fourcc, use the '-vtag' option. E.g., '-vtag xvid' will -force the fourcc 'xvid' to be stored as the video fourcc rather than the -default. - -@section Which are good parameters for encoding high quality MPEG-4? - -'-mbd rd -flags +mv4+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -pass 1/2', -things to try: '-bf 2', '-flags qprd', '-flags mv0', '-flags skiprd'. - -@section Which are good parameters for encoding high quality MPEG-1/MPEG-2? - -'-mbd rd -trellis 2 -cmp 2 -subcmp 2 -g 100 -pass 1/2' -but beware the '-g 100' might cause problems with some decoders. -Things to try: '-bf 2', '-flags qprd', '-flags mv0', '-flags skiprd. - -@section Interlaced video looks very bad when encoded with ffmpeg, what is wrong? - -You should use '-flags +ilme+ildct' and maybe '-flags +alt' for interlaced -material, and try '-top 0/1' if the result looks really messed-up. - -@section How can I read DirectShow files? - -If you have built FFmpeg with @code{./configure --enable-avisynth} -(only possible on MinGW/Cygwin platforms), -then you may use any file that DirectShow can read as input. - -Just create an "input.avs" text file with this single line ... -@example -DirectShowSource("C:\path to your file\yourfile.asf") -@end example -... and then feed that text file to ffmpeg: -@example -ffmpeg -i input.avs -@end example - -For ANY other help on AviSynth, please visit the -@uref{http://www.avisynth.org/, AviSynth homepage}. - -@section How can I join video files? - -To "join" video files is quite ambiguous. The following list explains the -different kinds of "joining" and points out how those are addressed in -FFmpeg. To join video files may mean: - -@itemize - -@item -To put them one after the other: this is called to @emph{concatenate} them -(in short: concat) and is addressed -@ref{How can I concatenate video files, in this very faq}. - -@item -To put them together in the same file, to let the user choose between the -different versions (example: different audio languages): this is called to -@emph{multiplex} them together (in short: mux), and is done by simply -invoking ffmpeg with several @option{-i} options. - -@item -For audio, to put all channels together in a single stream (example: two -mono streams into one stereo stream): this is sometimes called to -@emph{merge} them, and can be done using the -@url{http://ffmpeg.org/ffmpeg-filters.html#amerge, @code{amerge}} filter. - -@item -For audio, to play one on top of the other: this is called to @emph{mix} -them, and can be done by first merging them into a single stream and then -using the @url{http://ffmpeg.org/ffmpeg-filters.html#pan, @code{pan}} filter to mix -the channels at will. - -@item -For video, to display both together, side by side or one on top of a part of -the other; it can be done using the -@url{http://ffmpeg.org/ffmpeg-filters.html#overlay, @code{overlay}} video filter. - -@end itemize - -@anchor{How can I concatenate video files} -@section How can I concatenate video files? - -There are several solutions, depending on the exact circumstances. - -@subsection Concatenating using the concat @emph{filter} - -FFmpeg has a @url{http://ffmpeg.org/ffmpeg-filters.html#concat, -@code{concat}} filter designed specifically for that, with examples in the -documentation. This operation is recommended if you need to re-encode. 
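-
-A minimal sketch (the file names are only placeholders), joining two inputs
-that each carry one video and one audio stream:
-
-@example
-ffmpeg -i input1.mp4 -i input2.mp4 \
-  -filter_complex "[0:v][0:a][1:v][1:a]concat=n=2:v=1:a=1[v][a]" \
-  -map "[v]" -map "[a]" output.mp4
-@end example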
- -@subsection Concatenating using the concat @emph{demuxer} - -FFmpeg has a @url{http://www.ffmpeg.org/ffmpeg-formats.html#concat, -@code{concat}} demuxer which you can use when you want to avoid a re-encode and -your format doesn't support file level concatenation. - -@subsection Concatenating using the concat @emph{protocol} (file level) - -FFmpeg has a @url{http://ffmpeg.org/ffmpeg-protocols.html#concat, -@code{concat}} protocol designed specifically for that, with examples in the -documentation. - -A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to concatenate -video by merely concatenating the files containing them. - -Hence you may concatenate your multimedia files by first transcoding them to -these privileged formats, then using the humble @code{cat} command (or the -equally humble @code{copy} under Windows), and finally transcoding back to your -format of choice. - -@example -ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg -ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg -cat intermediate1.mpg intermediate2.mpg > intermediate_all.mpg -ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi -@end example - -Additionally, you can use the @code{concat} protocol instead of @code{cat} or -@code{copy} which will avoid creation of a potentially huge intermediate file. - -@example -ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg -ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg -ffmpeg -i concat:"intermediate1.mpg|intermediate2.mpg" -c copy intermediate_all.mpg -ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi -@end example - -Note that you may need to escape the character "|" which is special for many -shells. - -Another option is usage of named pipes, should your platform support it: - -@example -mkfifo intermediate1.mpg -mkfifo intermediate2.mpg -ffmpeg -i input1.avi -qscale:v 1 -y intermediate1.mpg < /dev/null & -ffmpeg -i input2.avi -qscale:v 1 -y intermediate2.mpg < /dev/null & -cat intermediate1.mpg intermediate2.mpg |\ -ffmpeg -f mpeg -i - -c:v mpeg4 -acodec libmp3lame output.avi -@end example - -@subsection Concatenating using raw audio and video - -Similarly, the yuv4mpegpipe format, and the raw video, raw audio codecs also -allow concatenation, and the transcoding step is almost lossless. -When using multiple yuv4mpegpipe(s), the first line needs to be discarded -from all but the first stream. This can be accomplished by piping through -@code{tail} as seen below. Note that when piping through @code{tail} you -must use command grouping, @code{@{ ;@}}, to background properly. 
- -For example, let's say we want to concatenate two FLV files into an -output.flv file: - -@example -mkfifo temp1.a -mkfifo temp1.v -mkfifo temp2.a -mkfifo temp2.v -mkfifo all.a -mkfifo all.v -ffmpeg -i input1.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp1.a < /dev/null & -ffmpeg -i input2.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp2.a < /dev/null & -ffmpeg -i input1.flv -an -f yuv4mpegpipe - > temp1.v < /dev/null & -@{ ffmpeg -i input2.flv -an -f yuv4mpegpipe - < /dev/null | tail -n +2 > temp2.v ; @} & -cat temp1.a temp2.a > all.a & -cat temp1.v temp2.v > all.v & -ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \ - -f yuv4mpegpipe -i all.v \ - -y output.flv -rm temp[12].[av] all.[av] -@end example - -@section -profile option fails when encoding H.264 video with AAC audio - -@command{ffmpeg} prints an error like - -@example -Undefined constant or missing '(' in 'baseline' -Unable to parse option value "baseline" -Error setting option profile to value baseline. -@end example - -Short answer: write @option{-profile:v} instead of @option{-profile}. - -Long answer: this happens because the @option{-profile} option can apply to both -video and audio. Specifically the AAC encoder also defines some profiles, none -of which are named @var{baseline}. - -The solution is to apply the @option{-profile} option to the video stream only -by using @url{http://ffmpeg.org/ffmpeg.html#Stream-specifiers-1, Stream specifiers}. -Appending @code{:v} to it will do exactly that. - -@section Using @option{-f lavfi}, audio becomes mono for no apparent reason. - -Use @option{-dumpgraph -} to find out exactly where the channel layout is -lost. - -Most likely, it is through @code{auto-inserted aresample}. Try to understand -why the converting filter was needed at that place. - -Just before the output is a likely place, as @option{-f lavfi} currently -only support packed S16. - -Then insert the correct @code{aformat} explicitly in the filtergraph, -specifying the exact format. - -@example -aformat=sample_fmts=s16:channel_layouts=stereo -@end example - -@section Why does FFmpeg not see the subtitles in my VOB file? - -VOB and a few other formats do not have a global header that describes -everything present in the file. Instead, applications are supposed to scan -the file to see what it contains. Since VOB files are frequently large, only -the beginning is scanned. If the subtitles happen only later in the file, -they will not be initally detected. - -Some applications, including the @code{ffmpeg} command-line tool, can only -work with streams that were detected during the initial scan; streams that -are detected later are ignored. - -The size of the initial scan is controlled by two options: @code{probesize} -(default ~5 Mo) and @code{analyzeduration} (default 5,000,000 µs = 5 s). For -the subtitle stream to be detected, both values must be large enough. - -@section Why was the @command{ffmpeg} @option{-sameq} option removed? What to use instead? - -The @option{-sameq} option meant "same quantizer", and made sense only in a -very limited set of cases. Unfortunately, a lot of people mistook it for -"same quality" and used it in places where it did not make sense: it had -roughly the expected visible effect, but achieved it in a very inefficient -way. - -Each encoder has its own set of options to set the quality-vs-size balance, -use the options for the encoder you are using to set the quality level to a -point acceptable for your tastes. 
The most common options to do that are -@option{-qscale} and @option{-qmax}, but you should peruse the documentation -of the encoder you chose. - -@chapter Development - -@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat? - -Yes. Check the @file{doc/examples} directory in the source -repository, also available online at: -@url{https://github.com/FFmpeg/FFmpeg/tree/master/doc/examples}. - -Examples are also installed by default, usually in -@code{$PREFIX/share/ffmpeg/examples}. - -Also you may read the Developers Guide of the FFmpeg documentation. Alternatively, -examine the source code for one of the many open source projects that -already incorporate FFmpeg at (@url{projects.html}). - -@section Can you support my C compiler XXX? - -It depends. If your compiler is C99-compliant, then patches to support -it are likely to be welcome if they do not pollute the source code -with @code{#ifdef}s related to the compiler. - -@section Is Microsoft Visual C++ supported? - -Yes. Please see the @uref{platform.html, Microsoft Visual C++} -section in the FFmpeg documentation. - -@section Can you add automake, libtool or autoconf support? - -No. These tools are too bloated and they complicate the build. - -@section Why not rewrite FFmpeg in object-oriented C++? - -FFmpeg is already organized in a highly modular manner and does not need to -be rewritten in a formal object language. Further, many of the developers -favor straight C; it works for them. For more arguments on this matter, -read @uref{http://www.tux.org/lkml/#s15, "Programming Religion"}. - -@section Why are the ffmpeg programs devoid of debugging symbols? - -The build process creates @command{ffmpeg_g}, @command{ffplay_g}, etc. which -contain full debug information. Those binaries are stripped to create -@command{ffmpeg}, @command{ffplay}, etc. If you need the debug information, use -the *_g versions. - -@section I do not like the LGPL, can I contribute code under the GPL instead? - -Yes, as long as the code is optional and can easily and cleanly be placed -under #if CONFIG_GPL without breaking anything. So, for example, a new codec -or filter would be OK under GPL while a bug fix to LGPL code would not. - -@section I'm using FFmpeg from within my C application but the linker complains about missing symbols from the libraries themselves. - -FFmpeg builds static libraries by default. In static libraries, dependencies -are not handled. That has two consequences. First, you must specify the -libraries in dependency order: @code{-lavdevice} must come before -@code{-lavformat}, @code{-lavutil} must come after everything else, etc. -Second, external libraries that are used in FFmpeg have to be specified too. - -An easy way to get the full list of required libraries in dependency order -is to use @code{pkg-config}. - -@example -c99 -o program program.c $(pkg-config --cflags --libs libavformat libavcodec) -@end example - -See @file{doc/example/Makefile} and @file{doc/example/pc-uninstalled} for -more details. - -@section I'm using FFmpeg from within my C++ application but the linker complains about missing symbols which seem to be available. - -FFmpeg is a pure C project, so to use the libraries within your C++ application -you need to explicitly state that you are using a C library. You can do this by -encompassing your FFmpeg includes using @code{extern "C"}. 
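-
-A minimal sketch of a header wrapper that works from both C and C++
-translation units:
-
-@example
-#ifdef __cplusplus
-extern "C" @{
-#endif
-#include <libavformat/avformat.h>
-#include <libavcodec/avcodec.h>
-#ifdef __cplusplus
-@}
-#endif
-@end example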
- -See @url{http://www.parashift.com/c++-faq-lite/mixing-c-and-cpp.html#faq-32.3} - -@section I'm using libavutil from within my C++ application but the compiler complains about 'UINT64_C' was not declared in this scope - -FFmpeg is a pure C project using C99 math features, in order to enable C++ -to use them you have to append -D__STDC_CONSTANT_MACROS to your CXXFLAGS - -@section I have a file in memory / a API different from *open/*read/ libc how do I use it with libavformat? - -You have to create a custom AVIOContext using @code{avio_alloc_context}, -see @file{libavformat/aviobuf.c} in FFmpeg and @file{libmpdemux/demux_lavf.c} in MPlayer or MPlayer2 sources. - -@section Where is the documentation about ffv1, msmpeg4, asv1, 4xm? - -see @url{http://www.ffmpeg.org/~michael/} - -@section How do I feed H.263-RTP (and other codecs in RTP) to libavcodec? - -Even if peculiar since it is network oriented, RTP is a container like any -other. You have to @emph{demux} RTP before feeding the payload to libavcodec. -In this specific case please look at RFC 4629 to see how it should be done. - -@section AVStream.r_frame_rate is wrong, it is much larger than the frame rate. - -@code{r_frame_rate} is NOT the average frame rate, it is the smallest frame rate -that can accurately represent all timestamps. So no, it is not -wrong if it is larger than the average! -For example, if you have mixed 25 and 30 fps content, then @code{r_frame_rate} -will be 150 (it is the least common multiple). -If you are looking for the average frame rate, see @code{AVStream.avg_frame_rate}. - -@section Why is @code{make fate} not running all tests? - -Make sure you have the fate-suite samples and the @code{SAMPLES} Make variable -or @code{FATE_SAMPLES} environment variable or the @code{--samples} -@command{configure} option is set to the right path. - -@section Why is @code{make fate} not finding the samples? - -Do you happen to have a @code{~} character in the samples path to indicate a -home directory? The value is used in ways where the shell cannot expand it, -causing FATE to not find files. Just replace @code{~} by the full path. - -@bye diff --git a/ffmpeg/doc/fate.texi b/ffmpeg/doc/fate.texi deleted file mode 100644 index 4e5cbd7..0000000 --- a/ffmpeg/doc/fate.texi +++ /dev/null @@ -1,205 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle FFmpeg Automated Testing Environment -@titlepage -@center @titlefont{FFmpeg Automated Testing Environment} -@end titlepage - -@node Top -@top - -@contents - -@chapter Introduction - - FATE is an extended regression suite on the client-side and a means -for results aggregation and presentation on the server-side. - - The first part of this document explains how you can use FATE from -your FFmpeg source directory to test your ffmpeg binary. The second -part describes how you can run FATE to submit the results to FFmpeg's -FATE server. - - In any way you can have a look at the publicly viewable FATE results -by visiting this website: - - @url{http://fate.ffmpeg.org/} - - This is especially recommended for all people contributing source -code to FFmpeg, as it can be seen if some test on some platform broke -with their recent contribution. This usually happens on the platforms -the developers could not test on. - - The second part of this document describes how you can run FATE to -submit your results to FFmpeg's FATE server. If you want to submit your -results be sure to check that your combination of CPU, OS and compiler -is not already listed on the above mentioned website. 
- - In the third part you can find a comprehensive listing of FATE makefile -targets and variables. - - -@chapter Using FATE from your FFmpeg source directory - - If you want to run FATE on your machine you need to have the samples -in place. You can get the samples via the build target fate-rsync. -Use this command from the top-level source directory: - -@example -make fate-rsync SAMPLES=fate-suite/ -make fate SAMPLES=fate-suite/ -@end example - - The above commands set the samples location by passing a makefile -variable via command line. It is also possible to set the samples -location at source configuration time by invoking configure with -`--samples=<path to the samples directory>'. Afterwards you can -invoke the makefile targets without setting the SAMPLES makefile -variable. This is illustrated by the following commands: - -@example -./configure --samples=fate-suite/ -make fate-rsync -make fate -@end example - - Yet another way to tell FATE about the location of the sample -directory is by making sure the environment variable FATE_SAMPLES -contains the path to your samples directory. This can be achieved -by e.g. putting that variable in your shell profile or by setting -it in your interactive session. - -@example -FATE_SAMPLES=fate-suite/ make fate -@end example - -@float NOTE -Do not put a '~' character in the samples path to indicate a home -directory. Because of shell nuances, this will cause FATE to fail. -@end float - -To use a custom wrapper to run the test, pass @option{--target-exec} to -@command{configure} or set the @var{TARGET_EXEC} Make variable. - - -@chapter Submitting the results to the FFmpeg result aggregation server - - To submit your results to the server you should run fate through the -shell script @file{tests/fate.sh} from the FFmpeg sources. This script needs -to be invoked with a configuration file as its first argument. - -@example -tests/fate.sh /path/to/fate_config -@end example - - A configuration file template with comments describing the individual -configuration variables can be found at @file{doc/fate_config.sh.template}. - -@ifhtml - The mentioned configuration template is also available here: -@verbatiminclude fate_config.sh.template -@end ifhtml - - Create a configuration that suits your needs, based on the configuration -template. The `slot' configuration variable can be any string that is not -yet used, but it is suggested that you name it adhering to the following -pattern <arch>-<os>-<compiler>-<compiler version>. The configuration file -itself will be sourced in a shell script, therefore all shell features may -be used. This enables you to setup the environment as you need it for your -build. - - For your first test runs the `fate_recv' variable should be empty or -commented out. This will run everything as normal except that it will omit -the submission of the results to the server. The following files should be -present in $workdir as specified in the configuration file: - -@itemize - @item configure.log - @item compile.log - @item test.log - @item report - @item version -@end itemize - - When you have everything working properly you can create an SSH key pair -and send the public key to the FATE server administrator who can be contacted -at the email address @email{fate-admin@@ffmpeg.org}. - - Configure your SSH client to use public key authentication with that key -when connecting to the FATE server. Also do not forget to check the identity -of the server and to accept its host key. 
This can usually be achieved by -running your SSH client manually and killing it after you accepted the key. -The FATE server's fingerprint is: - -@table @option -@item RSA - d3:f1:83:97:a4:75:2b:a6:fb:d6:e8:aa:81:93:97:51 -@item ECDSA - 76:9f:68:32:04:1e:d5:d4:ec:47:3f:dc:fc:18:17:86 -@end table - - If you have problems connecting to the FATE server, it may help to try out -the @command{ssh} command with one or more @option{-v} options. You should -get detailed output concerning your SSH configuration and the authentication -process. - - The only thing left is to automate the execution of the fate.sh script and -the synchronisation of the samples directory. - - -@chapter FATE makefile targets and variables - -@section Makefile targets - -@table @option -@item fate-rsync -Download/synchronize sample files to the configured samples directory. - -@item fate-list -Will list all fate/regression test targets. - -@item fate -Run the FATE test suite (requires the fate-suite dataset). -@end table - -@section Makefile variables - -@table @option -@item V -Verbosity level, can be set to 0, 1 or 2. - @itemize - @item 0: show just the test arguments - @item 1: show just the command used in the test - @item 2: show everything - @end itemize - -@item SAMPLES -Specify or override the path to the FATE samples at make time, it has a -meaning only while running the regression tests. - -@item THREADS -Specify how many threads to use while running regression tests, it is -quite useful to detect thread-related regressions. - -@item THREAD_TYPE -Specify which threading strategy test, either @var{slice} or @var{frame}, -by default @var{slice+frame} - -@item CPUFLAGS -Specify CPU flags. - -@item TARGET_EXEC -Specify or override the wrapper used to run the tests. -The @var{TARGET_EXEC} option provides a way to run FATE wrapped in -@command{valgrind}, @command{qemu-user} or @command{wine} or on remote targets -through @command{ssh}. - -@item GEN -Set to @var{1} to generate the missing or mismatched references. 
-@end table - -@section Examples - -@example -make V=1 SAMPLES=/var/fate/samples THREADS=2 CPUFLAGS=mmx fate -@end example diff --git a/ffmpeg/doc/fate_config.sh.template b/ffmpeg/doc/fate_config.sh.template deleted file mode 100644 index 1487c1d..0000000 --- a/ffmpeg/doc/fate_config.sh.template +++ /dev/null @@ -1,29 +0,0 @@ -slot= # some unique identifier -repo=git://source.ffmpeg.org/ffmpeg.git # the source repository -samples= # path to samples directory -workdir= # directory in which to do all the work -#fate_recv="ssh -T fate@fate.ffmpeg.org" # command to submit report -comment= # optional description -build_only= # set to "yes" for a compile-only instance that skips tests - -# the following are optional and map to configure options -arch= -cpu= -cross_prefix= -as= -cc= -ld= -target_os= -sysroot= -target_exec= -target_path= -target_samples= -extra_cflags= -extra_ldflags= -extra_libs= -extra_conf= # extra configure options not covered above - -#make= # name of GNU make if not 'make' -makeopts= # extra options passed to 'make' -#tar= # command to create a tar archive from its arguments on stdout, - # defaults to 'tar c' diff --git a/ffmpeg/doc/ffmpeg-bitstream-filters.texi b/ffmpeg/doc/ffmpeg-bitstream-filters.texi deleted file mode 100644 index e33e005..0000000 --- a/ffmpeg/doc/ffmpeg-bitstream-filters.texi +++ /dev/null @@ -1,45 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle FFmpeg Bitstream Filters Documentation -@titlepage -@center @titlefont{FFmpeg Bitstream Filters Documentation} -@end titlepage - -@top - -@contents - -@chapter Description -@c man begin DESCRIPTION - -This document describes the bitstream filters provided by the -libavcodec library. - -A bitstream filter operates on the encoded stream data, and performs -bitstream level modifications without performing decoding. - -@c man end DESCRIPTION - -@include bitstream_filters.texi - -@chapter See Also - -@ifhtml -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{libavcodec.html,libavcodec} -@end ifhtml - -@ifnothtml -ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavcodec(3) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename ffmpeg-bitstream-filters -@settitle FFmpeg bitstream filters - -@end ignore - -@bye diff --git a/ffmpeg/doc/ffmpeg-codecs.texi b/ffmpeg/doc/ffmpeg-codecs.texi deleted file mode 100644 index 6f8f5a3..0000000 --- a/ffmpeg/doc/ffmpeg-codecs.texi +++ /dev/null @@ -1,42 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle FFmpeg Codecs Documentation -@titlepage -@center @titlefont{FFmpeg Codecs Documentation} -@end titlepage - -@top - -@contents - -@chapter Description -@c man begin DESCRIPTION - -This document describes the codecs (decoders and encoders) provided by -the libavcodec library. 
- -@c man end DESCRIPTION - -@include codecs.texi - -@chapter See Also - -@ifhtml -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{libavcodec.html,libavcodec} -@end ifhtml - -@ifnothtml -ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavcodec(3) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename ffmpeg-codecs -@settitle FFmpeg codecs - -@end ignore - -@bye diff --git a/ffmpeg/doc/ffmpeg-devices.texi b/ffmpeg/doc/ffmpeg-devices.texi deleted file mode 100644 index b44bd72..0000000 --- a/ffmpeg/doc/ffmpeg-devices.texi +++ /dev/null @@ -1,42 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle FFmpeg Devices Documentation -@titlepage -@center @titlefont{FFmpeg Devices Documentation} -@end titlepage - -@top - -@contents - -@chapter Description -@c man begin DESCRIPTION - -This document describes the input and output devices provided by the -libavdevice library. - -@c man end DESCRIPTION - -@include devices.texi - -@chapter See Also - -@ifhtml -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{libavdevice.html,libavdevice} -@end ifhtml - -@ifnothtml -ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavdevice(3) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename ffmpeg-devices -@settitle FFmpeg devices - -@end ignore - -@bye diff --git a/ffmpeg/doc/ffmpeg-filters.texi b/ffmpeg/doc/ffmpeg-filters.texi deleted file mode 100644 index bb920ce..0000000 --- a/ffmpeg/doc/ffmpeg-filters.texi +++ /dev/null @@ -1,42 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle FFmpeg Filters Documentation -@titlepage -@center @titlefont{FFmpeg Filters Documentation} -@end titlepage - -@top - -@contents - -@chapter Description -@c man begin DESCRIPTION - -This document describes filters, sources, and sinks provided by the -libavfilter library. - -@c man end DESCRIPTION - -@include filters.texi - -@chapter See Also - -@ifhtml -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{libavfilter.html,libavfilter} -@end ifhtml - -@ifnothtml -ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavfilter(3) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename ffmpeg-filters -@settitle FFmpeg filters - -@end ignore - -@bye diff --git a/ffmpeg/doc/ffmpeg-formats.texi b/ffmpeg/doc/ffmpeg-formats.texi deleted file mode 100644 index e205caa..0000000 --- a/ffmpeg/doc/ffmpeg-formats.texi +++ /dev/null @@ -1,42 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle FFmpeg Formats Documentation -@titlepage -@center @titlefont{FFmpeg Formats Documentation} -@end titlepage - -@top - -@contents - -@chapter Description -@c man begin DESCRIPTION - -This document describes the supported formats (muxers and demuxers) -provided by the libavformat library. 
- -@c man end DESCRIPTION - -@include formats.texi - -@chapter See Also - -@ifhtml -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{libavformat.html,libavformat} -@end ifhtml - -@ifnothtml -ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavformat(3) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename ffmpeg-formats -@settitle FFmpeg formats - -@end ignore - -@bye diff --git a/ffmpeg/doc/ffmpeg-protocols.texi b/ffmpeg/doc/ffmpeg-protocols.texi deleted file mode 100644 index d992e75..0000000 --- a/ffmpeg/doc/ffmpeg-protocols.texi +++ /dev/null @@ -1,42 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle FFmpeg Protocols Documentation -@titlepage -@center @titlefont{FFmpeg Protocols Documentation} -@end titlepage - -@top - -@contents - -@chapter Description -@c man begin DESCRIPTION - -This document describes the input and output protocols provided by the -libavformat library. - -@c man end DESCRIPTION - -@include protocols.texi - -@chapter See Also - -@ifhtml -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{libavformat.html,libavformat} -@end ifhtml - -@ifnothtml -ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavformat(3) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename ffmpeg-protocols -@settitle FFmpeg protocols - -@end ignore - -@bye diff --git a/ffmpeg/doc/ffmpeg-resampler.texi b/ffmpeg/doc/ffmpeg-resampler.texi deleted file mode 100644 index 69767a2..0000000 --- a/ffmpeg/doc/ffmpeg-resampler.texi +++ /dev/null @@ -1,44 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle FFmpeg Resampler Documentation -@titlepage -@center @titlefont{FFmpeg Resampler Documentation} -@end titlepage - -@top - -@contents - -@chapter Description -@c man begin DESCRIPTION - -The FFmpeg resampler provides a high-level interface to the -libswresample library audio resampling utilities. In particular it -allows to perform audio resampling, audio channel layout rematrixing, -and convert audio format and packing layout. - -@c man end DESCRIPTION - -@include resampler.texi - -@chapter See Also - -@ifhtml -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{libswresample.html,libswresample} -@end ifhtml - -@ifnothtml -ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswresample(3) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename ffmpeg-resampler -@settitle FFmpeg Resampler - -@end ignore - -@bye diff --git a/ffmpeg/doc/ffmpeg-scaler.texi b/ffmpeg/doc/ffmpeg-scaler.texi deleted file mode 100644 index 1eb8cd6..0000000 --- a/ffmpeg/doc/ffmpeg-scaler.texi +++ /dev/null @@ -1,43 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle FFmpeg Scaler Documentation -@titlepage -@center @titlefont{FFmpeg Scaler Documentation} -@end titlepage - -@top - -@contents - -@chapter Description -@c man begin DESCRIPTION - -The FFmpeg rescaler provides a high-level interface to the libswscale -library image conversion utilities. In particular it allows to perform -image rescaling and pixel format conversion. 
- -@c man end DESCRIPTION - -@include scaler.texi - -@chapter See Also - -@ifhtml -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{libswscale.html,libswscale} -@end ifhtml - -@ifnothtml -ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswscale(3) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename ffmpeg-scaler -@settitle FFmpeg video scaling and pixel format converter - -@end ignore - -@bye diff --git a/ffmpeg/doc/ffmpeg-utils.texi b/ffmpeg/doc/ffmpeg-utils.texi deleted file mode 100644 index 581e2ea..0000000 --- a/ffmpeg/doc/ffmpeg-utils.texi +++ /dev/null @@ -1,42 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle FFmpeg Utilities Documentation -@titlepage -@center @titlefont{FFmpeg Utilities Documentation} -@end titlepage - -@top - -@contents - -@chapter Description -@c man begin DESCRIPTION - -This document describes some generic features and utilities provided -by the libavutil library. - -@c man end DESCRIPTION - -@include utils.texi - -@chapter See Also - -@ifhtml -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{libavutil.html,libavutil} -@end ifhtml - -@ifnothtml -ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavutil(3) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename ffmpeg-utils -@settitle FFmpeg utilities - -@end ignore - -@bye diff --git a/ffmpeg/doc/ffmpeg.texi b/ffmpeg/doc/ffmpeg.texi deleted file mode 100644 index 0a930ce..0000000 --- a/ffmpeg/doc/ffmpeg.texi +++ /dev/null @@ -1,1497 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle ffmpeg Documentation -@titlepage -@center @titlefont{ffmpeg Documentation} -@end titlepage - -@top - -@contents - -@chapter Synopsis - -ffmpeg [@var{global_options}] @{[@var{input_file_options}] -i @file{input_file}@} ... @{[@var{output_file_options}] @file{output_file}@} ... - -@chapter Description -@c man begin DESCRIPTION - -@command{ffmpeg} is a very fast video and audio converter that can also grab from -a live audio/video source. It can also convert between arbitrary sample -rates and resize video on the fly with a high quality polyphase filter. - -@command{ffmpeg} reads from an arbitrary number of input "files" (which can be regular -files, pipes, network streams, grabbing devices, etc.), specified by the -@code{-i} option, and writes to an arbitrary number of output "files", which are -specified by a plain output filename. Anything found on the command line which -cannot be interpreted as an option is considered to be an output filename. - -Each input or output file can, in principle, contain any number of streams of -different types (video/audio/subtitle/attachment/data). The allowed number and/or -types of streams may be limited by the container format. Selecting which -streams from which inputs will go into which output is either done automatically -or with the @code{-map} option (see the Stream selection chapter). - -To refer to input files in options, you must use their indices (0-based). E.g. -the first input file is @code{0}, the second is @code{1}, etc. Similarly, streams -within a file are referred to by their indices. E.g. @code{2:3} refers to the -fourth stream in the third input file. Also see the Stream specifiers chapter. - -As a general rule, options are applied to the next specified -file. Therefore, order is important, and you can have the same -option on the command line multiple times. 
Each occurrence is -then applied to the next input or output file. -Exceptions from this rule are the global options (e.g. verbosity level), -which should be specified first. - -Do not mix input and output files -- first specify all input files, then all -output files. Also do not mix options which belong to different files. All -options apply ONLY to the next input or output file and are reset between files. - -@itemize -@item -To set the video bitrate of the output file to 64 kbit/s: -@example -ffmpeg -i input.avi -b:v 64k -bufsize 64k output.avi -@end example - -@item -To force the frame rate of the output file to 24 fps: -@example -ffmpeg -i input.avi -r 24 output.avi -@end example - -@item -To force the frame rate of the input file (valid for raw formats only) -to 1 fps and the frame rate of the output file to 24 fps: -@example -ffmpeg -r 1 -i input.m2v -r 24 output.avi -@end example -@end itemize - -The format option may be needed for raw input files. - -@c man end DESCRIPTION - -@chapter Detailed description -@c man begin DETAILED DESCRIPTION - -The transcoding process in @command{ffmpeg} for each output can be described by -the following diagram: - -@example - _______ ______________ _________ ______________ ________ -| | | | | | | | | | -| input | demuxer | encoded data | decoder | decoded | encoder | encoded data | muxer | output | -| file | ---------> | packets | ---------> | frames | ---------> | packets | -------> | file | -|_______| |______________| |_________| |______________| |________| - -@end example - -@command{ffmpeg} calls the libavformat library (containing demuxers) to read -input files and get packets containing encoded data from them. When there are -multiple input files, @command{ffmpeg} tries to keep them synchronized by -tracking lowest timestamp on any active input stream. - -Encoded packets are then passed to the decoder (unless streamcopy is selected -for the stream, see further for a description). The decoder produces -uncompressed frames (raw video/PCM audio/...) which can be processed further by -filtering (see next section). After filtering, the frames are passed to the -encoder, which encodes them and outputs encoded packets. Finally those are -passed to the muxer, which writes the encoded packets to the output file. - -@section Filtering -Before encoding, @command{ffmpeg} can process raw audio and video frames using -filters from the libavfilter library. Several chained filters form a filter -graph. @command{ffmpeg} distinguishes between two types of filtergraphs: -simple and complex. - -@subsection Simple filtergraphs -Simple filtergraphs are those that have exactly one input and output, both of -the same type. In the above diagram they can be represented by simply inserting -an additional step between decoding and encoding: - -@example - _________ __________ ______________ -| | | | | | -| decoded | simple filtergraph | filtered | encoder | encoded data | -| frames | -------------------> | frames | ---------> | packets | -|_________| |__________| |______________| - -@end example - -Simple filtergraphs are configured with the per-stream @option{-filter} option -(with @option{-vf} and @option{-af} aliases for video and audio respectively). 
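On the command line the chained filters are given as a comma-separated list. A minimal sketch using @option{-vf} (the @code{yadif}, @code{scale} and @code{fps} filters and their parameters are illustrative choices only):

@example
ffmpeg -i input.avi -vf "yadif,scale=1280:720,fps=25" output.avi
@end example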
-A simple filtergraph for video can look for example like this: - -@example - _______ _____________ _______ _____ ________ -| | | | | | | | | | -| input | ---> | deinterlace | ---> | scale | ---> | fps | ---> | output | -|_______| |_____________| |_______| |_____| |________| - -@end example - -Note that some filters change frame properties but not frame contents. E.g. the -@code{fps} filter in the example above changes number of frames, but does not -touch the frame contents. Another example is the @code{setpts} filter, which -only sets timestamps and otherwise passes the frames unchanged. - -@subsection Complex filtergraphs -Complex filtergraphs are those which cannot be described as simply a linear -processing chain applied to one stream. This is the case, for example, when the graph has -more than one input and/or output, or when output stream type is different from -input. They can be represented with the following diagram: - -@example - _________ -| | -| input 0 |\ __________ -|_________| \ | | - \ _________ /| output 0 | - \ | | / |__________| - _________ \| complex | / -| | | |/ -| input 1 |---->| filter |\ -|_________| | | \ __________ - /| graph | \ | | - / | | \| output 1 | - _________ / |_________| |__________| -| | / -| input 2 |/ -|_________| - -@end example - -Complex filtergraphs are configured with the @option{-filter_complex} option. -Note that this option is global, since a complex filtergraph, by its nature, -cannot be unambiguously associated with a single stream or file. - -The @option{-lavfi} option is equivalent to @option{-filter_complex}. - -A trivial example of a complex filtergraph is the @code{overlay} filter, which -has two video inputs and one video output, containing one video overlaid on top -of the other. Its audio counterpart is the @code{amix} filter. - -@section Stream copy -Stream copy is a mode selected by supplying the @code{copy} parameter to the -@option{-codec} option. It makes @command{ffmpeg} omit the decoding and encoding -step for the specified stream, so it does only demuxing and muxing. It is useful -for changing the container format or modifying container-level metadata. The -diagram above will, in this case, simplify to this: - -@example - _______ ______________ ________ -| | | | | | -| input | demuxer | encoded data | muxer | output | -| file | ---------> | packets | -------> | file | -|_______| |______________| |________| - -@end example - -Since there is no decoding or encoding, it is very fast and there is no quality -loss. However, it might not work in some cases because of many factors. Applying -filters is obviously also impossible, since filters work on uncompressed data. - -@c man end DETAILED DESCRIPTION - -@chapter Stream selection -@c man begin STREAM SELECTION - -By default, @command{ffmpeg} includes only one stream of each type (video, audio, subtitle) -present in the input files and adds them to each output file. It picks the -"best" of each based upon the following criteria: for video, it is the stream -with the highest resolution, for audio, it is the stream with the most channels, for -subtitles, it is the first subtitle stream. In the case where several streams of -the same type rate equally, the stream with the lowest index is chosen. - -You can disable some of those defaults by using the @code{-vn/-an/-sn} options. For -full manual control, use the @code{-map} option, which disables the defaults just -described. 
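For example, to keep the automatically selected video stream but drop audio and subtitles entirely (file names are placeholders):

@example
ffmpeg -i INPUT -an -sn OUTPUT
@end example

Examples of explicit stream selection with @code{-map} are given in the Options chapter below.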
- -@c man end STREAM SELECTION - -@chapter Options -@c man begin OPTIONS - -@include fftools-common-opts.texi - -@section Main options - -@table @option - -@item -f @var{fmt} (@emph{input/output}) -Force input or output file format. The format is normally auto detected for input -files and guessed from the file extension for output files, so this option is not -needed in most cases. - -@item -i @var{filename} (@emph{input}) -input file name - -@item -y (@emph{global}) -Overwrite output files without asking. - -@item -n (@emph{global}) -Do not overwrite output files, and exit immediately if a specified -output file already exists. - -@item -c[:@var{stream_specifier}] @var{codec} (@emph{input/output,per-stream}) -@itemx -codec[:@var{stream_specifier}] @var{codec} (@emph{input/output,per-stream}) -Select an encoder (when used before an output file) or a decoder (when used -before an input file) for one or more streams. @var{codec} is the name of a -decoder/encoder or a special value @code{copy} (output only) to indicate that -the stream is not to be re-encoded. - -For example -@example -ffmpeg -i INPUT -map 0 -c:v libx264 -c:a copy OUTPUT -@end example -encodes all video streams with libx264 and copies all audio streams. - -For each stream, the last matching @code{c} option is applied, so -@example -ffmpeg -i INPUT -map 0 -c copy -c:v:1 libx264 -c:a:137 libvorbis OUTPUT -@end example -will copy all the streams except the second video, which will be encoded with -libx264, and the 138th audio, which will be encoded with libvorbis. - -@item -t @var{duration} (@emph{output}) -Stop writing the output after its duration reaches @var{duration}. -@var{duration} may be a number in seconds, or in @code{hh:mm:ss[.xxx]} form. - --to and -t are mutually exclusive and -t has priority. - -@item -to @var{position} (@emph{output}) -Stop writing the output at @var{position}. -@var{position} may be a number in seconds, or in @code{hh:mm:ss[.xxx]} form. - --to and -t are mutually exclusive and -t has priority. - -@item -fs @var{limit_size} (@emph{output}) -Set the file size limit, expressed in bytes. - -@item -ss @var{position} (@emph{input/output}) -When used as an input option (before @code{-i}), seeks in this input file to -@var{position}. Note the in most formats it is not possible to seek exactly, so -@command{ffmpeg} will seek to the closest seek point before @var{position}. -When transcoding and @option{-accurate_seek} is enabled (the default), this -extra segment between the seek point and @var{position} will be decoded and -discarded. When doing stream copy or when @option{-noaccurate_seek} is used, it -will be preserved. - -When used as an output option (before an output filename), decodes but discards -input until the timestamps reach @var{position}. - -@var{position} may be either in seconds or in @code{hh:mm:ss[.xxx]} form. - -@item -itsoffset @var{offset} (@emph{input}) -Set the input time offset in seconds. -@code{[-]hh:mm:ss[.xxx]} syntax is also supported. -The offset is added to the timestamps of the input files. -Specifying a positive offset means that the corresponding -streams are delayed by @var{offset} seconds. - -@item -timestamp @var{time} (@emph{output}) -Set the recording timestamp in the container. -The syntax for @var{time} is: -@example -now|([(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...])|(HHMMSS[.m...]))[Z|z]) -@end example -If the value is "now" it takes the current time. -Time is local time unless 'Z' or 'z' is appended, in which case it is -interpreted as UTC. 
-If the year-month-day part is not specified it takes the current -year-month-day. - -@item -metadata[:metadata_specifier] @var{key}=@var{value} (@emph{output,per-metadata}) -Set a metadata key/value pair. - -An optional @var{metadata_specifier} may be given to set metadata -on streams or chapters. See @code{-map_metadata} documentation for -details. - -This option overrides metadata set with @code{-map_metadata}. It is -also possible to delete metadata by using an empty value. - -For example, for setting the title in the output file: -@example -ffmpeg -i in.avi -metadata title="my title" out.flv -@end example - -To set the language of the first audio stream: -@example -ffmpeg -i INPUT -metadata:s:a:1 language=eng OUTPUT -@end example - -@item -target @var{type} (@emph{output}) -Specify target file type (@code{vcd}, @code{svcd}, @code{dvd}, @code{dv}, -@code{dv50}). @var{type} may be prefixed with @code{pal-}, @code{ntsc-} or -@code{film-} to use the corresponding standard. All the format options -(bitrate, codecs, buffer sizes) are then set automatically. You can just type: - -@example -ffmpeg -i myfile.avi -target vcd /tmp/vcd.mpg -@end example - -Nevertheless you can specify additional options as long as you know -they do not conflict with the standard, as in: - -@example -ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg -@end example - -@item -dframes @var{number} (@emph{output}) -Set the number of data frames to record. This is an alias for @code{-frames:d}. - -@item -frames[:@var{stream_specifier}] @var{framecount} (@emph{output,per-stream}) -Stop writing to the stream after @var{framecount} frames. - -@item -q[:@var{stream_specifier}] @var{q} (@emph{output,per-stream}) -@itemx -qscale[:@var{stream_specifier}] @var{q} (@emph{output,per-stream}) -Use fixed quality scale (VBR). The meaning of @var{q}/@var{qscale} is -codec-dependent. -If @var{qscale} is used without a @var{stream_specifier} then it applies only -to the video stream, this is to maintain compatibility with previous behavior -and as specifying the same codec specific value to 2 different codecs that is -audio and video generally is not what is intended when no stream_specifier is -used. - -@anchor{filter_option} -@item -filter[:@var{stream_specifier}] @var{filtergraph} (@emph{output,per-stream}) -Create the filtergraph specified by @var{filtergraph} and use it to -filter the stream. - -@var{filtergraph} is a description of the filtergraph to apply to -the stream, and must have a single input and a single output of the -same type of the stream. In the filtergraph, the input is associated -to the label @code{in}, and the output to the label @code{out}. See -the ffmpeg-filters manual for more information about the filtergraph -syntax. - -See the @ref{filter_complex_option,,-filter_complex option} if you -want to create filtergraphs with multiple inputs and/or outputs. - -@item -filter_script[:@var{stream_specifier}] @var{filename} (@emph{output,per-stream}) -This option is similar to @option{-filter}, the only difference is that its -argument is the name of the file from which a filtergraph description is to be -read. - -@item -pre[:@var{stream_specifier}] @var{preset_name} (@emph{output,per-stream}) -Specify the preset for matching stream(s). - -@item -stats (@emph{global}) -Print encoding progress/statistics. It is on by default, to explicitly -disable it you need to specify @code{-nostats}. - -@item -progress @var{url} (@emph{global}) -Send program-friendly progress information to @var{url}. 
- -Progress information is written approximately every second and at the end of -the encoding process. It is made of "@var{key}=@var{value}" lines. @var{key} -consists of only alphanumeric characters. The last key of a sequence of -progress information is always "progress". - -@item -stdin -Enable interaction on standard input. On by default unless standard input is -used as an input. To explicitly disable interaction you need to specify -@code{-nostdin}. - -Disabling interaction on standard input is useful, for example, if -ffmpeg is in the background process group. Roughly the same result can -be achieved with @code{ffmpeg ... < /dev/null} but it requires a -shell. - -@item -debug_ts (@emph{global}) -Print timestamp information. It is off by default. This option is -mostly useful for testing and debugging purposes, and the output -format may change from one version to another, so it should not be -employed by portable scripts. - -See also the option @code{-fdebug ts}. - -@item -attach @var{filename} (@emph{output}) -Add an attachment to the output file. This is supported by a few formats -like Matroska for e.g. fonts used in rendering subtitles. Attachments -are implemented as a specific type of stream, so this option will add -a new stream to the file. It is then possible to use per-stream options -on this stream in the usual way. Attachment streams created with this -option will be created after all the other streams (i.e. those created -with @code{-map} or automatic mappings). - -Note that for Matroska you also have to set the mimetype metadata tag: -@example -ffmpeg -i INPUT -attach DejaVuSans.ttf -metadata:s:2 mimetype=application/x-truetype-font out.mkv -@end example -(assuming that the attachment stream will be third in the output file). - -@item -dump_attachment[:@var{stream_specifier}] @var{filename} (@emph{input,per-stream}) -Extract the matching attachment stream into a file named @var{filename}. If -@var{filename} is empty, then the value of the @code{filename} metadata tag -will be used. - -E.g. to extract the first attachment to a file named 'out.ttf': -@example -ffmpeg -dump_attachment:t:0 out.ttf -i INPUT -@end example -To extract all attachments to files determined by the @code{filename} tag: -@example -ffmpeg -dump_attachment:t "" -i INPUT -@end example - -Technical note -- attachments are implemented as codec extradata, so this -option can actually be used to extract extradata from any stream, not just -attachments. - -@end table - -@section Video Options - -@table @option -@item -vframes @var{number} (@emph{output}) -Set the number of video frames to record. This is an alias for @code{-frames:v}. -@item -r[:@var{stream_specifier}] @var{fps} (@emph{input/output,per-stream}) -Set frame rate (Hz value, fraction or abbreviation). - -As an input option, ignore any timestamps stored in the file and instead -generate timestamps assuming constant frame rate @var{fps}. - -As an output option, duplicate or drop input frames to achieve constant output -frame rate @var{fps}. - -@item -s[:@var{stream_specifier}] @var{size} (@emph{input/output,per-stream}) -Set frame size. - -As an input option, this is a shortcut for the @option{video_size} private -option, recognized by some demuxers for which the frame size is either not -stored in the file or is configurable -- e.g. raw video or video grabbers. - -As an output option, this inserts the @code{scale} video filter to the -@emph{end} of the corresponding filtergraph. 
Please use the @code{scale} filter -directly to insert it at the beginning or some other place. - -The format is @samp{wxh} (default - same as source). - -@item -aspect[:@var{stream_specifier}] @var{aspect} (@emph{output,per-stream}) -Set the video display aspect ratio specified by @var{aspect}. - -@var{aspect} can be a floating point number string, or a string of the -form @var{num}:@var{den}, where @var{num} and @var{den} are the -numerator and denominator of the aspect ratio. For example "4:3", -"16:9", "1.3333", and "1.7777" are valid argument values. - -If used together with @option{-vcodec copy}, it will affect the aspect ratio -stored at container level, but not the aspect ratio stored in encoded -frames, if it exists. - -@item -vn (@emph{output}) -Disable video recording. - -@item -vcodec @var{codec} (@emph{output}) -Set the video codec. This is an alias for @code{-codec:v}. - -@item -pass[:@var{stream_specifier}] @var{n} (@emph{output,per-stream}) -Select the pass number (1 or 2). It is used to do two-pass -video encoding. The statistics of the video are recorded in the first -pass into a log file (see also the option -passlogfile), -and in the second pass that log file is used to generate the video -at the exact requested bitrate. -On pass 1, you may just deactivate audio and set output to null, -examples for Windows and Unix: -@example -ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y NUL -ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y /dev/null -@end example - -@item -passlogfile[:@var{stream_specifier}] @var{prefix} (@emph{output,per-stream}) -Set two-pass log file name prefix to @var{prefix}, the default file name -prefix is ``ffmpeg2pass''. The complete file name will be -@file{PREFIX-N.log}, where N is a number specific to the output -stream - -@item -vlang @var{code} -Set the ISO 639 language code (3 letters) of the current video stream. - -@item -vf @var{filtergraph} (@emph{output}) -Create the filtergraph specified by @var{filtergraph} and use it to -filter the stream. - -This is an alias for @code{-filter:v}, see the @ref{filter_option,,-filter option}. -@end table - -@section Advanced Video Options - -@table @option -@item -pix_fmt[:@var{stream_specifier}] @var{format} (@emph{input/output,per-stream}) -Set pixel format. Use @code{-pix_fmts} to show all the supported -pixel formats. -If the selected pixel format can not be selected, ffmpeg will print a -warning and select the best pixel format supported by the encoder. -If @var{pix_fmt} is prefixed by a @code{+}, ffmpeg will exit with an error -if the requested pixel format can not be selected, and automatic conversions -inside filtergraphs are disabled. -If @var{pix_fmt} is a single @code{+}, ffmpeg selects the same pixel format -as the input (or graph output) and automatic conversions are disabled. - -@item -sws_flags @var{flags} (@emph{input/output}) -Set SwScaler flags. -@item -vdt @var{n} -Discard threshold. - -@item -rc_override[:@var{stream_specifier}] @var{override} (@emph{output,per-stream}) -Rate control override for specific intervals, formatted as "int,int,int" -list separated with slashes. Two first values are the beginning and -end frame numbers, last one is quantizer to use if positive, or quality -factor if negative. - -@item -ilme -Force interlacing support in encoder (MPEG-2 and MPEG-4 only). -Use this option if your input file is interlaced and you want -to keep the interlaced format for minimum losses. 
-The alternative is to deinterlace the input stream with -@option{-deinterlace}, but deinterlacing introduces losses. -@item -psnr -Calculate PSNR of compressed frames. -@item -vstats -Dump video coding statistics to @file{vstats_HHMMSS.log}. -@item -vstats_file @var{file} -Dump video coding statistics to @var{file}. -@item -top[:@var{stream_specifier}] @var{n} (@emph{output,per-stream}) -top=1/bottom=0/auto=-1 field first -@item -dc @var{precision} -Intra_dc_precision. -@item -vtag @var{fourcc/tag} (@emph{output}) -Force video tag/fourcc. This is an alias for @code{-tag:v}. -@item -qphist (@emph{global}) -Show QP histogram -@item -vbsf @var{bitstream_filter} -Deprecated see -bsf - -@item -force_key_frames[:@var{stream_specifier}] @var{time}[,@var{time}...] (@emph{output,per-stream}) -@item -force_key_frames[:@var{stream_specifier}] expr:@var{expr} (@emph{output,per-stream}) -Force key frames at the specified timestamps, more precisely at the first -frames after each specified time. - -If the argument is prefixed with @code{expr:}, the string @var{expr} -is interpreted like an expression and is evaluated for each frame. A -key frame is forced in case the evaluation is non-zero. - -If one of the times is "@code{chapters}[@var{delta}]", it is expanded into -the time of the beginning of all chapters in the file, shifted by -@var{delta}, expressed as a time in seconds. -This option can be useful to ensure that a seek point is present at a -chapter mark or any other designated place in the output file. - -For example, to insert a key frame at 5 minutes, plus key frames 0.1 second -before the beginning of every chapter: -@example --force_key_frames 0:05:00,chapters-0.1 -@end example - -The expression in @var{expr} can contain the following constants: -@table @option -@item n -the number of current processed frame, starting from 0 -@item n_forced -the number of forced frames -@item prev_forced_n -the number of the previous forced frame, it is @code{NAN} when no -keyframe was forced yet -@item prev_forced_t -the time of the previous forced frame, it is @code{NAN} when no -keyframe was forced yet -@item t -the time of the current processed frame -@end table - -For example to force a key frame every 5 seconds, you can specify: -@example --force_key_frames expr:gte(t,n_forced*5) -@end example - -To force a key frame 5 seconds after the time of the last forced one, -starting from second 13: -@example --force_key_frames expr:if(isnan(prev_forced_t),gte(t,13),gte(t,prev_forced_t+5)) -@end example - -Note that forcing too many keyframes is very harmful for the lookahead -algorithms of certain encoders: using fixed-GOP options or similar -would be more efficient. - -@item -copyinkf[:@var{stream_specifier}] (@emph{output,per-stream}) -When doing stream copy, copy also non-key frames found at the -beginning. - -@item -hwaccel[:@var{stream_specifier}] @var{hwaccel} (@emph{input,per-stream}) -Use hardware acceleration to decode the matching stream(s). The allowed values -of @var{hwaccel} are: -@table @option -@item none -Do not use any hardware acceleration (the default). - -@item auto -Automatically select the hardware acceleration method. - -@item vdpau -Use VDPAU (Video Decode and Presentation API for Unix) hardware acceleration. -@end table - -This option has no effect if the selected hwaccel is not available or not -supported by the chosen decoder. - -Note that most acceleration methods are intended for playback and will not be -faster than software decoding on modern CPUs. 
Additionally, @command{ffmpeg} -will usually need to copy the decoded frames from the GPU memory into the system -memory, resulting in further performance loss. This option is thus mainly -useful for testing. - -@item -hwaccel_device[:@var{stream_specifier}] @var{hwaccel_device} (@emph{input,per-stream}) -Select a device to use for hardware acceleration. - -This option only makes sense when the @option{-hwaccel} option is also -specified. Its exact meaning depends on the specific hardware acceleration -method chosen. - -@table @option -@item vdpau -For VDPAU, this option specifies the X11 display/screen to use. If this option -is not specified, the value of the @var{DISPLAY} environment variable is used -@end table -@end table - -@section Audio Options - -@table @option -@item -aframes @var{number} (@emph{output}) -Set the number of audio frames to record. This is an alias for @code{-frames:a}. -@item -ar[:@var{stream_specifier}] @var{freq} (@emph{input/output,per-stream}) -Set the audio sampling frequency. For output streams it is set by -default to the frequency of the corresponding input stream. For input -streams this option only makes sense for audio grabbing devices and raw -demuxers and is mapped to the corresponding demuxer options. -@item -aq @var{q} (@emph{output}) -Set the audio quality (codec-specific, VBR). This is an alias for -q:a. -@item -ac[:@var{stream_specifier}] @var{channels} (@emph{input/output,per-stream}) -Set the number of audio channels. For output streams it is set by -default to the number of input audio channels. For input streams -this option only makes sense for audio grabbing devices and raw demuxers -and is mapped to the corresponding demuxer options. -@item -an (@emph{output}) -Disable audio recording. -@item -acodec @var{codec} (@emph{input/output}) -Set the audio codec. This is an alias for @code{-codec:a}. -@item -sample_fmt[:@var{stream_specifier}] @var{sample_fmt} (@emph{output,per-stream}) -Set the audio sample format. Use @code{-sample_fmts} to get a list -of supported sample formats. - -@item -af @var{filtergraph} (@emph{output}) -Create the filtergraph specified by @var{filtergraph} and use it to -filter the stream. - -This is an alias for @code{-filter:a}, see the @ref{filter_option,,-filter option}. -@end table - -@section Advanced Audio options: - -@table @option -@item -atag @var{fourcc/tag} (@emph{output}) -Force audio tag/fourcc. This is an alias for @code{-tag:a}. -@item -absf @var{bitstream_filter} -Deprecated, see -bsf -@item -guess_layout_max @var{channels} (@emph{input,per-stream}) -If some input channel layout is not known, try to guess only if it -corresponds to at most the specified number of channels. For example, 2 -tells to @command{ffmpeg} to recognize 1 channel as mono and 2 channels as -stereo but not 6 channels as 5.1. The default is to always try to guess. Use -0 to disable all guessing. -@end table - -@section Subtitle options: - -@table @option -@item -slang @var{code} -Set the ISO 639 language code (3 letters) of the current subtitle stream. -@item -scodec @var{codec} (@emph{input/output}) -Set the subtitle codec. This is an alias for @code{-codec:s}. -@item -sn (@emph{output}) -Disable subtitle recording. -@item -sbsf @var{bitstream_filter} -Deprecated, see -bsf -@end table - -@section Advanced Subtitle options: - -@table @option - -@item -fix_sub_duration -Fix subtitles durations. For each subtitle, wait for the next packet in the -same stream and adjust the duration of the first to avoid overlap. 
This is -necessary with some subtitles codecs, especially DVB subtitles, because the -duration in the original packet is only a rough estimate and the end is -actually marked by an empty subtitle frame. Failing to use this option when -necessary can result in exaggerated durations or muxing failures due to -non-monotonic timestamps. - -Note that this option will delay the output of all data until the next -subtitle packet is decoded: it may increase memory consumption and latency a -lot. - -@item -canvas_size @var{size} -Set the size of the canvas used to render subtitles. - -@end table - -@section Advanced options - -@table @option -@item -map [-]@var{input_file_id}[:@var{stream_specifier}][,@var{sync_file_id}[:@var{stream_specifier}]] | @var{[linklabel]} (@emph{output}) - -Designate one or more input streams as a source for the output file. Each input -stream is identified by the input file index @var{input_file_id} and -the input stream index @var{input_stream_id} within the input -file. Both indices start at 0. If specified, -@var{sync_file_id}:@var{stream_specifier} sets which input stream -is used as a presentation sync reference. - -The first @code{-map} option on the command line specifies the -source for output stream 0, the second @code{-map} option specifies -the source for output stream 1, etc. - -A @code{-} character before the stream identifier creates a "negative" mapping. -It disables matching streams from already created mappings. - -An alternative @var{[linklabel]} form will map outputs from complex filter -graphs (see the @option{-filter_complex} option) to the output file. -@var{linklabel} must correspond to a defined output link label in the graph. - -For example, to map ALL streams from the first input file to output -@example -ffmpeg -i INPUT -map 0 output -@end example - -For example, if you have two audio streams in the first input file, -these streams are identified by "0:0" and "0:1". You can use -@code{-map} to select which streams to place in an output file. For -example: -@example -ffmpeg -i INPUT -map 0:1 out.wav -@end example -will map the input stream in @file{INPUT} identified by "0:1" to -the (single) output stream in @file{out.wav}. - -For example, to select the stream with index 2 from input file -@file{a.mov} (specified by the identifier "0:2"), and stream with -index 6 from input @file{b.mov} (specified by the identifier "1:6"), -and copy them to the output file @file{out.mov}: -@example -ffmpeg -i a.mov -i b.mov -c copy -map 0:2 -map 1:6 out.mov -@end example - -To select all video and the third audio stream from an input file: -@example -ffmpeg -i INPUT -map 0:v -map 0:a:2 OUTPUT -@end example - -To map all the streams except the second audio, use negative mappings -@example -ffmpeg -i INPUT -map 0 -map -0:a:1 OUTPUT -@end example - -Note that using this option disables the default mappings for this output file. - -@item -map_channel [@var{input_file_id}.@var{stream_specifier}.@var{channel_id}|-1][:@var{output_file_id}.@var{stream_specifier}] -Map an audio channel from a given input to an output. If -@var{output_file_id}.@var{stream_specifier} is not set, the audio channel will -be mapped on all the audio streams. - -Using "-1" instead of -@var{input_file_id}.@var{stream_specifier}.@var{channel_id} will map a muted -channel. 
- -For example, assuming @var{INPUT} is a stereo audio file, you can switch the -two audio channels with the following command: -@example -ffmpeg -i INPUT -map_channel 0.0.1 -map_channel 0.0.0 OUTPUT -@end example - -If you want to mute the first channel and keep the second: -@example -ffmpeg -i INPUT -map_channel -1 -map_channel 0.0.1 OUTPUT -@end example - -The order of the "-map_channel" option specifies the order of the channels in -the output stream. The output channel layout is guessed from the number of -channels mapped (mono if one "-map_channel", stereo if two, etc.). Using "-ac" -in combination of "-map_channel" makes the channel gain levels to be updated if -input and output channel layouts don't match (for instance two "-map_channel" -options and "-ac 6"). - -You can also extract each channel of an input to specific outputs; the following -command extracts two channels of the @var{INPUT} audio stream (file 0, stream 0) -to the respective @var{OUTPUT_CH0} and @var{OUTPUT_CH1} outputs: -@example -ffmpeg -i INPUT -map_channel 0.0.0 OUTPUT_CH0 -map_channel 0.0.1 OUTPUT_CH1 -@end example - -The following example splits the channels of a stereo input into two separate -streams, which are put into the same output file: -@example -ffmpeg -i stereo.wav -map 0:0 -map 0:0 -map_channel 0.0.0:0.0 -map_channel 0.0.1:0.1 -y out.ogg -@end example - -Note that currently each output stream can only contain channels from a single -input stream; you can't for example use "-map_channel" to pick multiple input -audio channels contained in different streams (from the same or different files) -and merge them into a single output stream. It is therefore not currently -possible, for example, to turn two separate mono streams into a single stereo -stream. However splitting a stereo stream into two single channel mono streams -is possible. - -If you need this feature, a possible workaround is to use the @emph{amerge} -filter. For example, if you need to merge a media (here @file{input.mkv}) with 2 -mono audio streams into one single stereo channel audio stream (and keep the -video stream), you can use the following command: -@example -ffmpeg -i input.mkv -filter_complex "[0:1] [0:2] amerge" -c:a pcm_s16le -c:v copy output.mkv -@end example - -@item -map_metadata[:@var{metadata_spec_out}] @var{infile}[:@var{metadata_spec_in}] (@emph{output,per-metadata}) -Set metadata information of the next output file from @var{infile}. Note that -those are file indices (zero-based), not filenames. -Optional @var{metadata_spec_in/out} parameters specify, which metadata to copy. -A metadata specifier can have the following forms: -@table @option -@item @var{g} -global metadata, i.e. metadata that applies to the whole file - -@item @var{s}[:@var{stream_spec}] -per-stream metadata. @var{stream_spec} is a stream specifier as described -in the @ref{Stream specifiers} chapter. In an input metadata specifier, the first -matching stream is copied from. In an output metadata specifier, all matching -streams are copied to. - -@item @var{c}:@var{chapter_index} -per-chapter metadata. @var{chapter_index} is the zero-based chapter index. - -@item @var{p}:@var{program_index} -per-program metadata. @var{program_index} is the zero-based program index. -@end table -If metadata specifier is omitted, it defaults to global. - -By default, global metadata is copied from the first input file, -per-stream and per-chapter metadata is copied along with streams/chapters. 
These -default mappings are disabled by creating any mapping of the relevant type. A negative -file index can be used to create a dummy mapping that just disables automatic copying. - -For example to copy metadata from the first stream of the input file to global metadata -of the output file: -@example -ffmpeg -i in.ogg -map_metadata 0:s:0 out.mp3 -@end example - -To do the reverse, i.e. copy global metadata to all audio streams: -@example -ffmpeg -i in.mkv -map_metadata:s:a 0:g out.mkv -@end example -Note that simple @code{0} would work as well in this example, since global -metadata is assumed by default. - -@item -map_chapters @var{input_file_index} (@emph{output}) -Copy chapters from input file with index @var{input_file_index} to the next -output file. If no chapter mapping is specified, then chapters are copied from -the first input file with at least one chapter. Use a negative file index to -disable any chapter copying. - -@item -benchmark (@emph{global}) -Show benchmarking information at the end of an encode. -Shows CPU time used and maximum memory consumption. -Maximum memory consumption is not supported on all systems, -it will usually display as 0 if not supported. -@item -benchmark_all (@emph{global}) -Show benchmarking information during the encode. -Shows CPU time used in various steps (audio/video encode/decode). -@item -timelimit @var{duration} (@emph{global}) -Exit after ffmpeg has been running for @var{duration} seconds. -@item -dump (@emph{global}) -Dump each input packet to stderr. -@item -hex (@emph{global}) -When dumping packets, also dump the payload. -@item -re (@emph{input}) -Read input at native frame rate. Mainly used to simulate a grab device. -or live input stream (e.g. when reading from a file). Should not be used -with actual grab devices or live input streams (where it can cause packet -loss). -By default @command{ffmpeg} attempts to read the input(s) as fast as possible. -This option will slow down the reading of the input(s) to the native frame rate -of the input(s). It is useful for real-time output (e.g. live streaming). -@item -loop_input -Loop over the input stream. Currently it works only for image -streams. This option is used for automatic FFserver testing. -This option is deprecated, use -loop 1. -@item -loop_output @var{number_of_times} -Repeatedly loop output for formats that support looping such as animated GIF -(0 will loop the output infinitely). -This option is deprecated, use -loop. -@item -vsync @var{parameter} -Video sync method. -For compatibility reasons old values can be specified as numbers. -Newly added values will have to be specified as strings always. - -@table @option -@item 0, passthrough -Each frame is passed with its timestamp from the demuxer to the muxer. -@item 1, cfr -Frames will be duplicated and dropped to achieve exactly the requested -constant frame rate. -@item 2, vfr -Frames are passed through with their timestamp or dropped so as to -prevent 2 frames from having the same timestamp. -@item drop -As passthrough but destroys all timestamps, making the muxer generate -fresh timestamps based on frame-rate. -@item -1, auto -Chooses between 1 and 2 depending on muxer capabilities. This is the -default method. -@end table - -Note that the timestamps may be further modified by the muxer, after this. -For example, in the case that the format option @option{avoid_negative_ts} -is enabled. - -With -map you can select from which stream the timestamps should be -taken. 
You can leave either video or audio unchanged and sync the -remaining stream(s) to the unchanged one. - -@item -async @var{samples_per_second} -Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps, -the parameter is the maximum samples per second by which the audio is changed. --async 1 is a special case where only the start of the audio stream is corrected -without any later correction. - -Note that the timestamps may be further modified by the muxer, after this. -For example, in the case that the format option @option{avoid_negative_ts} -is enabled. - -This option has been deprecated. Use the @code{aresample} audio filter instead. - -@item -copyts -Do not process input timestamps, but keep their values without trying -to sanitize them. In particular, do not remove the initial start time -offset value. - -Note that, depending on the @option{vsync} option or on specific muxer -processing (e.g. in case the format option @option{avoid_negative_ts} -is enabled) the output timestamps may mismatch with the input -timestamps even when this option is selected. - -@item -copytb @var{mode} -Specify how to set the encoder timebase when stream copying. @var{mode} is an -integer numeric value, and can assume one of the following values: - -@table @option -@item 1 -Use the demuxer timebase. - -The time base is copied to the output encoder from the corresponding input -demuxer. This is sometimes required to avoid non monotonically increasing -timestamps when copying video streams with variable frame rate. - -@item 0 -Use the decoder timebase. - -The time base is copied to the output encoder from the corresponding input -decoder. - -@item -1 -Try to make the choice automatically, in order to generate a sane output. -@end table - -Default value is -1. - -@item -shortest (@emph{output}) -Finish encoding when the shortest input stream ends. -@item -dts_delta_threshold -Timestamp discontinuity delta threshold. -@item -muxdelay @var{seconds} (@emph{input}) -Set the maximum demux-decode delay. -@item -muxpreload @var{seconds} (@emph{input}) -Set the initial demux-decode delay. -@item -streamid @var{output-stream-index}:@var{new-value} (@emph{output}) -Assign a new stream-id value to an output stream. This option should be -specified prior to the output filename to which it applies. -For the situation where multiple output files exist, a streamid -may be reassigned to a different value. - -For example, to set the stream 0 PID to 33 and the stream 1 PID to 36 for -an output mpegts file: -@example -ffmpeg -i infile -streamid 0:33 -streamid 1:36 out.ts -@end example - -@item -bsf[:@var{stream_specifier}] @var{bitstream_filters} (@emph{output,per-stream}) -Set bitstream filters for matching streams. @var{bitstream_filters} is -a comma-separated list of bitstream filters. Use the @code{-bsfs} option -to get the list of bitstream filters. -@example -ffmpeg -i h264.mp4 -c:v copy -bsf:v h264_mp4toannexb -an out.h264 -@end example -@example -ffmpeg -i file.mov -an -vn -bsf:s mov2textsub -c:s copy -f rawvideo sub.txt -@end example - -@item -tag[:@var{stream_specifier}] @var{codec_tag} (@emph{per-stream}) -Force a tag/fourcc for matching streams. - -@item -timecode @var{hh}:@var{mm}:@var{ss}SEP@var{ff} -Specify Timecode for writing. @var{SEP} is ':' for non drop timecode and ';' -(or '.') for drop. 
-@example -ffmpeg -i input.mpg -timecode 01:02:03.04 -r 30000/1001 -s ntsc output.mpg -@end example - -@anchor{filter_complex_option} -@item -filter_complex @var{filtergraph} (@emph{global}) -Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or -outputs. For simple graphs -- those with one input and one output of the same -type -- see the @option{-filter} options. @var{filtergraph} is a description of -the filtergraph, as described in the ``Filtergraph syntax'' section of the -ffmpeg-filters manual. - -Input link labels must refer to input streams using the -@code{[file_index:stream_specifier]} syntax (i.e. the same as @option{-map} -uses). If @var{stream_specifier} matches multiple streams, the first one will be -used. An unlabeled input will be connected to the first unused input stream of -the matching type. - -Output link labels are referred to with @option{-map}. Unlabeled outputs are -added to the first output file. - -Note that with this option it is possible to use only lavfi sources without -normal input files. - -For example, to overlay an image over video -@example -ffmpeg -i video.mkv -i image.png -filter_complex '[0:v][1:v]overlay[out]' -map -'[out]' out.mkv -@end example -Here @code{[0:v]} refers to the first video stream in the first input file, -which is linked to the first (main) input of the overlay filter. Similarly the -first video stream in the second input is linked to the second (overlay) input -of overlay. - -Assuming there is only one video stream in each input file, we can omit input -labels, so the above is equivalent to -@example -ffmpeg -i video.mkv -i image.png -filter_complex 'overlay[out]' -map -'[out]' out.mkv -@end example - -Furthermore we can omit the output label and the single output from the filter -graph will be added to the output file automatically, so we can simply write -@example -ffmpeg -i video.mkv -i image.png -filter_complex 'overlay' out.mkv -@end example - -To generate 5 seconds of pure red video using lavfi @code{color} source: -@example -ffmpeg -filter_complex 'color=c=red' -t 5 out.mkv -@end example - -@item -lavfi @var{filtergraph} (@emph{global}) -Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or -outputs. Equivalent to @option{-filter_complex}. - -@item -filter_complex_script @var{filename} (@emph{global}) -This option is similar to @option{-filter_complex}, the only difference is that -its argument is the name of the file from which a complex filtergraph -description is to be read. - -@item -accurate_seek (@emph{input}) -This option enables or disables accurate seeking in input files with the -@option{-ss} option. It is enabled by default, so seeking is accurate when -transcoding. Use @option{-noaccurate_seek} to disable it, which may be useful -e.g. when copying some streams and transcoding the others. - -@item -override_ffserver (@emph{global}) -Overrides the input specifications from @command{ffserver}. Using this -option you can map any input stream to @command{ffserver} and control -many aspects of the encoding from @command{ffmpeg}. Without this -option @command{ffmpeg} will transmit to @command{ffserver} what is -requested by @command{ffserver}. - -The option is intended for cases where features are needed that cannot be -specified to @command{ffserver} but can be to @command{ffmpeg}. 
- -@end table - -As a special exception, you can use a bitmap subtitle stream as input: it -will be converted into a video with the same size as the largest video in -the file, or 720x576 if no video is present. Note that this is an -experimental and temporary solution. It will be removed once libavfilter has -proper support for subtitles. - -For example, to hardcode subtitles on top of a DVB-T recording stored in -MPEG-TS format, delaying the subtitles by 1 second: -@example -ffmpeg -i input.ts -filter_complex \ - '[#0x2ef] setpts=PTS+1/TB [sub] ; [#0x2d0] [sub] overlay' \ - -sn -map '#0x2dc' output.mkv -@end example -(0x2d0, 0x2dc and 0x2ef are the MPEG-TS PIDs of respectively the video, -audio and subtitles streams; 0:0, 0:3 and 0:7 would have worked too) - -@section Preset files -A preset file contains a sequence of @var{option}=@var{value} pairs, -one for each line, specifying a sequence of options which would be -awkward to specify on the command line. Lines starting with the hash -('#') character are ignored and are used to provide comments. Check -the @file{presets} directory in the FFmpeg source tree for examples. - -Preset files are specified with the @code{vpre}, @code{apre}, -@code{spre}, and @code{fpre} options. The @code{fpre} option takes the -filename of the preset instead of a preset name as input and can be -used for any kind of codec. For the @code{vpre}, @code{apre}, and -@code{spre} options, the options specified in a preset file are -applied to the currently selected codec of the same type as the preset -option. - -The argument passed to the @code{vpre}, @code{apre}, and @code{spre} -preset options identifies the preset file to use according to the -following rules: - -First ffmpeg searches for a file named @var{arg}.ffpreset in the -directories @file{$FFMPEG_DATADIR} (if set), and @file{$HOME/.ffmpeg}, and in -the datadir defined at configuration time (usually @file{PREFIX/share/ffmpeg}) -or in a @file{ffpresets} folder along the executable on win32, -in that order. For example, if the argument is @code{libvpx-1080p}, it will -search for the file @file{libvpx-1080p.ffpreset}. - -If no such file is found, then ffmpeg will search for a file named -@var{codec_name}-@var{arg}.ffpreset in the above-mentioned -directories, where @var{codec_name} is the name of the codec to which -the preset file options will be applied. For example, if you select -the video codec with @code{-vcodec libvpx} and use @code{-vpre 1080p}, -then it will search for the file @file{libvpx-1080p.ffpreset}. -@c man end OPTIONS - -@chapter Tips -@c man begin TIPS - -@itemize -@item -For streaming at very low bitrates, use a low frame rate -and a small GOP size. This is especially true for RealVideo where -the Linux player does not seem to be very fast, so it can miss -frames. An example is: - -@example -ffmpeg -g 3 -r 3 -t 10 -b:v 50k -s qcif -f rv10 /tmp/b.rm -@end example - -@item -The parameter 'q' which is displayed while encoding is the current -quantizer. The value 1 indicates that a very good quality could -be achieved. The value 31 indicates the worst quality. If q=31 appears -too often, it means that the encoder cannot compress enough to meet -your bitrate. You must either increase the bitrate, decrease the -frame rate or decrease the frame size. - -@item -If your computer is not fast enough, you can speed up the -compression at the expense of the compression ratio. 
You can use -'-me zero' to speed up motion estimation, and '-g 0' to disable -motion estimation completely (you have only I-frames, which means it -is about as good as JPEG compression). - -@item -To have very low audio bitrates, reduce the sampling frequency -(down to 22050 Hz for MPEG audio, 22050 or 11025 for AC-3). - -@item -To have a constant quality (but a variable bitrate), use the option -'-qscale n' when 'n' is between 1 (excellent quality) and 31 (worst -quality). - -@end itemize -@c man end TIPS - -@chapter Examples -@c man begin EXAMPLES - -@section Preset files - -A preset file contains a sequence of @var{option=value} pairs, one for -each line, specifying a sequence of options which can be specified also on -the command line. Lines starting with the hash ('#') character are ignored and -are used to provide comments. Empty lines are also ignored. Check the -@file{presets} directory in the FFmpeg source tree for examples. - -Preset files are specified with the @code{pre} option, this option takes a -preset name as input. FFmpeg searches for a file named @var{preset_name}.avpreset in -the directories @file{$AVCONV_DATADIR} (if set), and @file{$HOME/.ffmpeg}, and in -the data directory defined at configuration time (usually @file{$PREFIX/share/ffmpeg}) -in that order. For example, if the argument is @code{libx264-max}, it will -search for the file @file{libx264-max.avpreset}. - -@section Video and Audio grabbing - -If you specify the input format and device then ffmpeg can grab video -and audio directly. - -@example -ffmpeg -f oss -i /dev/dsp -f video4linux2 -i /dev/video0 /tmp/out.mpg -@end example - -Or with an ALSA audio source (mono input, card id 1) instead of OSS: -@example -ffmpeg -f alsa -ac 1 -i hw:1 -f video4linux2 -i /dev/video0 /tmp/out.mpg -@end example - -Note that you must activate the right video source and channel before -launching ffmpeg with any TV viewer such as -@uref{http://linux.bytesex.org/xawtv/, xawtv} by Gerd Knorr. You also -have to set the audio recording levels correctly with a -standard mixer. - -@section X11 grabbing - -Grab the X11 display with ffmpeg via - -@example -ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0 /tmp/out.mpg -@end example - -0.0 is display.screen number of your X11 server, same as -the DISPLAY environment variable. - -@example -ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0+10,20 /tmp/out.mpg -@end example - -0.0 is display.screen number of your X11 server, same as the DISPLAY environment -variable. 10 is the x-offset and 20 the y-offset for the grabbing. - -@section Video and Audio file format conversion - -Any supported file format and protocol can serve as input to ffmpeg: - -Examples: -@itemize -@item -You can use YUV files as input: - -@example -ffmpeg -i /tmp/test%d.Y /tmp/out.mpg -@end example - -It will use the files: -@example -/tmp/test0.Y, /tmp/test0.U, /tmp/test0.V, -/tmp/test1.Y, /tmp/test1.U, /tmp/test1.V, etc... -@end example - -The Y files use twice the resolution of the U and V files. They are -raw files, without header. They can be generated by all decent video -decoders. You must specify the size of the image with the @option{-s} option -if ffmpeg cannot guess it. - -@item -You can input from a raw YUV420P file: - -@example -ffmpeg -i /tmp/test.yuv /tmp/out.avi -@end example - -test.yuv is a file containing raw YUV planar data. Each frame is composed -of the Y plane followed by the U and V planes at half vertical and -horizontal resolution. 
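Since such a raw file carries no header, the frame size and pixel format usually have to be stated explicitly; a sketch using the @code{rawvideo} demuxer (the values are only an example):

@example
ffmpeg -f rawvideo -pix_fmt yuv420p -s 640x480 -i /tmp/test.yuv /tmp/out.avi
@end example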
- -@item -You can output to a raw YUV420P file: - -@example -ffmpeg -i mydivx.avi hugefile.yuv -@end example - -@item -You can set several input files and output files: - -@example -ffmpeg -i /tmp/a.wav -s 640x480 -i /tmp/a.yuv /tmp/a.mpg -@end example - -Converts the audio file a.wav and the raw YUV video file a.yuv -to MPEG file a.mpg. - -@item -You can also do audio and video conversions at the same time: - -@example -ffmpeg -i /tmp/a.wav -ar 22050 /tmp/a.mp2 -@end example - -Converts a.wav to MPEG audio at 22050 Hz sample rate. - -@item -You can encode to several formats at the same time and define a -mapping from input stream to output streams: - -@example -ffmpeg -i /tmp/a.wav -map 0:a -b:a 64k /tmp/a.mp2 -map 0:a -b:a 128k /tmp/b.mp2 -@end example - -Converts a.wav to a.mp2 at 64 kbits and to b.mp2 at 128 kbits. '-map -file:index' specifies which input stream is used for each output -stream, in the order of the definition of output streams. - -@item -You can transcode decrypted VOBs: - -@example -ffmpeg -i snatch_1.vob -f avi -c:v mpeg4 -b:v 800k -g 300 -bf 2 -c:a libmp3lame -b:a 128k snatch.avi -@end example - -This is a typical DVD ripping example; the input is a VOB file, the -output an AVI file with MPEG-4 video and MP3 audio. Note that in this -command we use B-frames so the MPEG-4 stream is DivX5 compatible, and -GOP size is 300 which means one intra frame every 10 seconds for 29.97fps -input video. Furthermore, the audio stream is MP3-encoded so you need -to enable LAME support by passing @code{--enable-libmp3lame} to configure. -The mapping is particularly useful for DVD transcoding -to get the desired audio language. - -NOTE: To see the supported input formats, use @code{ffmpeg -formats}. - -@item -You can extract images from a video, or create a video from many images: - -For extracting images from a video: -@example -ffmpeg -i foo.avi -r 1 -s WxH -f image2 foo-%03d.jpeg -@end example - -This will extract one video frame per second from the video and will -output them in files named @file{foo-001.jpeg}, @file{foo-002.jpeg}, -etc. Images will be rescaled to fit the new WxH values. - -If you want to extract just a limited number of frames, you can use the -above command in combination with the -vframes or -t option, or in -combination with -ss to start extracting from a certain point in time. - -For creating a video from many images: -@example -ffmpeg -f image2 -i foo-%03d.jpeg -r 12 -s WxH foo.avi -@end example - -The syntax @code{foo-%03d.jpeg} specifies to use a decimal number -composed of three digits padded with zeroes to express the sequence -number. It is the same syntax supported by the C printf function, but -only formats accepting a normal integer are suitable. - -When importing an image sequence, -i also supports expanding -shell-like wildcard patterns (globbing) internally, by selecting the -image2-specific @code{-pattern_type glob} option. - -For example, for creating a video from filenames matching the glob pattern -@code{foo-*.jpeg}: -@example -ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi -@end example - -@item -You can put many streams of the same type in the output: - -@example -ffmpeg -i test1.avi -i test2.avi -map 0:3 -map 0:2 -map 0:1 -map 0:0 -c copy test12.nut -@end example - -The resulting output file @file{test12.avi} will contain first four streams from -the input file in reverse order. 
- -@item -To force CBR video output: -@example -ffmpeg -i myfile.avi -b 4000k -minrate 4000k -maxrate 4000k -bufsize 1835k out.m2v -@end example - -@item -The four options lmin, lmax, mblmin and mblmax use 'lambda' units, -but you may use the QP2LAMBDA constant to easily convert from 'q' units: -@example -ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext -@end example - -@end itemize -@c man end EXAMPLES - -@include config.texi -@ifset config-all -@ifset config-avutil -@include utils.texi -@end ifset -@ifset config-avcodec -@include codecs.texi -@include bitstream_filters.texi -@end ifset -@ifset config-avformat -@include formats.texi -@include protocols.texi -@end ifset -@ifset config-avdevice -@include devices.texi -@end ifset -@ifset config-swresample -@include resampler.texi -@end ifset -@ifset config-swscale -@include scaler.texi -@end ifset -@ifset config-avfilter -@include filters.texi -@end ifset -@end ifset - -@chapter See Also - -@ifhtml -@ifset config-all -@url{ffmpeg.html,ffmpeg} -@end ifset -@ifset config-not-all -@url{ffmpeg-all.html,ffmpeg-all}, -@end ifset -@url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{ffmpeg-utils.html,ffmpeg-utils}, -@url{ffmpeg-scaler.html,ffmpeg-scaler}, -@url{ffmpeg-resampler.html,ffmpeg-resampler}, -@url{ffmpeg-codecs.html,ffmpeg-codecs}, -@url{ffmpeg-bitstream-filters.html,ffmpeg-bitstream-filters}, -@url{ffmpeg-formats.html,ffmpeg-formats}, -@url{ffmpeg-devices.html,ffmpeg-devices}, -@url{ffmpeg-protocols.html,ffmpeg-protocols}, -@url{ffmpeg-filters.html,ffmpeg-filters} -@end ifhtml - -@ifnothtml -@ifset config-all -ffmpeg(1), -@end ifset -@ifset config-not-all -ffmpeg-all(1), -@end ifset -ffplay(1), ffprobe(1), ffserver(1), -ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1), -ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1), -ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename ffmpeg -@settitle ffmpeg video converter - -@end ignore - -@bye diff --git a/ffmpeg/doc/ffmpeg.txt b/ffmpeg/doc/ffmpeg.txt deleted file mode 100644 index a028ca2..0000000 --- a/ffmpeg/doc/ffmpeg.txt +++ /dev/null @@ -1,47 +0,0 @@ - : - ffmpeg.c : libav* - ======== : ====== - : - : - --------------------------------:---> AVStream... - InputStream input_streams[] / : - / : - InputFile input_files[] +==========================+ / ^ : - ------> 0 | : st ---:-----------:--/ : : - ^ +------+-----------+-----+ / +--------------------------+ : : - : | :ist_index--:-----:---------/ 1 | : st : | : : - : +------+-----------+-----+ +==========================+ : : - nb_input_files : | :ist_index--:-----:------------------> 2 | : st : | : : - : +------+-----------+-----+ +--------------------------+ : nb_input_streams : - : | :ist_index : | 3 | ... | : : - v +------+-----------+-----+ +--------------------------+ : : - --> 4 | | : : - | +--------------------------+ : : - | 5 | | : : - | +==========================+ v : - | : - | : - | : - | : - --------- --------------------------------:---> AVStream... 
- \ / : - OutputStream output_streams[] / : - \ / : - +======\======================/======+ ^ : - ------> 0 | : source_index : st-:--- | : : - OutputFile output_files[] / +------------------------------------+ : : - / 1 | : : : | : : - ^ +------+------------+-----+ / +------------------------------------+ : : - : | : ost_index -:-----:------/ 2 | : : : | : : - nb_output_files : +------+------------+-----+ +====================================+ : : - : | : ost_index -:-----|-----------------> 3 | : : : | : : - : +------+------------+-----+ +------------------------------------+ : nb_output_streams : - : | : : | 4 | | : : - : +------+------------+-----+ +------------------------------------+ : : - : | : : | 5 | | : : - v +------+------------+-----+ +------------------------------------+ : : - 6 | | : : - +------------------------------------+ : : - 7 | | : : - +====================================+ v : - : diff --git a/ffmpeg/doc/ffplay.texi b/ffmpeg/doc/ffplay.texi deleted file mode 100644 index 54b6f19..0000000 --- a/ffmpeg/doc/ffplay.texi +++ /dev/null @@ -1,277 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle ffplay Documentation -@titlepage -@center @titlefont{ffplay Documentation} -@end titlepage - -@top - -@contents - -@chapter Synopsis - -ffplay [@var{options}] [@file{input_file}] - -@chapter Description -@c man begin DESCRIPTION - -FFplay is a very simple and portable media player using the FFmpeg -libraries and the SDL library. It is mostly used as a testbed for the -various FFmpeg APIs. -@c man end - -@chapter Options -@c man begin OPTIONS - -@include fftools-common-opts.texi - -@section Main options - -@table @option -@item -x @var{width} -Force displayed width. -@item -y @var{height} -Force displayed height. -@item -s @var{size} -Set frame size (WxH or abbreviation), needed for videos which do -not contain a header with the frame size like raw YUV. This option -has been deprecated in favor of private options, try -video_size. -@item -an -Disable audio. -@item -vn -Disable video. -@item -ss @var{pos} -Seek to a given position in seconds. -@item -t @var{duration} -play <duration> seconds of audio/video -@item -bytes -Seek by bytes. -@item -nodisp -Disable graphical display. -@item -f @var{fmt} -Force format. -@item -window_title @var{title} -Set window title (default is the input filename). -@item -loop @var{number} -Loops movie playback <number> times. 0 means forever. -@item -showmode @var{mode} -Set the show mode to use. -Available values for @var{mode} are: -@table @samp -@item 0, video -show video -@item 1, waves -show audio waves -@item 2, rdft -show audio frequency band using RDFT ((Inverse) Real Discrete Fourier Transform) -@end table - -Default value is "video", if video is not present or cannot be played -"rdft" is automatically selected. - -You can interactively cycle through the available show modes by -pressing the key @key{w}. - -@item -vf @var{filtergraph} -Create the filtergraph specified by @var{filtergraph} and use it to -filter the video stream. - -@var{filtergraph} is a description of the filtergraph to apply to -the stream, and must have a single video input and a single video -output. In the filtergraph, the input is associated to the label -@code{in}, and the output to the label @code{out}. See the -ffmpeg-filters manual for more information about the filtergraph -syntax. - -@item -af @var{filtergraph} -@var{filtergraph} is a description of the filtergraph to apply to -the input audio. 
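-
-As a brief, hedged illustration (the filter names depend on what is
-enabled in your build), a graph can be given directly on the command
-line, for example to deinterlace the video and halve the audio volume:
-
-@example
-ffplay -vf yadif -af volume=0.5 input.mkv
-@end example
-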
-Use the option "-filters" to show all the available filters (including
-sources and sinks).
-
-@item -i @var{input_file}
-Read @var{input_file}.
-@end table
-
-@section Advanced options
-@table @option
-@item -pix_fmt @var{format}
-Set pixel format.
-This option has been deprecated in favor of private options, try -pixel_format.
-
-@item -stats
-Print several playback statistics; in particular, show the stream
-duration, the codec parameters, the current position in the stream, and
-the audio/video synchronisation drift. It is on by default; to
-explicitly disable it you need to specify @code{-nostats}.
-
-@item -bug
-Work around bugs.
-@item -fast
-Non-spec-compliant optimizations.
-@item -genpts
-Generate PTS (presentation timestamps).
-@item -rtp_tcp
-Force RTP/TCP protocol usage instead of RTP/UDP. It is only meaningful
-if you are streaming with the RTSP protocol.
-@item -sync @var{type}
-Set the master clock to audio (@code{type=audio}), video
-(@code{type=video}) or external (@code{type=ext}). Default is audio. The
-master clock is used to control audio-video synchronization. Most media
-players use audio as master clock, but in some cases (streaming or high
-quality broadcast) it is necessary to change that. This option is mainly
-used for debugging purposes.
-@item -threads @var{count}
-Set the thread count.
-@item -ast @var{audio_stream_number}
-Select the desired audio stream number, counting from 0. The number
-refers to the list of all the input audio streams. If it is greater
-than the number of audio streams minus one, then the last one is
-selected; if it is negative, audio playback is disabled.
-@item -vst @var{video_stream_number}
-Select the desired video stream number, counting from 0. The number
-refers to the list of all the input video streams. If it is greater
-than the number of video streams minus one, then the last one is
-selected; if it is negative, video playback is disabled.
-@item -sst @var{subtitle_stream_number}
-Select the desired subtitle stream number, counting from 0. The number
-refers to the list of all the input subtitle streams. If it is greater
-than the number of subtitle streams minus one, then the last one is
-selected; if it is negative, subtitle rendering is disabled.
-@item -autoexit
-Exit when video is done playing.
-@item -exitonkeydown
-Exit if any key is pressed.
-@item -exitonmousedown
-Exit if any mouse button is pressed.
-
-@item -codec:@var{media_specifier} @var{codec_name}
-Force a specific decoder implementation for the stream identified by
-@var{media_specifier}, which can assume the values @code{a} (audio),
-@code{v} (video), and @code{s} (subtitle).
-
-@item -acodec @var{codec_name}
-Force a specific audio decoder.
-
-@item -vcodec @var{codec_name}
-Force a specific video decoder.
-
-@item -scodec @var{codec_name}
-Force a specific subtitle decoder.
-@end table
-
-@section While playing
-
-@table @key
-@item q, ESC
-Quit.
-
-@item f
-Toggle full screen.
-
-@item p, SPC
-Pause.
-
-@item a
-Cycle audio channel in the current program.
-
-@item v
-Cycle video channel.
-
-@item t
-Cycle subtitle channel in the current program.
-
-@item c
-Cycle program.
-
-@item w
-Show audio waves.
-
-@item left/right
-Seek backward/forward 10 seconds.
-
-@item down/up
-Seek backward/forward 1 minute.
-
-@item page down/page up
-Seek backward/forward 10 minutes.
-
-@item mouse click
-Seek to the percentage of the file corresponding to the fraction of the
-width at which the click occurred.
- -@end table - -@c man end - -@include config.texi -@ifset config-all -@ifset config-avutil -@include utils.texi -@end ifset -@ifset config-avcodec -@include codecs.texi -@include bitstream_filters.texi -@end ifset -@ifset config-avformat -@include formats.texi -@include protocols.texi -@end ifset -@ifset config-avdevice -@include devices.texi -@end ifset -@ifset config-swresample -@include resampler.texi -@end ifset -@ifset config-swscale -@include scaler.texi -@end ifset -@ifset config-avfilter -@include filters.texi -@end ifset -@end ifset - -@chapter See Also - -@ifhtml -@ifset config-all -@url{ffplay.html,ffplay}, -@end ifset -@ifset config-not-all -@url{ffplay-all.html,ffmpeg-all}, -@end ifset -@url{ffmpeg.html,ffmpeg}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{ffmpeg-utils.html,ffmpeg-utils}, -@url{ffmpeg-scaler.html,ffmpeg-scaler}, -@url{ffmpeg-resampler.html,ffmpeg-resampler}, -@url{ffmpeg-codecs.html,ffmpeg-codecs}, -@url{ffmpeg-bitstream-filters.html,ffmpeg-bitstream-filters}, -@url{ffmpeg-formats.html,ffmpeg-formats}, -@url{ffmpeg-devices.html,ffmpeg-devices}, -@url{ffmpeg-protocols.html,ffmpeg-protocols}, -@url{ffmpeg-filters.html,ffmpeg-filters} -@end ifhtml - -@ifnothtml -@ifset config-all -ffplay(1), -@end ifset -@ifset config-not-all -ffplay-all(1), -@end ifset -ffmpeg(1), ffprobe(1), ffserver(1), -ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1), -ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1), -ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename ffplay -@settitle FFplay media player - -@end ignore - -@bye diff --git a/ffmpeg/doc/ffprobe.texi b/ffmpeg/doc/ffprobe.texi deleted file mode 100644 index 75d1e72..0000000 --- a/ffmpeg/doc/ffprobe.texi +++ /dev/null @@ -1,670 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle ffprobe Documentation -@titlepage -@center @titlefont{ffprobe Documentation} -@end titlepage - -@top - -@contents - -@chapter Synopsis - -ffprobe [@var{options}] [@file{input_file}] - -@chapter Description -@c man begin DESCRIPTION - -ffprobe gathers information from multimedia streams and prints it in -human- and machine-readable fashion. - -For example it can be used to check the format of the container used -by a multimedia stream and the format and type of each media stream -contained in it. - -If a filename is specified in input, ffprobe will try to open and -probe the file content. If the file cannot be opened or recognized as -a multimedia file, a positive exit code is returned. - -ffprobe may be employed both as a standalone application or in -combination with a textual filter, which may perform more -sophisticated processing, e.g. statistical processing or plotting. - -Options are used to list some of the formats supported by ffprobe or -for specifying which information to display, and for setting how -ffprobe will show it. - -ffprobe output is designed to be easily parsable by a textual filter, -and consists of one or more sections of a form defined by the selected -writer, which is specified by the @option{print_format} option. - -Sections may contain other nested sections, and are identified by a -name (which may be shared by other sections), and an unique -name. See the output of @option{sections}. - -Metadata tags stored in the container or in the streams are recognized -and printed in the corresponding "FORMAT", "STREAM" or "PROGRAM_STREAM" -section. 
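-
-As a quick, hedged illustration (the file name is a placeholder), the
-default invocation prints a human-readable summary of the container and
-of its streams, while the options described below select the
-machine-readable sections:
-
-@example
-ffprobe INPUT.mkv
-ffprobe -show_format -show_streams -print_format json INPUT.mkv
-@end example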
- -@c man end - -@chapter Options -@c man begin OPTIONS - -@include fftools-common-opts.texi - -@section Main options - -@table @option - -@item -f @var{format} -Force format to use. - -@item -unit -Show the unit of the displayed values. - -@item -prefix -Use SI prefixes for the displayed values. -Unless the "-byte_binary_prefix" option is used all the prefixes -are decimal. - -@item -byte_binary_prefix -Force the use of binary prefixes for byte values. - -@item -sexagesimal -Use sexagesimal format HH:MM:SS.MICROSECONDS for time values. - -@item -pretty -Prettify the format of the displayed values, it corresponds to the -options "-unit -prefix -byte_binary_prefix -sexagesimal". - -@item -of, -print_format @var{writer_name}[=@var{writer_options}] -Set the output printing format. - -@var{writer_name} specifies the name of the writer, and -@var{writer_options} specifies the options to be passed to the writer. - -For example for printing the output in JSON format, specify: -@example --print_format json -@end example - -For more details on the available output printing formats, see the -Writers section below. - -@item -sections -Print sections structure and section information, and exit. The output -is not meant to be parsed by a machine. - -@item -select_streams @var{stream_specifier} -Select only the streams specified by @var{stream_specifier}. This -option affects only the options related to streams -(e.g. @code{show_streams}, @code{show_packets}, etc.). - -For example to show only audio streams, you can use the command: -@example -ffprobe -show_streams -select_streams a INPUT -@end example - -To show only video packets belonging to the video stream with index 1: -@example -ffprobe -show_packets -select_streams v:1 INPUT -@end example - -@item -show_data -Show payload data, as a hexadecimal and ASCII dump. Coupled with -@option{-show_packets}, it will dump the packets' data. Coupled with -@option{-show_streams}, it will dump the codec extradata. - -The dump is printed as the "data" field. It may contain newlines. - -@item -show_error -Show information about the error found when trying to probe the input. - -The error information is printed within a section with name "ERROR". - -@item -show_format -Show information about the container format of the input multimedia -stream. - -All the container format information is printed within a section with -name "FORMAT". - -@item -show_format_entry @var{name} -Like @option{-show_format}, but only prints the specified entry of the -container format information, rather than all. This option may be given more -than once, then all specified entries will be shown. - -This option is deprecated, use @code{show_entries} instead. - -@item -show_entries @var{section_entries} -Set list of entries to show. - -Entries are specified according to the following -syntax. @var{section_entries} contains a list of section entries -separated by @code{:}. Each section entry is composed by a section -name (or unique name), optionally followed by a list of entries local -to that section, separated by @code{,}. - -If section name is specified but is followed by no @code{=}, all -entries are printed to output, together with all the contained -sections. Otherwise only the entries specified in the local section -entries list are printed. In particular, if @code{=} is specified but -the list of local entries is empty, then no entries will be shown for -that section. 
-
-Note that the order of specification of the local section entries is
-not honored in the output, and the usual display order will be
-retained.
-
-The formal syntax is given by:
-@example
-@var{LOCAL_SECTION_ENTRIES} ::= @var{SECTION_ENTRY_NAME}[,@var{LOCAL_SECTION_ENTRIES}]
-@var{SECTION_ENTRY} ::= @var{SECTION_NAME}[=[@var{LOCAL_SECTION_ENTRIES}]]
-@var{SECTION_ENTRIES} ::= @var{SECTION_ENTRY}[:@var{SECTION_ENTRIES}]
-@end example
-
-For example, to show only the index and type of each stream, and the PTS
-time, duration time, and stream index of the packets, you can specify
-the argument:
-@example
-packet=pts_time,duration_time,stream_index : stream=index,codec_type
-@end example
-
-To show all the entries in the section "format", but only the codec
-type in the section "stream", specify the argument:
-@example
-format : stream=codec_type
-@end example
-
-To show all the tags in the stream and format sections:
-@example
-stream_tags : format_tags
-@end example
-
-To show only the @code{title} tag (if available) in the stream
-sections:
-@example
-stream_tags=title
-@end example
-
-@item -show_packets
-Show information about each packet contained in the input multimedia
-stream.
-
-The information for each single packet is printed within a dedicated
-section with name "PACKET".
-
-@item -show_frames
-Show information about each frame and subtitle contained in the input
-multimedia stream.
-
-The information for each single frame is printed within a dedicated
-section with name "FRAME" or "SUBTITLE".
-
-@item -show_streams
-Show information about each media stream contained in the input
-multimedia stream.
-
-Information for each media stream is printed within a dedicated section
-with name "STREAM".
-
-@item -show_programs
-Show information about programs and their streams contained in the input
-multimedia stream.
-
-Information for each media stream is printed within a dedicated section
-with name "PROGRAM_STREAM".
-
-@item -show_chapters
-Show information about chapters stored in the format.
-
-Each chapter is printed within a dedicated section with name "CHAPTER".
-
-@item -count_frames
-Count the number of frames per stream and report it in the
-corresponding stream section.
-
-@item -count_packets
-Count the number of packets per stream and report it in the
-corresponding stream section.
-
-@item -read_intervals @var{read_intervals}
-
-Read only the specified intervals. @var{read_intervals} must be a
-sequence of interval specifications separated by ",".
-@command{ffprobe} will seek to the interval starting point, and will
-continue reading from there.
-
-Each interval is specified by two optional parts, separated by "%".
-
-The first part specifies the interval start position. It is
-interpreted as an absolute position, or as a relative offset from the
-current position if it is preceded by the "+" character. If this first
-part is not specified, no seeking will be performed when reading this
-interval.
-
-The second part specifies the interval end position. It is interpreted
-as an absolute position, or as a relative offset from the current
-position if it is preceded by the "+" character. If the offset
-specification starts with "#", it is interpreted as the number of
-packets to read (not including the flushing packets) from the interval
-start. If no second part is specified, the program will read until the
-end of the input.
-
-Note that seeking is not accurate, so the actual interval start
-point may differ from the specified position.
Also, when an -interval duration is specified, the absolute end time will be computed -by adding the duration to the interval start point found by seeking -the file, rather than to the specified start value. - -The formal syntax is given by: -@example -@var{INTERVAL} ::= [@var{START}|+@var{START_OFFSET}][%[@var{END}|+@var{END_OFFSET}]] -@var{INTERVALS} ::= @var{INTERVAL}[,@var{INTERVALS}] -@end example - -A few examples follow. -@itemize -@item -Seek to time 10, read packets until 20 seconds after the found seek -point, then seek to position @code{01:30} (1 minute and thirty -seconds) and read packets until position @code{01:45}. -@example -10%+20,01:30%01:45 -@end example - -@item -Read only 42 packets after seeking to position @code{01:23}: -@example -01:23%+#42 -@end example - -@item -Read only the first 20 seconds from the start: -@example -%+20 -@end example - -@item -Read from the start until position @code{02:30}: -@example -%02:30 -@end example -@end itemize - -@item -show_private_data, -private -Show private data, that is data depending on the format of the -particular shown element. -This option is enabled by default, but you may need to disable it -for specific uses, for example when creating XSD-compliant XML output. - -@item -show_program_version -Show information related to program version. - -Version information is printed within a section with name -"PROGRAM_VERSION". - -@item -show_library_versions -Show information related to library versions. - -Version information for each library is printed within a section with -name "LIBRARY_VERSION". - -@item -show_versions -Show information related to program and library versions. This is the -equivalent of setting both @option{-show_program_version} and -@option{-show_library_versions} options. - -@item -bitexact -Force bitexact output, useful to produce output which is not dependent -on the specific build. - -@item -i @var{input_file} -Read @var{input_file}. - -@end table -@c man end - -@chapter Writers -@c man begin WRITERS - -A writer defines the output format adopted by @command{ffprobe}, and will be -used for printing all the parts of the output. - -A writer may accept one or more arguments, which specify the options -to adopt. The options are specified as a list of @var{key}=@var{value} -pairs, separated by ":". - -All writers support the following options: - -@table @option -@item string_validation, sv -Set string validation mode. - -The following values are accepted. -@table @samp -@item fail -The writer will fail immediately in case an invalid string (UTF-8) -sequence or code point is found in the input. This is especially -useful to validate input metadata. - -@item ignore -Any validation error will be ignored. This will result in possibly -broken output, especially with the json or xml writer. - -@item replace -The writer will substitute invalid UTF-8 sequences or code points with -the string specified with the @option{string_validation_replacement}. -@end table - -Default value is @samp{replace}. - -@item string_validation_replacement, svr -Set replacement string to use in case @option{string_validation} is -set to @samp{replace}. - -In case the option is not specified, the writer will assume the empty -string, that is it will remove the invalid sequences from the input -strings. -@end table - -A description of the currently available writers follows. - -@section default -Default format. - -Print each section in the form: -@example -[SECTION] -key1=val1 -... 
-keyN=valN
-[/SECTION]
-@end example
-
-Metadata tags are printed as a line in the corresponding FORMAT, STREAM or
-PROGRAM_STREAM section, and are prefixed by the string "TAG:".
-
-A description of the accepted options follows.
-
-@table @option
-
-@item nokey, nk
-If set to 1, do not print the key of each field. Default value
-is 0.
-
-@item noprint_wrappers, nw
-If set to 1, do not print the section header and footer.
-Default value is 0.
-@end table
-
-@section compact, csv
-Compact and CSV format.
-
-The @code{csv} writer is equivalent to @code{compact}, but supports
-different defaults.
-
-Each section is printed on a single line.
-If no option is specified, the output has the form:
-@example
-section|key1=val1| ... |keyN=valN
-@end example
-
-Metadata tags are printed in the corresponding "format" or "stream"
-section. A metadata tag key, if printed, is prefixed by the string
-"tag:".
-
-The description of the accepted options follows.
-
-@table @option
-
-@item item_sep, s
-Specify the character to use for separating fields in the output line.
-It must be a single printable character; the default is "|" ("," for
-the @code{csv} writer).
-
-@item nokey, nk
-If set to 1, do not print the key of each field. Its default
-value is 0 (1 for the @code{csv} writer).
-
-@item escape, e
-Set the escape mode to use; the default is "c" ("csv" for the @code{csv}
-writer).
-
-It can assume one of the following values:
-@table @option
-@item c
-Perform C-like escaping. Strings containing a newline ('\n'), a
-carriage return ('\r'), a tab ('\t'), a form feed ('\f'), the escaping
-character ('\') or the item separator character @var{SEP} are escaped
-in C style, so that a newline is converted to the sequence "\n", a
-carriage return to "\r", '\' to "\\" and the separator @var{SEP} is
-converted to "\@var{SEP}".
-
-@item csv
-Perform CSV-like escaping, as described in RFC 4180. Strings
-containing a newline ('\n'), a carriage return ('\r'), a double quote
-('"'), or @var{SEP} are enclosed in double-quotes.
-
-@item none
-Perform no escaping.
-@end table
-
-@item print_section, p
-Print the section name at the beginning of each line if the value is
-@code{1}; disable it by setting the value to @code{0}. Default value is
-@code{1}.
-
-@end table
-
-@section flat
-Flat format.
-
-A free-form output where each line contains an explicit key=value, such as
-"streams.stream.3.tags.foo=bar". The output is shell escaped, so it can be
-directly embedded in sh scripts as long as the separator character is an
-alphanumeric character or an underscore (see @var{sep_char} option).
-
-The description of the accepted options follows.
-
-@table @option
-@item sep_char, s
-Separator character used to separate the chapter, the section name, IDs and
-potential tags in the printed field key.
-
-Default value is '.'.
-
-@item hierarchical, h
-Specify if the section name specification should be hierarchical. If
-set to 1, and if there is more than one section in the current
-chapter, the section name will be prefixed by the name of the
-chapter. A value of 0 will disable this behavior.
-
-Default value is 1.
-@end table
-
-@section ini
-INI format output.
-
-Print output in an INI-based format.
-
-The following conventions are adopted:
-
-@itemize
-@item
-all keys and values are UTF-8
-@item
-'.'
is the subgroup separator -@item -newline, '\t', '\f', '\b' and the following characters are escaped -@item -'\' is the escape character -@item -'#' is the comment indicator -@item -'=' is the key/value separator -@item -':' is not used but usually parsed as key/value separator -@end itemize - -This writer accepts options as a list of @var{key}=@var{value} pairs, -separated by ":". - -The description of the accepted options follows. - -@table @option -@item hierarchical, h -Specify if the section name specification should be hierarchical. If -set to 1, and if there is more than one section in the current -chapter, the section name will be prefixed by the name of the -chapter. A value of 0 will disable this behavior. - -Default value is 1. -@end table - -@section json -JSON based format. - -Each section is printed using JSON notation. - -The description of the accepted options follows. - -@table @option - -@item compact, c -If set to 1 enable compact output, that is each section will be -printed on a single line. Default value is 0. -@end table - -For more information about JSON, see @url{http://www.json.org/}. - -@section xml -XML based format. - -The XML output is described in the XML schema description file -@file{ffprobe.xsd} installed in the FFmpeg datadir. - -An updated version of the schema can be retrieved at the url -@url{http://www.ffmpeg.org/schema/ffprobe.xsd}, which redirects to the -latest schema committed into the FFmpeg development source code tree. - -Note that the output issued will be compliant to the -@file{ffprobe.xsd} schema only when no special global output options -(@option{unit}, @option{prefix}, @option{byte_binary_prefix}, -@option{sexagesimal} etc.) are specified. - -The description of the accepted options follows. - -@table @option - -@item fully_qualified, q -If set to 1 specify if the output should be fully qualified. Default -value is 0. -This is required for generating an XML file which can be validated -through an XSD file. - -@item xsd_compliant, x -If set to 1 perform more checks for ensuring that the output is XSD -compliant. Default value is 0. -This option automatically sets @option{fully_qualified} to 1. -@end table - -For more information about the XML format, see -@url{http://www.w3.org/XML/}. -@c man end WRITERS - -@chapter Timecode -@c man begin TIMECODE - -@command{ffprobe} supports Timecode extraction: - -@itemize - -@item -MPEG1/2 timecode is extracted from the GOP, and is available in the video -stream details (@option{-show_streams}, see @var{timecode}). - -@item -MOV timecode is extracted from tmcd track, so is available in the tmcd -stream metadata (@option{-show_streams}, see @var{TAG:timecode}). - -@item -DV, GXF and AVI timecodes are available in format metadata -(@option{-show_format}, see @var{TAG:timecode}). 
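-
-For instance, a hedged one-liner (support depends on the container)
-that prints only the format-level timecode tag:
-
-@example
-ffprobe -v error -show_entries format_tags=timecode INPUT
-@end example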
- -@end itemize -@c man end TIMECODE - -@include config.texi -@ifset config-all -@ifset config-avutil -@include utils.texi -@end ifset -@ifset config-avcodec -@include codecs.texi -@include bitstream_filters.texi -@end ifset -@ifset config-avformat -@include formats.texi -@include protocols.texi -@end ifset -@ifset config-avdevice -@include devices.texi -@end ifset -@ifset config-swresample -@include resampler.texi -@end ifset -@ifset config-swscale -@include scaler.texi -@end ifset -@ifset config-avfilter -@include filters.texi -@end ifset -@end ifset - -@chapter See Also - -@ifhtml -@ifset config-all -@url{ffprobe.html,ffprobe}, -@end ifset -@ifset config-not-all -@url{ffprobe-all.html,ffprobe-all}, -@end ifset -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffserver.html,ffserver}, -@url{ffmpeg-utils.html,ffmpeg-utils}, -@url{ffmpeg-scaler.html,ffmpeg-scaler}, -@url{ffmpeg-resampler.html,ffmpeg-resampler}, -@url{ffmpeg-codecs.html,ffmpeg-codecs}, -@url{ffmpeg-bitstream-filters.html,ffmpeg-bitstream-filters}, -@url{ffmpeg-formats.html,ffmpeg-formats}, -@url{ffmpeg-devices.html,ffmpeg-devices}, -@url{ffmpeg-protocols.html,ffmpeg-protocols}, -@url{ffmpeg-filters.html,ffmpeg-filters} -@end ifhtml - -@ifnothtml -@ifset config-all -ffprobe(1), -@end ifset -@ifset config-not-all -ffprobe-all(1), -@end ifset -ffmpeg(1), ffplay(1), ffserver(1), -ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1), -ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1), -ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename ffprobe -@settitle ffprobe media prober - -@end ignore - -@bye diff --git a/ffmpeg/doc/ffprobe.xsd b/ffmpeg/doc/ffprobe.xsd deleted file mode 100644 index 1bc1fb5..0000000 --- a/ffmpeg/doc/ffprobe.xsd +++ /dev/null @@ -1,260 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> - -<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema" - targetNamespace="http://www.ffmpeg.org/schema/ffprobe" - xmlns:ffprobe="http://www.ffmpeg.org/schema/ffprobe"> - - <xsd:element name="ffprobe" type="ffprobe:ffprobeType"/> - - <xsd:complexType name="ffprobeType"> - <xsd:sequence> - <xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" /> - <xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" /> - <xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" /> - <xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" /> - <xsd:element name="chapters" type="ffprobe:chaptersType" minOccurs="0" maxOccurs="1" /> - <xsd:element name="format" type="ffprobe:formatType" minOccurs="0" maxOccurs="1" /> - <xsd:element name="error" type="ffprobe:errorType" minOccurs="0" maxOccurs="1" /> - <xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" /> - <xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" /> - </xsd:sequence> - </xsd:complexType> - - <xsd:complexType name="packetsType"> - <xsd:sequence> - <xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/> - </xsd:sequence> - </xsd:complexType> - - <xsd:complexType name="framesType"> - <xsd:sequence> - <xsd:choice minOccurs="0" maxOccurs="unbounded"> - <xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/> - <xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/> - </xsd:choice> - 
</xsd:sequence> - </xsd:complexType> - - <xsd:complexType name="packetType"> - <xsd:attribute name="codec_type" type="xsd:string" use="required" /> - <xsd:attribute name="stream_index" type="xsd:int" use="required" /> - <xsd:attribute name="pts" type="xsd:long" /> - <xsd:attribute name="pts_time" type="xsd:float" /> - <xsd:attribute name="dts" type="xsd:long" /> - <xsd:attribute name="dts_time" type="xsd:float" /> - <xsd:attribute name="duration" type="xsd:long" /> - <xsd:attribute name="duration_time" type="xsd:float" /> - <xsd:attribute name="convergence_duration" type="xsd:long" /> - <xsd:attribute name="convergence_duration_time" type="xsd:float" /> - <xsd:attribute name="size" type="xsd:long" use="required" /> - <xsd:attribute name="pos" type="xsd:long" /> - <xsd:attribute name="flags" type="xsd:string" use="required" /> - <xsd:attribute name="data" type="xsd:string" /> - </xsd:complexType> - - <xsd:complexType name="frameType"> - <xsd:attribute name="media_type" type="xsd:string" use="required"/> - <xsd:attribute name="key_frame" type="xsd:int" use="required"/> - <xsd:attribute name="pts" type="xsd:long" /> - <xsd:attribute name="pts_time" type="xsd:float"/> - <xsd:attribute name="pkt_pts" type="xsd:long" /> - <xsd:attribute name="pkt_pts_time" type="xsd:float"/> - <xsd:attribute name="pkt_dts" type="xsd:long" /> - <xsd:attribute name="pkt_dts_time" type="xsd:float"/> - <xsd:attribute name="best_effort_timestamp" type="xsd:long" /> - <xsd:attribute name="best_effort_timestamp_time" type="xsd:float" /> - <xsd:attribute name="pkt_duration" type="xsd:long" /> - <xsd:attribute name="pkt_duration_time" type="xsd:float"/> - <xsd:attribute name="pkt_pos" type="xsd:long" /> - <xsd:attribute name="pkt_size" type="xsd:int" /> - - <!-- audio attributes --> - <xsd:attribute name="sample_fmt" type="xsd:string"/> - <xsd:attribute name="nb_samples" type="xsd:long" /> - <xsd:attribute name="channels" type="xsd:int" /> - <xsd:attribute name="channel_layout" type="xsd:string"/> - - <!-- video attributes --> - <xsd:attribute name="width" type="xsd:long" /> - <xsd:attribute name="height" type="xsd:long" /> - <xsd:attribute name="pix_fmt" type="xsd:string"/> - <xsd:attribute name="sample_aspect_ratio" type="xsd:string"/> - <xsd:attribute name="pict_type" type="xsd:string"/> - <xsd:attribute name="coded_picture_number" type="xsd:long" /> - <xsd:attribute name="display_picture_number" type="xsd:long" /> - <xsd:attribute name="interlaced_frame" type="xsd:int" /> - <xsd:attribute name="top_field_first" type="xsd:int" /> - <xsd:attribute name="repeat_pict" type="xsd:int" /> - </xsd:complexType> - - <xsd:complexType name="subtitleType"> - <xsd:attribute name="media_type" type="xsd:string" fixed="subtitle" use="required"/> - <xsd:attribute name="pts" type="xsd:long" /> - <xsd:attribute name="pts_time" type="xsd:float"/> - <xsd:attribute name="format" type="xsd:int" /> - <xsd:attribute name="start_display_time" type="xsd:int" /> - <xsd:attribute name="end_display_time" type="xsd:int" /> - <xsd:attribute name="num_rects" type="xsd:int" /> - </xsd:complexType> - - <xsd:complexType name="streamsType"> - <xsd:sequence> - <xsd:element name="stream" type="ffprobe:streamType" minOccurs="0" maxOccurs="unbounded"/> - </xsd:sequence> - </xsd:complexType> - - <xsd:complexType name="programsType"> - <xsd:sequence> - <xsd:element name="program" type="ffprobe:programType" minOccurs="0" maxOccurs="unbounded"/> - </xsd:sequence> - </xsd:complexType> - - <xsd:complexType name="streamDispositionType"> - <xsd:attribute 
name="default" type="xsd:int" use="required" /> - <xsd:attribute name="dub" type="xsd:int" use="required" /> - <xsd:attribute name="original" type="xsd:int" use="required" /> - <xsd:attribute name="comment" type="xsd:int" use="required" /> - <xsd:attribute name="lyrics" type="xsd:int" use="required" /> - <xsd:attribute name="karaoke" type="xsd:int" use="required" /> - <xsd:attribute name="forced" type="xsd:int" use="required" /> - <xsd:attribute name="hearing_impaired" type="xsd:int" use="required" /> - <xsd:attribute name="visual_impaired" type="xsd:int" use="required" /> - <xsd:attribute name="clean_effects" type="xsd:int" use="required" /> - <xsd:attribute name="attached_pic" type="xsd:int" use="required" /> - </xsd:complexType> - - <xsd:complexType name="streamType"> - <xsd:sequence> - <xsd:element name="disposition" type="ffprobe:streamDispositionType" minOccurs="0" maxOccurs="1"/> - <xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/> - </xsd:sequence> - - <xsd:attribute name="index" type="xsd:int" use="required"/> - <xsd:attribute name="codec_name" type="xsd:string" /> - <xsd:attribute name="codec_long_name" type="xsd:string" /> - <xsd:attribute name="profile" type="xsd:string" /> - <xsd:attribute name="codec_type" type="xsd:string" /> - <xsd:attribute name="codec_time_base" type="xsd:string" use="required"/> - <xsd:attribute name="codec_tag" type="xsd:string" use="required"/> - <xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/> - <xsd:attribute name="extradata" type="xsd:string" /> - - <!-- video attributes --> - <xsd:attribute name="width" type="xsd:int"/> - <xsd:attribute name="height" type="xsd:int"/> - <xsd:attribute name="has_b_frames" type="xsd:int"/> - <xsd:attribute name="sample_aspect_ratio" type="xsd:string"/> - <xsd:attribute name="display_aspect_ratio" type="xsd:string"/> - <xsd:attribute name="pix_fmt" type="xsd:string"/> - <xsd:attribute name="level" type="xsd:int"/> - <xsd:attribute name="timecode" type="xsd:string"/> - - <!-- audio attributes --> - <xsd:attribute name="sample_fmt" type="xsd:string"/> - <xsd:attribute name="sample_rate" type="xsd:int"/> - <xsd:attribute name="channels" type="xsd:int"/> - <xsd:attribute name="channel_layout" type="xsd:string"/> - <xsd:attribute name="bits_per_sample" type="xsd:int"/> - - <xsd:attribute name="id" type="xsd:string"/> - <xsd:attribute name="r_frame_rate" type="xsd:string" use="required"/> - <xsd:attribute name="avg_frame_rate" type="xsd:string" use="required"/> - <xsd:attribute name="time_base" type="xsd:string" use="required"/> - <xsd:attribute name="start_pts" type="xsd:long"/> - <xsd:attribute name="start_time" type="xsd:float"/> - <xsd:attribute name="duration_ts" type="xsd:long"/> - <xsd:attribute name="duration" type="xsd:float"/> - <xsd:attribute name="bit_rate" type="xsd:int"/> - <xsd:attribute name="nb_frames" type="xsd:int"/> - <xsd:attribute name="nb_read_frames" type="xsd:int"/> - <xsd:attribute name="nb_read_packets" type="xsd:int"/> - </xsd:complexType> - - <xsd:complexType name="programType"> - <xsd:sequence> - <xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/> - <xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1"/> - </xsd:sequence> - - <xsd:attribute name="program_id" type="xsd:int" use="required"/> - <xsd:attribute name="program_num" type="xsd:int" use="required"/> - <xsd:attribute name="nb_streams" type="xsd:int" use="required"/> - <xsd:attribute name="start_time" type="xsd:float"/> - 
<xsd:attribute name="start_pts" type="xsd:long"/> - <xsd:attribute name="end_time" type="xsd:float"/> - <xsd:attribute name="end_pts" type="xsd:long"/> - <xsd:attribute name="pmt_pid" type="xsd:int" use="required"/> - <xsd:attribute name="pcr_pid" type="xsd:int" use="required"/> - </xsd:complexType> - - <xsd:complexType name="formatType"> - <xsd:sequence> - <xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/> - </xsd:sequence> - - <xsd:attribute name="filename" type="xsd:string" use="required"/> - <xsd:attribute name="nb_streams" type="xsd:int" use="required"/> - <xsd:attribute name="nb_programs" type="xsd:int" use="required"/> - <xsd:attribute name="format_name" type="xsd:string" use="required"/> - <xsd:attribute name="format_long_name" type="xsd:string"/> - <xsd:attribute name="start_time" type="xsd:float"/> - <xsd:attribute name="duration" type="xsd:float"/> - <xsd:attribute name="size" type="xsd:long"/> - <xsd:attribute name="bit_rate" type="xsd:long"/> - <xsd:attribute name="probe_score" type="xsd:int"/> - </xsd:complexType> - - <xsd:complexType name="tagType"> - <xsd:attribute name="key" type="xsd:string" use="required"/> - <xsd:attribute name="value" type="xsd:string" use="required"/> - </xsd:complexType> - - <xsd:complexType name="errorType"> - <xsd:attribute name="code" type="xsd:int" use="required"/> - <xsd:attribute name="string" type="xsd:string" use="required"/> - </xsd:complexType> - - <xsd:complexType name="programVersionType"> - <xsd:attribute name="version" type="xsd:string" use="required"/> - <xsd:attribute name="copyright" type="xsd:string" use="required"/> - <xsd:attribute name="build_date" type="xsd:string" use="required"/> - <xsd:attribute name="build_time" type="xsd:string" use="required"/> - <xsd:attribute name="compiler_type" type="xsd:string" use="required"/> - <xsd:attribute name="compiler_version" type="xsd:string" use="required"/> - <xsd:attribute name="configuration" type="xsd:string" use="required"/> - </xsd:complexType> - - <xsd:complexType name="chaptersType"> - <xsd:sequence> - <xsd:element name="chapter" type="ffprobe:chapterType" minOccurs="0" maxOccurs="unbounded"/> - </xsd:sequence> - </xsd:complexType> - - <xsd:complexType name="chapterType"> - <xsd:sequence> - <xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/> - </xsd:sequence> - - <xsd:attribute name="id" type="xsd:int" use="required"/> - <xsd:attribute name="time_base" type="xsd:string" use="required"/> - <xsd:attribute name="start" type="xsd:int" use="required"/> - <xsd:attribute name="start_time" type="xsd:float"/> - <xsd:attribute name="end" type="xsd:int" use="required"/> - <xsd:attribute name="end_time" type="xsd:float" use="required"/> - </xsd:complexType> - - <xsd:complexType name="libraryVersionType"> - <xsd:attribute name="name" type="xsd:string" use="required"/> - <xsd:attribute name="major" type="xsd:int" use="required"/> - <xsd:attribute name="minor" type="xsd:int" use="required"/> - <xsd:attribute name="micro" type="xsd:int" use="required"/> - <xsd:attribute name="version" type="xsd:int" use="required"/> - <xsd:attribute name="ident" type="xsd:string" use="required"/> - </xsd:complexType> - - <xsd:complexType name="libraryVersionsType"> - <xsd:sequence> - <xsd:element name="library_version" type="ffprobe:libraryVersionType" minOccurs="0" maxOccurs="unbounded"/> - </xsd:sequence> - </xsd:complexType> -</xsd:schema> diff --git a/ffmpeg/doc/ffserver.conf b/ffmpeg/doc/ffserver.conf deleted file mode 100644 index 
094c093..0000000 --- a/ffmpeg/doc/ffserver.conf +++ /dev/null @@ -1,371 +0,0 @@ -# Port on which the server is listening. You must select a different -# port from your standard HTTP web server if it is running on the same -# computer. -Port 8090 - -# Address on which the server is bound. Only useful if you have -# several network interfaces. -BindAddress 0.0.0.0 - -# Number of simultaneous HTTP connections that can be handled. It has -# to be defined *before* the MaxClients parameter, since it defines the -# MaxClients maximum limit. -MaxHTTPConnections 2000 - -# Number of simultaneous requests that can be handled. Since FFServer -# is very fast, it is more likely that you will want to leave this high -# and use MaxBandwidth, below. -MaxClients 1000 - -# This the maximum amount of kbit/sec that you are prepared to -# consume when streaming to clients. -MaxBandwidth 1000 - -# Access log file (uses standard Apache log file format) -# '-' is the standard output. -CustomLog - - -################################################################## -# Definition of the live feeds. Each live feed contains one video -# and/or audio sequence coming from an ffmpeg encoder or another -# ffserver. This sequence may be encoded simultaneously with several -# codecs at several resolutions. - -<Feed feed1.ffm> - -# You must use 'ffmpeg' to send a live feed to ffserver. In this -# example, you can type: -# -# ffmpeg http://localhost:8090/feed1.ffm - -# ffserver can also do time shifting. It means that it can stream any -# previously recorded live stream. The request should contain: -# "http://xxxx?date=[YYYY-MM-DDT][[HH:]MM:]SS[.m...]".You must specify -# a path where the feed is stored on disk. You also specify the -# maximum size of the feed, where zero means unlimited. Default: -# File=/tmp/feed_name.ffm FileMaxSize=5M -File /tmp/feed1.ffm -FileMaxSize 200K - -# You could specify -# ReadOnlyFile /saved/specialvideo.ffm -# This marks the file as readonly and it will not be deleted or updated. - -# Specify launch in order to start ffmpeg automatically. -# First ffmpeg must be defined with an appropriate path if needed, -# after that options can follow, but avoid adding the http:// field -#Launch ffmpeg - -# Only allow connections from localhost to the feed. -ACL allow 127.0.0.1 - -</Feed> - - -################################################################## -# Now you can define each stream which will be generated from the -# original audio and video stream. Each format has a filename (here -# 'test1.mpg'). FFServer will send this stream when answering a -# request containing this filename. - -<Stream test1.mpg> - -# coming from live feed 'feed1' -Feed feed1.ffm - -# Format of the stream : you can choose among: -# mpeg : MPEG-1 multiplexed video and audio -# mpegvideo : only MPEG-1 video -# mp2 : MPEG-2 audio (use AudioCodec to select layer 2 and 3 codec) -# ogg : Ogg format (Vorbis audio codec) -# rm : RealNetworks-compatible stream. Multiplexed audio and video. -# ra : RealNetworks-compatible stream. Audio only. -# mpjpeg : Multipart JPEG (works with Netscape without any plugin) -# jpeg : Generate a single JPEG image. -# asf : ASF compatible streaming (Windows Media Player format). -# swf : Macromedia Flash compatible stream -# avi : AVI format (MPEG-4 video, MPEG audio sound) -Format mpeg - -# Bitrate for the audio stream. Codecs usually support only a few -# different bitrates. -AudioBitRate 32 - -# Number of audio channels: 1 = mono, 2 = stereo -AudioChannels 1 - -# Sampling frequency for audio. 
When using low bitrates, you should -# lower this frequency to 22050 or 11025. The supported frequencies -# depend on the selected audio codec. -AudioSampleRate 44100 - -# Bitrate for the video stream -VideoBitRate 64 - -# Ratecontrol buffer size -VideoBufferSize 40 - -# Number of frames per second -VideoFrameRate 3 - -# Size of the video frame: WxH (default: 160x128) -# The following abbreviations are defined: sqcif, qcif, cif, 4cif, qqvga, -# qvga, vga, svga, xga, uxga, qxga, sxga, qsxga, hsxga, wvga, wxga, wsxga, -# wuxga, woxga, wqsxga, wquxga, whsxga, whuxga, cga, ega, hd480, hd720, -# hd1080 -VideoSize 160x128 - -# Transmit only intra frames (useful for low bitrates, but kills frame rate). -#VideoIntraOnly - -# If non-intra only, an intra frame is transmitted every VideoGopSize -# frames. Video synchronization can only begin at an intra frame. -VideoGopSize 12 - -# More MPEG-4 parameters -# VideoHighQuality -# Video4MotionVector - -# Choose your codecs: -#AudioCodec mp2 -#VideoCodec mpeg1video - -# Suppress audio -#NoAudio - -# Suppress video -#NoVideo - -#VideoQMin 3 -#VideoQMax 31 - -# Set this to the number of seconds backwards in time to start. Note that -# most players will buffer 5-10 seconds of video, and also you need to allow -# for a keyframe to appear in the data stream. -#Preroll 15 - -# ACL: - -# You can allow ranges of addresses (or single addresses) -#ACL ALLOW <first address> <last address> - -# You can deny ranges of addresses (or single addresses) -#ACL DENY <first address> <last address> - -# You can repeat the ACL allow/deny as often as you like. It is on a per -# stream basis. The first match defines the action. If there are no matches, -# then the default is the inverse of the last ACL statement. -# -# Thus 'ACL allow localhost' only allows access from localhost. -# 'ACL deny 1.0.0.0 1.255.255.255' would deny the whole of network 1 and -# allow everybody else. - -</Stream> - - -################################################################## -# Example streams - - -# Multipart JPEG - -#<Stream test.mjpg> -#Feed feed1.ffm -#Format mpjpeg -#VideoFrameRate 2 -#VideoIntraOnly -#NoAudio -#Strict -1 -#</Stream> - - -# Single JPEG - -#<Stream test.jpg> -#Feed feed1.ffm -#Format jpeg -#VideoFrameRate 2 -#VideoIntraOnly -##VideoSize 352x240 -#NoAudio -#Strict -1 -#</Stream> - - -# Flash - -#<Stream test.swf> -#Feed feed1.ffm -#Format swf -#VideoFrameRate 2 -#VideoIntraOnly -#NoAudio -#</Stream> - - -# ASF compatible - -<Stream test.asf> -Feed feed1.ffm -Format asf -VideoFrameRate 15 -VideoSize 352x240 -VideoBitRate 256 -VideoBufferSize 40 -VideoGopSize 30 -AudioBitRate 64 -StartSendOnKey -</Stream> - - -# MP3 audio - -#<Stream test.mp3> -#Feed feed1.ffm -#Format mp2 -#AudioCodec mp3 -#AudioBitRate 64 -#AudioChannels 1 -#AudioSampleRate 44100 -#NoVideo -#</Stream> - - -# Ogg Vorbis audio - -#<Stream test.ogg> -#Feed feed1.ffm -#Metadata title "Stream title" -#AudioBitRate 64 -#AudioChannels 2 -#AudioSampleRate 44100 -#NoVideo -#</Stream> - - -# Real with audio only at 32 kbits - -#<Stream test.ra> -#Feed feed1.ffm -#Format rm -#AudioBitRate 32 -#NoVideo -#NoAudio -#</Stream> - - -# Real with audio and video at 64 kbits - -#<Stream test.rm> -#Feed feed1.ffm -#Format rm -#AudioBitRate 32 -#VideoBitRate 128 -#VideoFrameRate 25 -#VideoGopSize 25 -#NoAudio -#</Stream> - - -################################################################## -# A stream coming from a file: you only need to set the input -# filename and optionally a new format. 
Supported conversions: -# AVI -> ASF - -#<Stream file.rm> -#File "/usr/local/httpd/htdocs/tlive.rm" -#NoAudio -#</Stream> - -#<Stream file.asf> -#File "/usr/local/httpd/htdocs/test.asf" -#NoAudio -#Metadata author "Me" -#Metadata copyright "Super MegaCorp" -#Metadata title "Test stream from disk" -#Metadata comment "Test comment" -#</Stream> - - -################################################################## -# RTSP examples -# -# You can access this stream with the RTSP URL: -# rtsp://localhost:5454/test1-rtsp.mpg -# -# A non-standard RTSP redirector is also created. Its URL is: -# http://localhost:8090/test1-rtsp.rtsp - -#<Stream test1-rtsp.mpg> -#Format rtp -#File "/usr/local/httpd/htdocs/test1.mpg" -#</Stream> - - -# Transcode an incoming live feed to another live feed, -# using libx264 and video presets - -#<Stream live.h264> -#Format rtp -#Feed feed1.ffm -#VideoCodec libx264 -#VideoFrameRate 24 -#VideoBitRate 100 -#VideoSize 480x272 -#AVPresetVideo default -#AVPresetVideo baseline -#AVOptionVideo flags +global_header -# -#AudioCodec libfaac -#AudioBitRate 32 -#AudioChannels 2 -#AudioSampleRate 22050 -#AVOptionAudio flags +global_header -#</Stream> - -################################################################## -# SDP/multicast examples -# -# If you want to send your stream in multicast, you must set the -# multicast address with MulticastAddress. The port and the TTL can -# also be set. -# -# An SDP file is automatically generated by ffserver by adding the -# 'sdp' extension to the stream name (here -# http://localhost:8090/test1-sdp.sdp). You should usually give this -# file to your player to play the stream. -# -# The 'NoLoop' option can be used to avoid looping when the stream is -# terminated. - -#<Stream test1-sdp.mpg> -#Format rtp -#File "/usr/local/httpd/htdocs/test1.mpg" -#MulticastAddress 224.124.0.1 -#MulticastPort 5000 -#MulticastTTL 16 -#NoLoop -#</Stream> - - -################################################################## -# Special streams - -# Server status - -<Stream stat.html> -Format status - -# Only allow local people to get the status -ACL allow localhost -ACL allow 192.168.0.0 192.168.255.255 - -#FaviconURL http://pond1.gladstonefamily.net:8080/favicon.ico -</Stream> - - -# Redirect index.html to the appropriate site - -<Redirect index.html> -URL http://www.ffmpeg.org/ -</Redirect> diff --git a/ffmpeg/doc/ffserver.texi b/ffmpeg/doc/ffserver.texi deleted file mode 100644 index ed538c1..0000000 --- a/ffmpeg/doc/ffserver.texi +++ /dev/null @@ -1,892 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle ffserver Documentation -@titlepage -@center @titlefont{ffserver Documentation} -@end titlepage - -@top - -@contents - -@chapter Synopsis - -ffserver [@var{options}] - -@chapter Description -@c man begin DESCRIPTION - -@command{ffserver} is a streaming server for both audio and video. -It supports several live feeds, streaming from files and time shifting -on live feeds. You can seek to positions in the past on each live -feed, provided you specify a big enough feed storage. - -@command{ffserver} is configured through a configuration file, which -is read at startup. If not explicitly specified, it will read from -@file{/etc/ffserver.conf}. - -@command{ffserver} receives prerecorded files or FFM streams from some -@command{ffmpeg} instance as input, then streams them over -RTP/RTSP/HTTP. - -An @command{ffserver} instance will listen on some port as specified -in the configuration file. 
You can launch one or more instances of -@command{ffmpeg} and send one or more FFM streams to the port where -ffserver is expecting to receive them. Alternately, you can make -@command{ffserver} launch such @command{ffmpeg} instances at startup. - -Input streams are called feeds, and each one is specified by a -@code{<Feed>} section in the configuration file. - -For each feed you can have different output streams in various -formats, each one specified by a @code{<Stream>} section in the -configuration file. - -@chapter Detailed description - -@command{ffserver} works by forwarding streams encoded by -@command{ffmpeg}, or pre-recorded streams which are read from disk. - -Precisely, @command{ffserver} acts as an HTTP server, accepting POST -requests from @command{ffmpeg} to acquire the stream to publish, and -serving HTTP clients GET requests with the stream media content. - -A feed is an @ref{FFM} stream created by @command{ffmpeg}, and sent to -a port where @command{ffserver} is listening. - -Each feed is identified by a unique name, corresponding to the name -of the resource published on @command{ffserver}, and is configured by -a dedicated @code{Feed} section in the configuration file. - -The feed publish URL is given by: -@example -http://@var{ffserver_ip_address}:@var{http_port}/@var{feed_name} -@end example - -where @var{ffserver_ip_address} is the IP address of the machine where -@command{ffserver} is installed, @var{http_port} is the port number of -the HTTP server (configured through the @option{Port} option), and -@var{feed_name} is the name of the corresponding feed defined in the -configuration file. - -Each feed is associated to a file which is stored on disk. This stored -file is used to allow to send pre-recorded data to a player as fast as -possible when new content is added in real-time to the stream. - -A "live-stream" or "stream" is a resource published by -@command{ffserver}, and made accessible through the HTTP protocol to -clients. - -A stream can be connected to a feed, or to a file. In the first case, -the published stream is forwarded from the corresponding feed -generated by a running instance of @command{ffmpeg}, in the second -case the stream is read from a pre-recorded file. - -Each stream is identified by a unique name, corresponding to the name -of the resource served by @command{ffserver}, and is configured by -a dedicated @code{Stream} section in the configuration file. - -The stream access URL is given by: -@example -http://@var{ffserver_ip_address}:@var{http_port}/@var{stream_name}[@var{options}] -@end example - -@var{stream_name} is the name of the corresponding stream defined in -the configuration file. @var{options} is a list of options specified -after the URL which affects how the stream is served by -@command{ffserver}. - -In case the stream is associated to a feed, the encoding parameters -must be configured in the stream configuration. They are sent to -@command{ffmpeg} when setting up the encoding. This allows -@command{ffserver} to define the encoding parameters used by -the @command{ffmpeg} encoders. - -The @command{ffmpeg} @option{override_ffserver} commandline option -allows to override the encoding parameters set by the server. - -Multiple streams can be connected to the same feed. 
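-
-As a hedged sketch (host, port and resource names follow the defaults
-used elsewhere in this document), a feed is published by one
-@command{ffmpeg} instance and the corresponding stream is then played
-back over HTTP:
-
-@example
-ffmpeg -i INPUT http://localhost:8090/feed1.ffm
-ffplay http://localhost:8090/test1.mpg
-@end example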
- -For example, you can have a situation described by the following -graph: -@example - _________ __________ - | | | | -ffmpeg 1 -----| feed 1 |-----| stream 1 | - \ |_________|\ |__________| - \ \ - \ \ __________ - \ \ | | - \ \| stream 2 | - \ |__________| - \ - \ _________ __________ - \ | | | | - \| feed 2 |-----| stream 3 | - |_________| |__________| - - _________ __________ - | | | | -ffmpeg 2 -----| feed 3 |-----| stream 4 | - |_________| |__________| - - _________ __________ - | | | | - | file 1 |-----| stream 5 | - |_________| |__________| -@end example - -@anchor{FFM} -@section FFM, FFM2 formats - -FFM and FFM2 are formats used by ffserver. They allow storing a wide variety of -video and audio streams and encoding options, and can store a moving time segment -of an infinite movie or a whole movie. - -FFM is version specific, and there is limited compatibility of FFM files -generated by one version of ffmpeg/ffserver and another version of -ffmpeg/ffserver. It may work but it is not guaranteed to work. - -FFM2 is extensible while maintaining compatibility and should work between -differing versions of tools. FFM2 is the default. - -@section Status stream - -@command{ffserver} supports an HTTP interface which exposes the -current status of the server. - -Simply point your browser to the address of the special status stream -specified in the configuration file. - -For example if you have: -@example -<Stream status.html> -Format status - -# Only allow local people to get the status -ACL allow localhost -ACL allow 192.168.0.0 192.168.255.255 -</Stream> -@end example - -then the server will post a page with the status information when -the special stream @file{status.html} is requested. - -@section How do I make it work? - -As a simple test, just run the following two command lines where INPUTFILE -is some file which you can decode with ffmpeg: - -@example -ffserver -f doc/ffserver.conf & -ffmpeg -i INPUTFILE http://localhost:8090/feed1.ffm -@end example - -At this point you should be able to go to your Windows machine and fire up -Windows Media Player (WMP). Go to Open URL and enter - -@example - http://<linuxbox>:8090/test.asf -@end example - -You should (after a short delay) see video and hear audio. - -WARNING: trying to stream test1.mpg doesn't work with WMP as it tries to -transfer the entire file before starting to play. -The same is true of AVI files. - -@section What happens next? - -You should edit the ffserver.conf file to suit your needs (in terms of -frame rates etc). Then install ffserver and ffmpeg, write a script to start -them up, and off you go. - -@section What else can it do? - -You can replay video from .ffm files that was recorded earlier. -However, there are a number of caveats, including the fact that the -ffserver parameters must match the original parameters used to record the -file. If they do not, then ffserver deletes the file before recording into it. -(Now that I write this, it seems broken). - -You can fiddle with many of the codec choices and encoding parameters, and -there are a bunch more parameters that you cannot control. Post a message -to the mailing list if there are some 'must have' parameters. Look in -ffserver.conf for a list of the currently available controls. - -It will automatically generate the ASX or RAM files that are often used -in browsers. These files are actually redirections to the underlying ASF -or RM file. The reason for this is that the browser often fetches the -entire file before starting up the external viewer. 
The redirection files -are very small and can be transferred quickly. [The stream itself is -often 'infinite' and thus the browser tries to download it and never -finishes.] - -@section Tips - -* When you connect to a live stream, most players (WMP, RA, etc) want to -buffer a certain number of seconds of material so that they can display the -signal continuously. However, ffserver (by default) starts sending data -in realtime. This means that there is a pause of a few seconds while the -buffering is being done by the player. The good news is that this can be -cured by adding a '?buffer=5' to the end of the URL. This means that the -stream should start 5 seconds in the past -- and so the first 5 seconds -of the stream are sent as fast as the network will allow. It will then -slow down to real time. This noticeably improves the startup experience. - -You can also add a 'Preroll 15' statement into the ffserver.conf that will -add the 15 second prebuffering on all requests that do not otherwise -specify a time. In addition, ffserver will skip frames until a key_frame -is found. This further reduces the startup delay by not transferring data -that will be discarded. - -@section Why does the ?buffer / Preroll stop working after a time? - -It turns out that (on my machine at least) the number of frames successfully -grabbed is marginally less than the number that ought to be grabbed. This -means that the timestamp in the encoded data stream gets behind realtime. -This means that if you say 'Preroll 10', then when the stream gets 10 -or more seconds behind, there is no Preroll left. - -Fixing this requires a change in the internals of how timestamps are -handled. - -@section Does the @code{?date=} stuff work. - -Yes (subject to the limitation outlined above). Also note that whenever you -start ffserver, it deletes the ffm file (if any parameters have changed), -thus wiping out what you had recorded before. - -The format of the @code{?date=xxxxxx} is fairly flexible. You should use one -of the following formats (the 'T' is literal): - -@example -* YYYY-MM-DDTHH:MM:SS (localtime) -* YYYY-MM-DDTHH:MM:SSZ (UTC) -@end example - -You can omit the YYYY-MM-DD, and then it refers to the current day. However -note that @samp{?date=16:00:00} refers to 16:00 on the current day -- this -may be in the future and so is unlikely to be useful. - -You use this by adding the ?date= to the end of the URL for the stream. -For example: @samp{http://localhost:8080/test.asf?date=2002-07-26T23:05:00}. -@c man end - -@chapter Options -@c man begin OPTIONS - -@include fftools-common-opts.texi - -@section Main options - -@table @option -@item -f @var{configfile} -Read configuration file @file{configfile}. If not specified it will -read by default from @file{/etc/ffserver.conf}. - -@item -n -Enable no-launch mode. This option disables all the @code{Launch} -directives within the various @code{<Feed>} sections. Since -@command{ffserver} will not launch any @command{ffmpeg} instances, you -will have to launch them manually. - -@item -d -Enable debug mode. This option increases log verbosity, and directs -log messages to stdout. When specified, the @option{CustomLog} option -is ignored. -@end table - -@chapter Configuration file syntax - -@command{ffserver} reads a configuration file containing global -options and settings for each stream and feed. 
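-
-As an orientation, a minimal configuration file has the following
-overall shape (the values shown here are only illustrative; every
-option and section is described below):
-@example
-Port 8090
-BindAddress 0.0.0.0
-MaxClients 100
-MaxBandwidth 1000
-CustomLog -
-
-<Feed feed1.ffm>
-File /tmp/feed1.ffm
-FileMaxSize 200K
-</Feed>
-
-<Stream test.asf>
-Feed feed1.ffm
-Format asf
-</Stream>
-@end example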
-
-The configuration file consists of global options and dedicated
-sections, which must be introduced by "<@var{SECTION_NAME}
-@var{ARGS}>" on a separate line and must be terminated by a line in
-the form "</@var{SECTION_NAME}>". @var{ARGS} is optional.
-
-Currently the following sections are recognized: @samp{Feed},
-@samp{Stream}, @samp{Redirect}.
-
-A line starting with @code{#} is ignored and treated as a comment.
-
-Names of options and sections are case-insensitive.
-
-@section ACL syntax
-An ACL (Access Control List) specifies the addresses which are allowed
-to access a given stream, or to write a given feed.
-
-It accepts the following forms:
-@itemize
-@item
-Allow/deny access to @var{address}.
-@example
-ACL ALLOW <address>
-ACL DENY <address>
-@end example
-
-@item
-Allow/deny access to ranges of addresses from @var{first_address} to
-@var{last_address}.
-@example
-ACL ALLOW <first_address> <last_address>
-ACL DENY <first_address> <last_address>
-@end example
-@end itemize
-
-You can repeat the ACL allow/deny statements as often as you like;
-they apply on a per-stream basis. The first match defines the action.
-If there are no matches, then the default is the inverse of the last
-ACL statement.
-
-Thus 'ACL allow localhost' only allows access from localhost.
-'ACL deny 1.0.0.0 1.255.255.255' would deny the whole of network 1 and
-allow everybody else.
-
-@section Global options
-@table @option
-@item Port @var{port_number}
-@item RTSPPort @var{port_number}
-
-Set the TCP port number on which the HTTP/RTSP server is listening.
-You must select a different port from your standard HTTP web server if
-it is running on the same computer.
-
-If not specified, no corresponding server will be created.
-
-@item BindAddress @var{ip_address}
-@item RTSPBindAddress @var{ip_address}
-Set the address on which the HTTP/RTSP server is bound. Only useful if
-you have several network interfaces.
-
-@item MaxHTTPConnections @var{n}
-Set the number of simultaneous HTTP connections that can be handled.
-It has to be defined @emph{before} the @option{MaxClients} parameter,
-since it defines the @option{MaxClients} maximum limit.
-
-Default value is 2000.
-
-@item MaxClients @var{n}
-Set the number of simultaneous requests that can be handled. Since
-@command{ffserver} is very fast, it is more likely that you will want
-to leave this high and use @option{MaxBandwidth}.
-
-Default value is 5.
-
-@item MaxBandwidth @var{kbps}
-Set the maximum amount of kbit/sec that you are prepared to consume
-when streaming to clients.
-
-Default value is 1000.
-
-@item CustomLog @var{filename}
-Set the access log file (uses standard Apache log file format). '-' is
-the standard output.
-
-If not specified, @command{ffserver} will produce no log.
-
-In case the commandline option @option{-d} is specified, this option is
-ignored, and the log is written to standard output.
-
-@item NoDaemon
-Set no-daemon mode. This option is deprecated and currently ignored,
-since @command{ffserver} now always works in no-daemon mode.
-@end table
-
-@section Feed section
-
-A Feed section defines a feed provided to @command{ffserver}.
-
-Each live feed contains one video and/or audio sequence coming from an
-@command{ffmpeg} encoder or another @command{ffserver}. This sequence
-may be encoded simultaneously with several codecs at several
-resolutions.
-
-A feed instance specification is introduced by a line in the form:
-@example
-<Feed FEED_FILENAME>
-@end example
-
-where @var{FEED_FILENAME} specifies the unique name of the FFM stream.
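-
-For instance, a feed that @command{ffserver} fills by launching its own
-@command{ffmpeg} instance, and that only local clients may write to,
-could be declared as follows (the device path and size are
-illustrative; the options used here are described below):
-@example
-<Feed feed1.ffm>
-File /tmp/feed1.ffm
-FileMaxSize 1M
-Launch ffmpeg -i /dev/video0
-ACL allow 127.0.0.1
-</Feed>
-@end example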
- -The following options are recognized within a Feed section. - -@table @option -@item File @var{filename} -@item ReadOnlyFile @var{filename} -Set the path where the feed file is stored on disk. - -If not specified, the @file{/tmp/FEED.ffm} is assumed, where -@var{FEED} is the feed name. - -If @option{ReadOnlyFile} is used the file is marked as read-only and -it will not be deleted or updated. - -@item Truncate -Truncate the feed file, rather than appending to it. By default -@command{ffserver} will append data to the file, until the maximum -file size value is reached (see @option{FileMaxSize} option). - -@item FileMaxSize @var{size} -Set maximum size of the feed file in bytes. 0 means unlimited. The -postfixes @code{K} (2^10), @code{M} (2^20), and @code{G} (2^30) are -recognized. - -Default value is 5M. - -@item Launch @var{args} -Launch an @command{ffmpeg} command when creating @command{ffserver}. - -@var{args} must be a sequence of arguments to be provided to an -@command{ffmpeg} instance. The first provided argument is ignored, and -it is replaced by a path with the same dirname of the @command{ffserver} -instance, followed by the remaining argument and terminated with a -path corresponding to the feed. - -When the launched process exits, @command{ffserver} will launch -another program instance. - -In case you need a more complex @command{ffmpeg} configuration, -e.g. if you need to generate multiple FFM feeds with a single -@command{ffmpeg} instance, you should launch @command{ffmpeg} by hand. - -This option is ignored in case the commandline option @option{-n} is -specified. - -@item ACL @var{spec} -Specify the list of IP address which are allowed or denied to write -the feed. Multiple ACL options can be specified. -@end table - -@section Stream section - -A Stream section defines a stream provided by @command{ffserver}, and -identified by a single name. - -The stream is sent when answering a request containing the stream -name. - -A stream section must be introduced by the line: -@example -<Stream STREAM_NAME> -@end example - -where @var{STREAM_NAME} specifies the unique name of the stream. - -The following options are recognized within a Stream section. - -Encoding options are marked with the @emph{encoding} tag, and they are -used to set the encoding parameters, and are mapped to libavcodec -encoding options. Not all encoding options are supported, in -particular it is not possible to set encoder private options. In order -to override the encoding options specified by @command{ffserver}, you -can use the @command{ffmpeg} @option{override_ffserver} commandline -option. - -Only one of the @option{Feed} and @option{File} options should be set. - -@table @option -@item Feed @var{feed_name} -Set the input feed. @var{feed_name} must correspond to an existing -feed defined in a @code{Feed} section. - -When this option is set, encoding options are used to setup the -encoding operated by the remote @command{ffmpeg} process. - -@item File @var{filename} -Set the filename of the pre-recorded input file to stream. - -When this option is set, encoding options are ignored and the input -file content is re-streamed as is. - -@item Format @var{format_name} -Set the format of the output stream. - -Must be the name of a format recognized by FFmpeg. If set to -@samp{status}, it is treated as a status stream. - -@item InputFormat @var{format_name} -Set input format. If not specified, it is automatically guessed. - -@item Preroll @var{n} -Set this to the number of seconds backwards in time to start. 
Note that -most players will buffer 5-10 seconds of video, and also you need to allow -for a keyframe to appear in the data stream. - -Default value is 0. - -@item StartSendOnKey -Do not send stream until it gets the first key frame. By default -@command{ffserver} will send data immediately. - -@item MaxTime @var{n} -Set the number of seconds to run. This value set the maximum duration -of the stream a client will be able to receive. - -A value of 0 means that no limit is set on the stream duration. - -@item ACL @var{spec} -Set ACL for the stream. - -@item DynamicACL @var{spec} - -@item RTSPOption @var{option} - -@item MulticastAddress @var{address} - -@item MulticastPort @var{port} - -@item MulticastTTL @var{integer} - -@item NoLoop - -@item FaviconURL @var{url} -Set favicon (favourite icon) for the server status page. It is ignored -for regular streams. - -@item Author @var{value} -@item Comment @var{value} -@item Copyright @var{value} -@item Title @var{value} -Set metadata corresponding to the option. All these options are -deprecated in favor of @option{Metadata}. - -@item Metadata @var{key} @var{value} -Set metadata value on the output stream. - -@item NoAudio -@item NoVideo -Suppress audio/video. - -@item AudioCodec @var{codec_name} (@emph{encoding,audio}) -Set audio codec. - -@item AudioBitRate @var{rate} (@emph{encoding,audio}) -Set bitrate for the audio stream in kbits per second. - -@item AudioChannels @var{n} (@emph{encoding,audio}) -Set number of audio channels. - -@item AudioSampleRate @var{n} (@emph{encoding,audio}) -Set sampling frequency for audio. When using low bitrates, you should -lower this frequency to 22050 or 11025. The supported frequencies -depend on the selected audio codec. - -@item AVOptionAudio @var{option} @var{value} (@emph{encoding,audio}) -Set generic option for audio stream. - -@item AVPresetAudio @var{preset} (@emph{encoding,audio}) -Set preset for audio stream. - -@item VideoCodec @var{codec_name} (@emph{encoding,video}) -Set video codec. - -@item VideoBitRate @var{n} (@emph{encoding,video}) -Set bitrate for the video stream in kbits per second. - -@item VideoBitRateRange @var{range} (@emph{encoding,video}) -Set video bitrate range. - -A range must be specified in the form @var{minrate}-@var{maxrate}, and -specifies the @option{minrate} and @option{maxrate} encoding options -expressed in kbits per second. - -@item VideoBitRateRangeTolerance @var{n} (@emph{encoding,video}) -Set video bitrate tolerance in kbits per second. - -@item PixelFormat @var{pixel_format} (@emph{encoding,video}) -Set video pixel format. - -@item Debug @var{integer} (@emph{encoding,video}) -Set video @option{debug} encoding option. - -@item Strict @var{integer} (@emph{encoding,video}) -Set video @option{strict} encoding option. - -@item VideoBufferSize @var{n} (@emph{encoding,video}) -Set ratecontrol buffer size, expressed in KB. - -@item VideoFrameRate @var{n} (@emph{encoding,video}) -Set number of video frames per second. - -@item VideoSize (@emph{encoding,video}) -Set size of the video frame, must be an abbreviation or in the form -@var{W}x@var{H}. See @ref{video size syntax,,the Video size section -in the ffmpeg-utils(1) manual,ffmpeg-utils}. - -Default value is @code{160x128}. - -@item VideoIntraOnly (@emph{encoding,video}) -Transmit only intra frames (useful for low bitrates, but kills frame rate). - -@item VideoGopSize @var{n} (@emph{encoding,video}) -If non-intra only, an intra frame is transmitted every VideoGopSize -frames. 
Video synchronization can only begin at an intra frame. - -@item VideoTag @var{tag} (@emph{encoding,video}) -Set video tag. - -@item VideoHighQuality (@emph{encoding,video}) -@item Video4MotionVector (@emph{encoding,video}) - -@item BitExact (@emph{encoding,video}) -Set bitexact encoding flag. - -@item IdctSimple (@emph{encoding,video}) -Set simple IDCT algorithm. - -@item Qscale @var{n} (@emph{encoding,video}) -Enable constant quality encoding, and set video qscale (quantization -scale) value, expressed in @var{n} QP units. - -@item VideoQMin @var{n} (@emph{encoding,video}) -@item VideoQMax @var{n} (@emph{encoding,video}) -Set video qmin/qmax. - -@item VideoQDiff @var{integer} (@emph{encoding,video}) -Set video @option{qdiff} encoding option. - -@item LumiMask @var{float} (@emph{encoding,video}) -@item DarkMask @var{float} (@emph{encoding,video}) -Set @option{lumi_mask}/@option{dark_mask} encoding options. - -@item AVOptionVideo @var{option} @var{value} (@emph{encoding,video}) -Set generic option for video stream. - -@item AVPresetVideo @var{preset} (@emph{encoding,video}) -Set preset for video stream. - -@var{preset} must be the path of a preset file. -@end table - -@subsection Server status stream - -A server status stream is a special stream which is used to show -statistics about the @command{ffserver} operations. - -It must be specified setting the option @option{Format} to -@samp{status}. - -@section Redirect section - -A redirect section specifies where to redirect the requested URL to -another page. - -A redirect section must be introduced by the line: -@example -<Redirect NAME> -@end example - -where @var{NAME} is the name of the page which should be redirected. - -It only accepts the option @option{URL}, which specify the redirection -URL. - -@chapter Stream examples - -@itemize -@item -Multipart JPEG -@example -<Stream test.mjpg> -Feed feed1.ffm -Format mpjpeg -VideoFrameRate 2 -VideoIntraOnly -NoAudio -Strict -1 -</Stream> -@end example - -@item -Single JPEG -@example -<Stream test.jpg> -Feed feed1.ffm -Format jpeg -VideoFrameRate 2 -VideoIntraOnly -VideoSize 352x240 -NoAudio -Strict -1 -</Stream> -@end example - -@item -Flash -@example -<Stream test.swf> -Feed feed1.ffm -Format swf -VideoFrameRate 2 -VideoIntraOnly -NoAudio -</Stream> -@end example - -@item -ASF compatible -@example -<Stream test.asf> -Feed feed1.ffm -Format asf -VideoFrameRate 15 -VideoSize 352x240 -VideoBitRate 256 -VideoBufferSize 40 -VideoGopSize 30 -AudioBitRate 64 -StartSendOnKey -</Stream> -@end example - -@item -MP3 audio -@example -<Stream test.mp3> -Feed feed1.ffm -Format mp2 -AudioCodec mp3 -AudioBitRate 64 -AudioChannels 1 -AudioSampleRate 44100 -NoVideo -</Stream> -@end example - -@item -Ogg Vorbis audio -@example -<Stream test.ogg> -Feed feed1.ffm -Metadata title "Stream title" -AudioBitRate 64 -AudioChannels 2 -AudioSampleRate 44100 -NoVideo -</Stream> -@end example - -@item -Real with audio only at 32 kbits -@example -<Stream test.ra> -Feed feed1.ffm -Format rm -AudioBitRate 32 -NoVideo -</Stream> -@end example - -@item -Real with audio and video at 64 kbits -@example -<Stream test.rm> -Feed feed1.ffm -Format rm -AudioBitRate 32 -VideoBitRate 128 -VideoFrameRate 25 -VideoGopSize 25 -</Stream> -@end example - -@item -For stream coming from a file: you only need to set the input filename -and optionally a new format. 
- -@example -<Stream file.rm> -File "/usr/local/httpd/htdocs/tlive.rm" -NoAudio -</Stream> -@end example - -@example -<Stream file.asf> -File "/usr/local/httpd/htdocs/test.asf" -NoAudio -Metadata author "Me" -Metadata copyright "Super MegaCorp" -Metadata title "Test stream from disk" -Metadata comment "Test comment" -</Stream> -@end example -@end itemize - -@c man end - -@include config.texi -@ifset config-all -@ifset config-avutil -@include utils.texi -@end ifset -@ifset config-avcodec -@include codecs.texi -@include bitstream_filters.texi -@end ifset -@ifset config-avformat -@include formats.texi -@include protocols.texi -@end ifset -@ifset config-avdevice -@include devices.texi -@end ifset -@ifset config-swresample -@include resampler.texi -@end ifset -@ifset config-swscale -@include scaler.texi -@end ifset -@ifset config-avfilter -@include filters.texi -@end ifset -@end ifset - -@chapter See Also - -@ifhtml -@ifset config-all -@url{ffserver.html,ffserver}, -@end ifset -@ifset config-not-all -@url{ffserver-all.html,ffserver-all}, -@end ifset -the @file{doc/ffserver.conf} example, -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, -@url{ffmpeg-utils.html,ffmpeg-utils}, -@url{ffmpeg-scaler.html,ffmpeg-scaler}, -@url{ffmpeg-resampler.html,ffmpeg-resampler}, -@url{ffmpeg-codecs.html,ffmpeg-codecs}, -@url{ffmpeg-bitstream-filters.html,ffmpeg-bitstream-filters}, -@url{ffmpeg-formats.html,ffmpeg-formats}, -@url{ffmpeg-devices.html,ffmpeg-devices}, -@url{ffmpeg-protocols.html,ffmpeg-protocols}, -@url{ffmpeg-filters.html,ffmpeg-filters} -@end ifhtml - -@ifnothtml -@ifset config-all -ffserver(1), -@end ifset -@ifset config-not-all -ffserver-all(1), -@end ifset -the @file{doc/ffserver.conf} example, ffmpeg(1), ffplay(1), ffprobe(1), -ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1), -ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1), -ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename ffserver -@settitle ffserver video server - -@end ignore - -@bye diff --git a/ffmpeg/doc/filter_design.txt b/ffmpeg/doc/filter_design.txt deleted file mode 100644 index fca24a9..0000000 --- a/ffmpeg/doc/filter_design.txt +++ /dev/null @@ -1,270 +0,0 @@ -Filter design -============= - -This document explains guidelines that should be observed (or ignored with -good reason) when writing filters for libavfilter. - -In this document, the word “frame” indicates either a video frame or a group -of audio samples, as stored in an AVFilterBuffer structure. - - -Format negotiation -================== - - The query_formats method should set, for each input and each output links, - the list of supported formats. - - For video links, that means pixel format. For audio links, that means - channel layout, sample format (the sample packing is implied by the sample - format) and sample rate. - - The lists are not just lists, they are references to shared objects. When - the negotiation mechanism computes the intersection of the formats - supported at each end of a link, all references to both lists are replaced - with a reference to the intersection. And when a single format is - eventually chosen for a link amongst the remaining list, again, all - references to the list are updated. - - That means that if a filter requires that its input and output have the - same format amongst a supported list, all it has to do is use a reference - to the same list of formats. 
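-
-  As a sketch of that technique (this assumes the internal helpers
-  ff_make_format_list() and ff_set_common_formats() declared in
-  libavfilter/formats.h, and the pixel format list is only
-  illustrative), a filter whose input and output must share the same
-  pixel format can attach one shared list to all of its links:
-
-      static int query_formats(AVFilterContext *ctx)
-      {
-          static const enum AVPixelFormat pix_fmts[] = {
-              AV_PIX_FMT_YUV420P, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
-          };
-          /* a single AVFilterFormats object is referenced by every
-             input and output link, so narrowing it on one side of a
-             link automatically narrows it on the other */
-          ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
-          return 0;
-      }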
- - query_formats can leave some formats unset and return AVERROR(EAGAIN) to - cause the negotiation mechanism to try again later. That can be used by - filters with complex requirements to use the format negotiated on one link - to set the formats supported on another. - - -Buffer references ownership and permissions -=========================================== - - Principle - --------- - - Audio and video data are voluminous; the buffer and buffer reference - mechanism is intended to avoid, as much as possible, expensive copies of - that data while still allowing the filters to produce correct results. - - The data is stored in buffers represented by AVFilterBuffer structures. - They must not be accessed directly, but through references stored in - AVFilterBufferRef structures. Several references can point to the - same buffer; the buffer is automatically deallocated once all - corresponding references have been destroyed. - - The characteristics of the data (resolution, sample rate, etc.) are - stored in the reference; different references for the same buffer can - show different characteristics. In particular, a video reference can - point to only a part of a video buffer. - - A reference is usually obtained as input to the start_frame or - filter_frame method or requested using the ff_get_video_buffer or - ff_get_audio_buffer functions. A new reference on an existing buffer can - be created with the avfilter_ref_buffer. A reference is destroyed using - the avfilter_unref_bufferp function. - - Reference ownership - ------------------- - - At any time, a reference “belongs” to a particular piece of code, - usually a filter. With a few caveats that will be explained below, only - that piece of code is allowed to access it. It is also responsible for - destroying it, although this is sometimes done automatically (see the - section on link reference fields). - - Here are the (fairly obvious) rules for reference ownership: - - * A reference received by the filter_frame method (or its start_frame - deprecated version) belongs to the corresponding filter. - - Special exception: for video references: the reference may be used - internally for automatic copying and must not be destroyed before - end_frame; it can be given away to ff_start_frame. - - * A reference passed to ff_filter_frame (or the deprecated - ff_start_frame) is given away and must no longer be used. - - * A reference created with avfilter_ref_buffer belongs to the code that - created it. - - * A reference obtained with ff_get_video_buffer or ff_get_audio_buffer - belongs to the code that requested it. - - * A reference given as return value by the get_video_buffer or - get_audio_buffer method is given away and must no longer be used. - - Link reference fields - --------------------- - - The AVFilterLink structure has a few AVFilterBufferRef fields. The - cur_buf and out_buf were used with the deprecated - start_frame/draw_slice/end_frame API and should no longer be used. - src_buf, cur_buf_copy and partial_buf are used by libavfilter internally - and must not be accessed by filters. - - Reference permissions - --------------------- - - The AVFilterBufferRef structure has a perms field that describes what - the code that owns the reference is allowed to do to the buffer data. - Different references for the same buffer can have different permissions. 
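-
-  For instance (a minimal sketch against this now deprecated API), a
-  filter that wants to modify samples or pixels in place has to check
-  the reference it owns before touching the buffer:
-
-      if (!(ref->perms & AV_PERM_WRITE)) {
-          /* no write permission: request a writable buffer and copy
-             the data into it before modifying anything */
-      }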
- - For video filters that implement the deprecated - start_frame/draw_slice/end_frame API, the permissions only apply to the - parts of the buffer that have already been covered by the draw_slice - method. - - The value is a binary OR of the following constants: - - * AV_PERM_READ: the owner can read the buffer data; this is essentially - always true and is there for self-documentation. - - * AV_PERM_WRITE: the owner can modify the buffer data. - - * AV_PERM_PRESERVE: the owner can rely on the fact that the buffer data - will not be modified by previous filters. - - * AV_PERM_REUSE: the owner can output the buffer several times, without - modifying the data in between. - - * AV_PERM_REUSE2: the owner can output the buffer several times and - modify the data in between (useless without the WRITE permissions). - - * AV_PERM_ALIGN: the owner can access the data using fast operations - that require data alignment. - - The READ, WRITE and PRESERVE permissions are about sharing the same - buffer between several filters to avoid expensive copies without them - doing conflicting changes on the data. - - The REUSE and REUSE2 permissions are about special memory for direct - rendering. For example a buffer directly allocated in video memory must - not modified once it is displayed on screen, or it will cause tearing; - it will therefore not have the REUSE2 permission. - - The ALIGN permission is about extracting part of the buffer, for - copy-less padding or cropping for example. - - - References received on input pads are guaranteed to have all the - permissions stated in the min_perms field and none of the permissions - stated in the rej_perms. - - References obtained by ff_get_video_buffer and ff_get_audio_buffer are - guaranteed to have at least all the permissions requested as argument. - - References created by avfilter_ref_buffer have the same permissions as - the original reference minus the ones explicitly masked; the mask is - usually ~0 to keep the same permissions. - - Filters should remove permissions on reference they give to output - whenever necessary. It can be automatically done by setting the - rej_perms field on the output pad. - - Here are a few guidelines corresponding to common situations: - - * Filters that modify and forward their frame (like drawtext) need the - WRITE permission. - - * Filters that read their input to produce a new frame on output (like - scale) need the READ permission on input and must request a buffer - with the WRITE permission. - - * Filters that intend to keep a reference after the filtering process - is finished (after filter_frame returns) must have the PRESERVE - permission on it and remove the WRITE permission if they create a new - reference to give it away. - - * Filters that intend to modify a reference they have kept after the end - of the filtering process need the REUSE2 permission and must remove - the PRESERVE permission if they create a new reference to give it - away. - - -Frame scheduling -================ - - The purpose of these rules is to ensure that frames flow in the filter - graph without getting stuck and accumulating somewhere. - - Simple filters that output one frame for each input frame should not have - to worry about it. - - filter_frame - ------------ - - This method is called when a frame is pushed to the filter's input. It - can be called at any time except in a reentrant way. - - If the input frame is enough to produce output, then the filter should - push the output frames on the output link immediately. 
- - As an exception to the previous rule, if the input frame is enough to - produce several output frames, then the filter needs output only at - least one per link. The additional frames can be left buffered in the - filter; these buffered frames must be flushed immediately if a new input - produces new output. - - (Example: frame rate-doubling filter: filter_frame must (1) flush the - second copy of the previous frame, if it is still there, (2) push the - first copy of the incoming frame, (3) keep the second copy for later.) - - If the input frame is not enough to produce output, the filter must not - call request_frame to get more. It must just process the frame or queue - it. The task of requesting more frames is left to the filter's - request_frame method or the application. - - If a filter has several inputs, the filter must be ready for frames - arriving randomly on any input. Therefore, any filter with several inputs - will most likely require some kind of queuing mechanism. It is perfectly - acceptable to have a limited queue and to drop frames when the inputs - are too unbalanced. - - request_frame - ------------- - - This method is called when a frame is wanted on an output. - - For an input, it should directly call filter_frame on the corresponding - output. - - For a filter, if there are queued frames already ready, one of these - frames should be pushed. If not, the filter should request a frame on - one of its inputs, repeatedly until at least one frame has been pushed. - - Return values: - if request_frame could produce a frame, it should return 0; - if it could not for temporary reasons, it should return AVERROR(EAGAIN); - if it could not because there are no more frames, it should return - AVERROR_EOF. - - The typical implementation of request_frame for a filter with several - inputs will look like that: - - if (frames_queued) { - push_one_frame(); - return 0; - } - while (!frame_pushed) { - input = input_where_a_frame_is_most_needed(); - ret = ff_request_frame(input); - if (ret == AVERROR_EOF) { - process_eof_on_input(); - } else if (ret < 0) { - return ret; - } - } - return 0; - - Note that, except for filters that can have queued frames, request_frame - does not push frames: it requests them to its input, and as a reaction, - the filter_frame method will be called and do the work. - -Legacy API -========== - - Until libavfilter 3.23, the filter_frame method was split: - - - for video filters, it was made of start_frame, draw_slice (that could be - called several times on distinct parts of the frame) and end_frame; - - - for audio filters, it was called filter_samples. diff --git a/ffmpeg/doc/filters.texi b/ffmpeg/doc/filters.texi deleted file mode 100644 index a579964..0000000 --- a/ffmpeg/doc/filters.texi +++ /dev/null @@ -1,10207 +0,0 @@ -@chapter Filtering Introduction -@c man begin FILTERING INTRODUCTION - -Filtering in FFmpeg is enabled through the libavfilter library. - -In libavfilter, a filter can have multiple inputs and multiple -outputs. -To illustrate the sorts of things that are possible, we consider the -following filtergraph. - -@example - [main] -input --> split ---------------------> overlay --> output - | ^ - |[tmp] [flip]| - +-----> crop --> vflip -------+ -@end example - -This filtergraph splits the input stream in two streams, sends one -stream through the crop filter and the vflip filter before merging it -back with the other stream by overlaying it on top. 
You can use the -following command to achieve this: - -@example -ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT -@end example - -The result will be that in output the top half of the video is mirrored -onto the bottom half. - -Filters in the same linear chain are separated by commas, and distinct -linear chains of filters are separated by semicolons. In our example, -@var{crop,vflip} are in one linear chain, @var{split} and -@var{overlay} are separately in another. The points where the linear -chains join are labelled by names enclosed in square brackets. In the -example, the split filter generates two outputs that are associated to -the labels @var{[main]} and @var{[tmp]}. - -The stream sent to the second output of @var{split}, labelled as -@var{[tmp]}, is processed through the @var{crop} filter, which crops -away the lower half part of the video, and then vertically flipped. The -@var{overlay} filter takes in input the first unchanged output of the -split filter (which was labelled as @var{[main]}), and overlay on its -lower half the output generated by the @var{crop,vflip} filterchain. - -Some filters take in input a list of parameters: they are specified -after the filter name and an equal sign, and are separated from each other -by a colon. - -There exist so-called @var{source filters} that do not have an -audio/video input, and @var{sink filters} that will not have audio/video -output. - -@c man end FILTERING INTRODUCTION - -@chapter graph2dot -@c man begin GRAPH2DOT - -The @file{graph2dot} program included in the FFmpeg @file{tools} -directory can be used to parse a filtergraph description and issue a -corresponding textual representation in the dot language. - -Invoke the command: -@example -graph2dot -h -@end example - -to see how to use @file{graph2dot}. - -You can then pass the dot description to the @file{dot} program (from -the graphviz suite of programs) and obtain a graphical representation -of the filtergraph. - -For example the sequence of commands: -@example -echo @var{GRAPH_DESCRIPTION} | \ -tools/graph2dot -o graph.tmp && \ -dot -Tpng graph.tmp -o graph.png && \ -display graph.png -@end example - -can be used to create and display an image representing the graph -described by the @var{GRAPH_DESCRIPTION} string. Note that this string must be -a complete self-contained graph, with its inputs and outputs explicitly defined. -For example if your command line is of the form: -@example -ffmpeg -i infile -vf scale=640:360 outfile -@end example -your @var{GRAPH_DESCRIPTION} string will need to be of the form: -@example -nullsrc,scale=640:360,nullsink -@end example -you may also need to set the @var{nullsrc} parameters and add a @var{format} -filter in order to simulate a specific input file. - -@c man end GRAPH2DOT - -@chapter Filtergraph description -@c man begin FILTERGRAPH DESCRIPTION - -A filtergraph is a directed graph of connected filters. It can contain -cycles, and there can be multiple links between a pair of -filters. Each link has one input pad on one side connecting it to one -filter from which it takes its input, and one output pad on the other -side connecting it to the one filter accepting its output. - -Each filter in a filtergraph is an instance of a filter class -registered in the application, which defines the features and the -number of input and output pads of the filter. - -A filter with no input pads is called a "source", a filter with no -output pads is called a "sink". 
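-
-For example, in the spirit of the @file{graph2dot} graph shown above,
-@code{testsrc} acts as a source and @code{nullsink} as a sink in the
-following graph:
-@example
-testsrc, hflip, nullsink
-@end example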
- -@anchor{Filtergraph syntax} -@section Filtergraph syntax - -A filtergraph can be represented using a textual representation, which is -recognized by the @option{-filter}/@option{-vf} and @option{-filter_complex} -options in @command{ffmpeg} and @option{-vf} in @command{ffplay}, and by the -@code{avfilter_graph_parse()}/@code{avfilter_graph_parse2()} function defined in -@file{libavfilter/avfilter.h}. - -A filterchain consists of a sequence of connected filters, each one -connected to the previous one in the sequence. A filterchain is -represented by a list of ","-separated filter descriptions. - -A filtergraph consists of a sequence of filterchains. A sequence of -filterchains is represented by a list of ";"-separated filterchain -descriptions. - -A filter is represented by a string of the form: -[@var{in_link_1}]...[@var{in_link_N}]@var{filter_name}=@var{arguments}[@var{out_link_1}]...[@var{out_link_M}] - -@var{filter_name} is the name of the filter class of which the -described filter is an instance of, and has to be the name of one of -the filter classes registered in the program. -The name of the filter class is optionally followed by a string -"=@var{arguments}". - -@var{arguments} is a string which contains the parameters used to -initialize the filter instance. It may have one of the following forms: -@itemize - -@item -A ':'-separated list of @var{key=value} pairs. - -@item -A ':'-separated list of @var{value}. In this case, the keys are assumed to be -the option names in the order they are declared. E.g. the @code{fade} filter -declares three options in this order -- @option{type}, @option{start_frame} and -@option{nb_frames}. Then the parameter list @var{in:0:30} means that the value -@var{in} is assigned to the option @option{type}, @var{0} to -@option{start_frame} and @var{30} to @option{nb_frames}. - -@item -A ':'-separated list of mixed direct @var{value} and long @var{key=value} -pairs. The direct @var{value} must precede the @var{key=value} pairs, and -follow the same constraints order of the previous point. The following -@var{key=value} pairs can be set in any preferred order. - -@end itemize - -If the option value itself is a list of items (e.g. the @code{format} filter -takes a list of pixel formats), the items in the list are usually separated by -'|'. - -The list of arguments can be quoted using the character "'" as initial -and ending mark, and the character '\' for escaping the characters -within the quoted text; otherwise the argument string is considered -terminated when the next special character (belonging to the set -"[]=;,") is encountered. - -The name and arguments of the filter are optionally preceded and -followed by a list of link labels. -A link label allows to name a link and associate it to a filter output -or input pad. The preceding labels @var{in_link_1} -... @var{in_link_N}, are associated to the filter input pads, -the following labels @var{out_link_1} ... @var{out_link_M}, are -associated to the output pads. - -When two link labels with the same name are found in the -filtergraph, a link between the corresponding input and output pad is -created. - -If an output pad is not labelled, it is linked by default to the first -unlabelled input pad of the next filter in the filterchain. -For example in the filterchain: -@example -nullsrc, split[L1], [L2]overlay, nullsink -@end example -the split filter instance has two output pads, and the overlay filter -instance two input pads. 
The first output pad of split is labelled -"L1", the first input pad of overlay is labelled "L2", and the second -output pad of split is linked to the second input pad of overlay, -which are both unlabelled. - -In a complete filterchain all the unlabelled filter input and output -pads must be connected. A filtergraph is considered valid if all the -filter input and output pads of all the filterchains are connected. - -Libavfilter will automatically insert @ref{scale} filters where format -conversion is required. It is possible to specify swscale flags -for those automatically inserted scalers by prepending -@code{sws_flags=@var{flags};} -to the filtergraph description. - -Follows a BNF description for the filtergraph syntax: -@example -@var{NAME} ::= sequence of alphanumeric characters and '_' -@var{LINKLABEL} ::= "[" @var{NAME} "]" -@var{LINKLABELS} ::= @var{LINKLABEL} [@var{LINKLABELS}] -@var{FILTER_ARGUMENTS} ::= sequence of chars (eventually quoted) -@var{FILTER} ::= [@var{LINKLABELS}] @var{NAME} ["=" @var{FILTER_ARGUMENTS}] [@var{LINKLABELS}] -@var{FILTERCHAIN} ::= @var{FILTER} [,@var{FILTERCHAIN}] -@var{FILTERGRAPH} ::= [sws_flags=@var{flags};] @var{FILTERCHAIN} [;@var{FILTERGRAPH}] -@end example - -@section Notes on filtergraph escaping - -Some filter arguments require the use of special characters, typically -@code{:} to separate key=value pairs in a named options list. In this -case the user should perform a first level escaping when specifying -the filter arguments. For example, consider the following literal -string to be embedded in the @ref{drawtext} filter arguments: -@example -this is a 'string': may contain one, or more, special characters -@end example - -Since @code{:} is special for the filter arguments syntax, it needs to -be escaped, so you get: -@example -text=this is a \'string\'\: may contain one, or more, special characters -@end example - -A second level of escaping is required when embedding the filter -arguments in a filtergraph description, in order to escape all the -filtergraph special characters. Thus the example above becomes: -@example -drawtext=text=this is a \\\'string\\\'\\: may contain one\, or more\, special characters -@end example - -Finally an additional level of escaping may be needed when writing the -filtergraph description in a shell command, which depends on the -escaping rules of the adopted shell. For example, assuming that -@code{\} is special and needs to be escaped with another @code{\}, the -previous string will finally result in: -@example --vf "drawtext=text=this is a \\\\\\'string\\\\\\'\\\\: may contain one\\, or more\\, special characters" -@end example - -Sometimes, it might be more convenient to employ quoting in place of -escaping. For example the string: -@example -Caesar: tu quoque, Brute, fili mi -@end example - -Can be quoted in the filter arguments as: -@example -text='Caesar: tu quoque, Brute, fili mi' -@end example - -And finally inserted in a filtergraph like: -@example -drawtext=text=\'Caesar: tu quoque\, Brute\, fili mi\' -@end example - -See the ``Quoting and escaping'' section in the ffmpeg-utils manual -for more information about the escaping and quoting rules adopted by -FFmpeg. - -@chapter Timeline editing - -Some filters support a generic @option{enable} option. For the filters -supporting timeline editing, this option can be set to an expression which is -evaluated before sending a frame to the filter. 
If the evaluation is non-zero, -the filter will be enabled, otherwise the frame will be sent unchanged to the -next filter in the filtergraph. - -The expression accepts the following values: -@table @samp -@item t -timestamp expressed in seconds, NAN if the input timestamp is unknown - -@item n -sequential number of the input frame, starting from 0 - -@item pos -the position in the file of the input frame, NAN if unknown -@end table - -Additionally, these filters support an @option{enable} command that can be used -to re-define the expression. - -Like any other filtering option, the @option{enable} option follows the same -rules. - -For example, to enable a blur filter (@ref{smartblur}) from 10 seconds to 3 -minutes, and a @ref{curves} filter starting at 3 seconds: -@example -smartblur = enable='between(t,10,3*60)', -curves = enable='gte(t,3)' : preset=cross_process -@end example - -@c man end FILTERGRAPH DESCRIPTION - -@chapter Audio Filters -@c man begin AUDIO FILTERS - -When you configure your FFmpeg build, you can disable any of the -existing filters using @code{--disable-filters}. -The configure output will show the audio filters included in your -build. - -Below is a description of the currently available audio filters. - -@section aconvert - -Convert the input audio format to the specified formats. - -@emph{This filter is deprecated. Use @ref{aformat} instead.} - -The filter accepts a string of the form: -"@var{sample_format}:@var{channel_layout}". - -@var{sample_format} specifies the sample format, and can be a string or the -corresponding numeric value defined in @file{libavutil/samplefmt.h}. Use 'p' -suffix for a planar sample format. - -@var{channel_layout} specifies the channel layout, and can be a string -or the corresponding number value defined in @file{libavutil/channel_layout.h}. - -The special parameter "auto", signifies that the filter will -automatically select the output format depending on the output filter. - -@subsection Examples - -@itemize -@item -Convert input to float, planar, stereo: -@example -aconvert=fltp:stereo -@end example - -@item -Convert input to unsigned 8-bit, automatically select out channel layout: -@example -aconvert=u8:auto -@end example -@end itemize - -@section adelay - -Delay one or more audio channels. - -Samples in delayed channel are filled with silence. - -The filter accepts the following option: - -@table @option -@item delays -Set list of delays in milliseconds for each channel separated by '|'. -At least one delay greater than 0 should be provided. -Unused delays will be silently ignored. If number of given delays is -smaller than number of channels all remaining channels will not be delayed. -@end table - -@subsection Examples - -@itemize -@item -Delay first channel by 1.5 seconds, the third channel by 0.5 seconds and leave -the second channel (and any other channels that may be present) unchanged. -@example -adelay=1500|0|500 -@end example -@end itemize - -@section aecho - -Apply echoing to the input audio. - -Echoes are reflected sound and can occur naturally amongst mountains -(and sometimes large buildings) when talking or shouting; digital echo -effects emulate this behaviour and are often used to help fill out the -sound of a single instrument or vocal. The time difference between the -original signal and the reflection is the @code{delay}, and the -loudness of the reflected signal is the @code{decay}. -Multiple echoes can have different delays and decays. - -A description of the accepted parameters follows. 
-
-@table @option
-@item in_gain
-Set input gain of reflected signal. Default is @code{0.6}.
-
-@item out_gain
-Set output gain of reflected signal. Default is @code{0.3}.
-
-@item delays
-Set list of time intervals in milliseconds between original signal and reflections
-separated by '|'. Allowed range for each @code{delay} is @code{(0 - 90000.0]}.
-Default is @code{1000}.
-
-@item decays
-Set list of loudnesses of reflected signals separated by '|'.
-Allowed range for each @code{decay} is @code{(0 - 1.0]}.
-Default is @code{0.5}.
-@end table
-
-@subsection Examples
-
-@itemize
-@item
-Make it sound as if there are twice as many instruments as are actually playing:
-@example
-aecho=0.8:0.88:60:0.4
-@end example
-
-@item
-If the delay is very short, then it sounds like a (metallic) robot playing music:
-@example
-aecho=0.8:0.88:6:0.4
-@end example
-
-@item
-A longer delay will sound like an open air concert in the mountains:
-@example
-aecho=0.8:0.9:1000:0.3
-@end example
-
-@item
-Same as above but with one more mountain:
-@example
-aecho=0.8:0.9:1000|1800:0.3|0.25
-@end example
-@end itemize
-
-@section aeval
-
-Modify an audio signal according to the specified expressions.
-
-This filter accepts one or more expressions (one for each channel),
-which are evaluated and used to modify a corresponding audio signal.
-
-This filter accepts the following options:
-
-@table @option
-@item exprs
-Set the '|'-separated expressions list for each separate channel. If
-the number of input channels is greater than the number of
-expressions, the last specified expression is used for the remaining
-output channels.
-
-@item channel_layout, c
-Set output channel layout. If not specified, the channel layout is
-specified by the number of expressions. If set to @samp{same}, it will
-use the same channel layout as the input.
-@end table
-
-Each expression in @var{exprs} can contain the following constants and functions:
-
-@table @option
-@item ch
-channel number of the current expression
-
-@item n
-number of the evaluated sample, starting from 0
-
-@item s
-sample rate
-
-@item t
-time of the evaluated sample expressed in seconds
-
-@item nb_in_channels
-@item nb_out_channels
-input and output number of channels
-
-@item val(CH)
-the value of input channel with number @var{CH}
-@end table
-
-Note: this filter is slow. For faster processing you should use a
-dedicated filter.
-
-@subsection Examples
-
-@itemize
-@item
-Half volume:
-@example
-aeval=val(ch)/2:c=same
-@end example
-
-@item
-Invert phase of the second channel:
-@example
-aeval=val(0)|-val(1)
-@end example
-@end itemize
-
-@section afade
-
-Apply fade-in/out effect to input audio.
-
-A description of the accepted parameters follows.
-
-@table @option
-@item type, t
-Specify the effect type; it can be either @code{in} for a fade-in, or
-@code{out} for a fade-out effect. Default is @code{in}.
-
-@item start_sample, ss
-Specify the number of the start sample for starting to apply the fade
-effect. Default is 0.
-
-@item nb_samples, ns
-Specify the number of samples for which the fade effect has to last. At
-the end of the fade-in effect the output audio will have the same
-volume as the input audio, at the end of the fade-out transition
-the output audio will be silence. Default is 44100.
-
-@item start_time, st
-Specify the time for starting to apply the fade effect. Default is 0.
-The accepted syntax is:
-@example
-[-]HH[:MM[:SS[.m...]]]
-[-]S+[.m...]
-@end example
-See also the function @code{av_parse_time()}.
-If set this option is used instead of @var{start_sample} one. - -@item duration, d -Specify the duration for which the fade effect has to last. Default is 0. -The accepted syntax is: -@example -[-]HH[:MM[:SS[.m...]]] -[-]S+[.m...] -@end example -See also the function @code{av_parse_time()}. -At the end of the fade-in effect the output audio will have the same -volume as the input audio, at the end of the fade-out transition -the output audio will be silence. -If set this option is used instead of @var{nb_samples} one. - -@item curve -Set curve for fade transition. - -It accepts the following values: -@table @option -@item tri -select triangular, linear slope (default) -@item qsin -select quarter of sine wave -@item hsin -select half of sine wave -@item esin -select exponential sine wave -@item log -select logarithmic -@item par -select inverted parabola -@item qua -select quadratic -@item cub -select cubic -@item squ -select square root -@item cbr -select cubic root -@end table -@end table - -@subsection Examples - -@itemize -@item -Fade in first 15 seconds of audio: -@example -afade=t=in:ss=0:d=15 -@end example - -@item -Fade out last 25 seconds of a 900 seconds audio: -@example -afade=t=out:st=875:d=25 -@end example -@end itemize - -@anchor{aformat} -@section aformat - -Set output format constraints for the input audio. The framework will -negotiate the most appropriate format to minimize conversions. - -The filter accepts the following named parameters: -@table @option - -@item sample_fmts -A '|'-separated list of requested sample formats. - -@item sample_rates -A '|'-separated list of requested sample rates. - -@item channel_layouts -A '|'-separated list of requested channel layouts. - -See @ref{channel layout syntax,,the Channel Layout section in the ffmpeg-utils(1) manual,ffmpeg-utils} -for the required syntax. -@end table - -If a parameter is omitted, all values are allowed. - -For example to force the output to either unsigned 8-bit or signed 16-bit stereo: -@example -aformat=sample_fmts=u8|s16:channel_layouts=stereo -@end example - -@section allpass - -Apply a two-pole all-pass filter with central frequency (in Hz) -@var{frequency}, and filter-width @var{width}. -An all-pass filter changes the audio's frequency to phase relationship -without changing its frequency to amplitude relationship. - -The filter accepts the following options: - -@table @option -@item frequency, f -Set frequency in Hz. - -@item width_type -Set method to specify band-width of filter. -@table @option -@item h -Hz -@item q -Q-Factor -@item o -octave -@item s -slope -@end table - -@item width, w -Specify the band-width of a filter in width_type units. -@end table - -@section amerge - -Merge two or more audio streams into a single multi-channel stream. - -The filter accepts the following options: - -@table @option - -@item inputs -Set the number of inputs. Default is 2. - -@end table - -If the channel layouts of the inputs are disjoint, and therefore compatible, -the channel layout of the output will be set accordingly and the channels -will be reordered as necessary. If the channel layouts of the inputs are not -disjoint, the output will have all the channels of the first input then all -the channels of the second input, in that order, and the channel layout of -the output will be the default value corresponding to the total number of -channels. 
-
-For example, if the first input is in 2.1 (FL+FR+LF) and the second input
-is FC+BL+BR, then the output will be in 5.1, with the channels in the
-following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the
-first input, b1 is the first channel of the second input).
-
-On the other hand, if both inputs are in stereo, the output channels will be
-in the default order: a1, a2, b1, b2, and the channel layout will be
-arbitrarily set to 4.0, which may or may not be the expected value.
-
-All inputs must have the same sample rate and format.
-
-If the inputs do not have the same duration, the output will stop with the
-shortest.
-
-@subsection Examples
-
-@itemize
-@item
-Merge two mono files into a stereo stream:
-@example
-amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
-@end example
-
-@item
-Multiple merges assuming 1 video stream and 6 audio streams in @file{input.mkv}:
-@example
-ffmpeg -i input.mkv -filter_complex "[0:1][0:2][0:3][0:4][0:5][0:6] amerge=inputs=6" -c:a pcm_s16le output.mkv
-@end example
-@end itemize
-
-@section amix
-
-Mixes multiple audio inputs into a single output.
-
-For example
-@example
-ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
-@end example
-will mix 3 input audio streams to a single output with the same duration as the
-first input and a dropout transition time of 3 seconds.
-
-The filter accepts the following named parameters:
-@table @option
-
-@item inputs
-Number of inputs. If unspecified, it defaults to 2.
-
-@item duration
-How to determine the end-of-stream.
-@table @option
-
-@item longest
-Duration of longest input. (default)
-
-@item shortest
-Duration of shortest input.
-
-@item first
-Duration of first input.
-
-@end table
-
-@item dropout_transition
-Transition time, in seconds, for volume renormalization when an input
-stream ends. The default value is 2 seconds.
-
-@end table
-
-@section anull
-
-Pass the audio source unchanged to the output.
-
-@section apad
-
-Pad the end of an audio stream with silence. This can be used together
-with -shortest to extend audio streams to the same length as the video
-stream.
-
-@section aphaser
-Add a phasing effect to the input audio.
-
-A phaser filter creates a series of peaks and troughs in the frequency spectrum.
-The positions of the peaks and troughs are modulated so that they vary over time, creating a sweeping effect.
-
-A description of the accepted parameters follows.
-
-@table @option
-@item in_gain
-Set input gain. Default is 0.4.
-
-@item out_gain
-Set output gain. Default is 0.74.
-
-@item delay
-Set delay in milliseconds. Default is 3.0.
-
-@item decay
-Set decay. Default is 0.4.
-
-@item speed
-Set modulation speed in Hz. Default is 0.5.
-
-@item type
-Set modulation type. Default is triangular.
-
-It accepts the following values:
-@table @samp
-@item triangular, t
-@item sinusoidal, s
-@end table
-@end table
-
-@anchor{aresample}
-@section aresample
-
-Resample the input audio to the specified parameters, using the
-libswresample library. If none are specified then the filter will
-automatically convert between its input and output.
-
-This filter is also able to stretch/squeeze the audio data to make it match
-the timestamps or to inject silence / cut out audio to make it match the
-timestamps, to do a combination of both, or to do neither.
- -The filter accepts the syntax -[@var{sample_rate}:]@var{resampler_options}, where @var{sample_rate} -expresses a sample rate and @var{resampler_options} is a list of -@var{key}=@var{value} pairs, separated by ":". See the -ffmpeg-resampler manual for the complete list of supported options. - -@subsection Examples - -@itemize -@item -Resample the input audio to 44100Hz: -@example -aresample=44100 -@end example - -@item -Stretch/squeeze samples to the given timestamps, with a maximum of 1000 -samples per second compensation: -@example -aresample=async=1000 -@end example -@end itemize - -@section asetnsamples - -Set the number of samples per each output audio frame. - -The last output packet may contain a different number of samples, as -the filter will flush all the remaining samples when the input audio -signal its end. - -The filter accepts the following options: - -@table @option - -@item nb_out_samples, n -Set the number of frames per each output audio frame. The number is -intended as the number of samples @emph{per each channel}. -Default value is 1024. - -@item pad, p -If set to 1, the filter will pad the last audio frame with zeroes, so -that the last frame will contain the same number of samples as the -previous ones. Default value is 1. -@end table - -For example, to set the number of per-frame samples to 1234 and -disable padding for the last frame, use: -@example -asetnsamples=n=1234:p=0 -@end example - -@section asetrate - -Set the sample rate without altering the PCM data. -This will result in a change of speed and pitch. - -The filter accepts the following options: - -@table @option -@item sample_rate, r -Set the output sample rate. Default is 44100 Hz. -@end table - -@section ashowinfo - -Show a line containing various information for each input audio frame. -The input audio is not modified. - -The shown line contains a sequence of key/value pairs of the form -@var{key}:@var{value}. - -A description of each shown parameter follows: - -@table @option -@item n -sequential number of the input frame, starting from 0 - -@item pts -Presentation timestamp of the input frame, in time base units; the time base -depends on the filter input pad, and is usually 1/@var{sample_rate}. - -@item pts_time -presentation timestamp of the input frame in seconds - -@item pos -position of the frame in the input stream, -1 if this information in -unavailable and/or meaningless (for example in case of synthetic audio) - -@item fmt -sample format - -@item chlayout -channel layout - -@item rate -sample rate for the audio frame - -@item nb_samples -number of samples (per channel) in the frame - -@item checksum -Adler-32 checksum (printed in hexadecimal) of the audio data. For planar audio -the data is treated as if all the planes were concatenated. - -@item plane_checksums -A list of Adler-32 checksums for each data plane. -@end table - -@section astats - -Display time domain statistical information about the audio channels. -Statistics are calculated and displayed for each audio channel and, -where applicable, an overall figure is also given. - -The filter accepts the following option: -@table @option -@item length -Short window length in seconds, used for peak and trough RMS measurement. -Default is @code{0.05} (50 miliseconds). Allowed range is @code{[0.1 - 10]}. -@end table - -A description of each shown parameter follows: - -@table @option -@item DC offset -Mean amplitude displacement from zero. - -@item Min level -Minimal sample level. - -@item Max level -Maximal sample level. 
- -@item Peak level dB -@item RMS level dB -Standard peak and RMS level measured in dBFS. - -@item RMS peak dB -@item RMS trough dB -Peak and trough values for RMS level measured over a short window. - -@item Crest factor -Standard ratio of peak to RMS level (note: not in dB). - -@item Flat factor -Flatness (i.e. consecutive samples with the same value) of the signal at its peak levels -(i.e. either @var{Min level} or @var{Max level}). - -@item Peak count -Number of occasions (not the number of samples) that the signal attained either -@var{Min level} or @var{Max level}. -@end table - -@section astreamsync - -Forward two audio streams and control the order the buffers are forwarded. - -The filter accepts the following options: - -@table @option -@item expr, e -Set the expression deciding which stream should be -forwarded next: if the result is negative, the first stream is forwarded; if -the result is positive or zero, the second stream is forwarded. It can use -the following variables: - -@table @var -@item b1 b2 -number of buffers forwarded so far on each stream -@item s1 s2 -number of samples forwarded so far on each stream -@item t1 t2 -current timestamp of each stream -@end table - -The default value is @code{t1-t2}, which means to always forward the stream -that has a smaller timestamp. -@end table - -@subsection Examples - -Stress-test @code{amerge} by randomly sending buffers on the wrong -input, while avoiding too much of a desynchronization: -@example -amovie=file.ogg [a] ; amovie=file.mp3 [b] ; -[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ; -[a2] [b2] amerge -@end example - -@section asyncts - -Synchronize audio data with timestamps by squeezing/stretching it and/or -dropping samples/adding silence when needed. - -This filter is not built by default, please use @ref{aresample} to do squeezing/stretching. - -The filter accepts the following named parameters: -@table @option - -@item compensate -Enable stretching/squeezing the data to make it match the timestamps. Disabled -by default. When disabled, time gaps are covered with silence. - -@item min_delta -Minimum difference between timestamps and audio data (in seconds) to trigger -adding/dropping samples. Default value is 0.1. If you get non-perfect sync with -this filter, try setting this parameter to 0. - -@item max_comp -Maximum compensation in samples per second. Relevant only with compensate=1. -Default value 500. - -@item first_pts -Assume the first pts should be this value. The time base is 1 / sample rate. -This allows for padding/trimming at the start of stream. By default, no -assumption is made about the first frame's expected pts, so no padding or -trimming is done. For example, this could be set to 0 to pad the beginning with -silence if an audio stream starts after the video stream or to trim any samples -with a negative pts due to encoder delay. - -@end table - -@section atempo - -Adjust audio tempo. - -The filter accepts exactly one parameter, the audio tempo. If not -specified then the filter will assume nominal 1.0 tempo. Tempo must -be in the [0.5, 2.0] range. - -@subsection Examples - -@itemize -@item -Slow down audio to 80% tempo: -@example -atempo=0.8 -@end example - -@item -To speed up audio to 125% tempo: -@example -atempo=1.25 -@end example -@end itemize - -@section atrim - -Trim the input so that the output contains one continuous subpart of the input. - -This filter accepts the following options: -@table @option -@item start -Specify time of the start of the kept section, i.e. 
the audio sample -with the timestamp @var{start} will be the first sample in the output. - -@item end -Specify time of the first audio sample that will be dropped, i.e. the -audio sample immediately preceding the one with the timestamp @var{end} will be -the last sample in the output. - -@item start_pts -Same as @var{start}, except this option sets the start timestamp in samples -instead of seconds. - -@item end_pts -Same as @var{end}, except this option sets the end timestamp in samples instead -of seconds. - -@item duration -Specify maximum duration of the output. - -@item start_sample -Number of the first sample that should be passed to output. - -@item end_sample -Number of the first sample that should be dropped. -@end table - -@option{start}, @option{end}, @option{duration} are expressed as time -duration specifications, check the "Time duration" section in the -ffmpeg-utils manual. - -Note that the first two sets of the start/end options and the @option{duration} -option look at the frame timestamp, while the _sample options simply count the -samples that pass through the filter. So start/end_pts and start/end_sample will -give different results when the timestamps are wrong, inexact or do not start at -zero. Also note that this filter does not modify the timestamps. If you wish -that the output timestamps start at zero, insert the asetpts filter after the -atrim filter. - -If multiple start or end options are set, this filter tries to be greedy and -keep all samples that match at least one of the specified constraints. To keep -only the part that matches all the constraints at once, chain multiple atrim -filters. - -The defaults are such that all the input is kept. So it is possible to set e.g. -just the end values to keep everything before the specified time. - -Examples: -@itemize -@item -drop everything except the second minute of input -@example -ffmpeg -i INPUT -af atrim=60:120 -@end example - -@item -keep only the first 1000 samples -@example -ffmpeg -i INPUT -af atrim=end_sample=1000 -@end example - -@end itemize - -@section bandpass - -Apply a two-pole Butterworth band-pass filter with central -frequency @var{frequency}, and (3dB-point) band-width width. -The @var{csg} option selects a constant skirt gain (peak gain = Q) -instead of the default: constant 0dB peak gain. -The filter roll off at 6dB per octave (20dB per decade). - -The filter accepts the following options: - -@table @option -@item frequency, f -Set the filter's central frequency. Default is @code{3000}. - -@item csg -Constant skirt gain if set to 1. Defaults to 0. - -@item width_type -Set method to specify band-width of filter. -@table @option -@item h -Hz -@item q -Q-Factor -@item o -octave -@item s -slope -@end table - -@item width, w -Specify the band-width of a filter in width_type units. -@end table - -@section bandreject - -Apply a two-pole Butterworth band-reject filter with central -frequency @var{frequency}, and (3dB-point) band-width @var{width}. -The filter roll off at 6dB per octave (20dB per decade). - -The filter accepts the following options: - -@table @option -@item frequency, f -Set the filter's central frequency. Default is @code{3000}. - -@item width_type -Set method to specify band-width of filter. -@table @option -@item h -Hz -@item q -Q-Factor -@item o -octave -@item s -slope -@end table - -@item width, w -Specify the band-width of a filter in width_type units. 
-@end table - -@section bass - -Boost or cut the bass (lower) frequencies of the audio using a two-pole -shelving filter with a response similar to that of a standard -hi-fi's tone-controls. This is also known as shelving equalisation (EQ). - -The filter accepts the following options: - -@table @option -@item gain, g -Give the gain at 0 Hz. Its useful range is about -20 -(for a large cut) to +20 (for a large boost). -Beware of clipping when using a positive gain. - -@item frequency, f -Set the filter's central frequency and so can be used -to extend or reduce the frequency range to be boosted or cut. -The default value is @code{100} Hz. - -@item width_type -Set method to specify band-width of filter. -@table @option -@item h -Hz -@item q -Q-Factor -@item o -octave -@item s -slope -@end table - -@item width, w -Determine how steep is the filter's shelf transition. -@end table - -@section biquad - -Apply a biquad IIR filter with the given coefficients. -Where @var{b0}, @var{b1}, @var{b2} and @var{a0}, @var{a1}, @var{a2} -are the numerator and denominator coefficients respectively. - -@section channelmap - -Remap input channels to new locations. - -This filter accepts the following named parameters: -@table @option -@item channel_layout -Channel layout of the output stream. - -@item map -Map channels from input to output. The argument is a '|'-separated list of -mappings, each in the @code{@var{in_channel}-@var{out_channel}} or -@var{in_channel} form. @var{in_channel} can be either the name of the input -channel (e.g. FL for front left) or its index in the input channel layout. -@var{out_channel} is the name of the output channel or its index in the output -channel layout. If @var{out_channel} is not given then it is implicitly an -index, starting with zero and increasing by one for each mapping. -@end table - -If no mapping is present, the filter will implicitly map input channels to -output channels preserving index. - -For example, assuming a 5.1+downmix input MOV file -@example -ffmpeg -i in.mov -filter 'channelmap=map=DL-FL|DR-FR' out.wav -@end example -will create an output WAV file tagged as stereo from the downmix channels of -the input. - -To fix a 5.1 WAV improperly encoded in AAC's native channel order -@example -ffmpeg -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav -@end example - -@section channelsplit - -Split each channel in input audio stream into a separate output stream. - -This filter accepts the following named parameters: -@table @option -@item channel_layout -Channel layout of the input stream. Default is "stereo". -@end table - -For example, assuming a stereo input MP3 file -@example -ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv -@end example -will create an output Matroska file with two audio streams, one containing only -the left channel and the other the right channel. - -To split a 5.1 WAV file into per-channel files -@example -ffmpeg -i in.wav -filter_complex -'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]' --map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]' -front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]' -side_right.wav -@end example - -@section compand - -Compress or expand audio dynamic range. - -A description of the accepted options follows. - -@table @option -@item attacks -@item decays -Set list of times in seconds for each channel over which the instantaneous -level of the input signal is averaged to determine its volume. 
-@option{attacks} refers to increase of volume and @option{decays} refers -to decrease of volume. -For most situations, the attack time (response to the audio getting louder) -should be shorter than the decay time because the human ear is more sensitive -to sudden loud audio than sudden soft audio. -Typical value for attack is @code{0.3} seconds and for decay @code{0.8} -seconds. - -@item points -Set list of points for transfer function, specified in dB relative to maximum -possible signal amplitude. -Each key points list need to be defined using the following syntax: -@code{x0/y0 x1/y1 x2/y2 ...}. - -The input values must be in strictly increasing order but the transfer -function does not have to be monotonically rising. -The point @code{0/0} is assumed but may be overridden (by @code{0/out-dBn}). -Typical values for the transfer function are @code{-70/-70 -60/-20}. - -@item soft-knee -Set amount for which the points at where adjacent line segments on the -transfer function meet will be rounded. Defaults is @code{0.01}. - -@item gain -Set additional gain in dB to be applied at all points on the transfer function -and allows easy adjustment of the overall gain. -Default is @code{0}. - -@item volume -Set initial volume in dB to be assumed for each channel when filtering starts. -This permits the user to supply a nominal level initially, so that, -for example, a very large gain is not applied to initial signal levels before -the companding has begun to operate. A typical value for audio which is -initially quiet is -90 dB. Default is @code{0}. - -@item delay -Set delay in seconds. Default is @code{0}. The input audio -is analysed immediately, but audio is delayed before being fed to the -volume adjuster. Specifying a delay approximately equal to the attack/decay -times allows the filter to effectively operate in predictive rather than -reactive mode. -@end table - -@subsection Examples -@itemize -@item -Make music with both quiet and loud passages suitable for listening -in a noisy environment: -@example -compand=.3 .3:1 1:-90/-60 -60/-40 -40/-30 -20/-20:6:0:-90:0.2 -@end example - -@item -Noise-gate for when the noise is at a lower level than the signal: -@example -compand=.1 .1:.2 .2:-900/-900 -50.1/-900 -50/-50:.01:0:-90:.1 -@end example - -@item -Here is another noise-gate, this time for when the noise is at a higher level -than the signal (making it, in some ways, similar to squelch): -@example -compand=.1 .1:.1 .1:-45.1/-45.1 -45/-900 0/-900:.01:45:-90:.1 -@end example -@end itemize - -@section earwax - -Make audio easier to listen to on headphones. - -This filter adds `cues' to 44.1kHz stereo (i.e. audio CD format) audio -so that when listened to on headphones the stereo image is moved from -inside your head (standard for headphones) to outside and in front of -the listener (standard for speakers). - -Ported from SoX. - -@section equalizer - -Apply a two-pole peaking equalisation (EQ) filter. With this -filter, the signal-level at and around a selected frequency can -be increased or decreased, whilst (unlike bandpass and bandreject -filters) that at all other frequencies is unchanged. - -In order to produce complex equalisation curves, this filter can -be given several times, each with a different central frequency. - -The filter accepts the following options: - -@table @option -@item frequency, f -Set the filter's central frequency in Hz. - -@item width_type -Set method to specify band-width of filter. 
-@table @option -@item h -Hz -@item q -Q-Factor -@item o -octave -@item s -slope -@end table - -@item width, w -Specify the band-width of a filter in width_type units. - -@item gain, g -Set the required gain or attenuation in dB. -Beware of clipping when using a positive gain. -@end table - -@section highpass - -Apply a high-pass filter with 3dB point frequency. -The filter can be either single-pole, or double-pole (the default). -The filter roll off at 6dB per pole per octave (20dB per pole per decade). - -The filter accepts the following options: - -@table @option -@item frequency, f -Set frequency in Hz. Default is 3000. - -@item poles, p -Set number of poles. Default is 2. - -@item width_type -Set method to specify band-width of filter. -@table @option -@item h -Hz -@item q -Q-Factor -@item o -octave -@item s -slope -@end table - -@item width, w -Specify the band-width of a filter in width_type units. -Applies only to double-pole filter. -The default is 0.707q and gives a Butterworth response. -@end table - -@section join - -Join multiple input streams into one multi-channel stream. - -The filter accepts the following named parameters: -@table @option - -@item inputs -Number of input streams. Defaults to 2. - -@item channel_layout -Desired output channel layout. Defaults to stereo. - -@item map -Map channels from inputs to output. The argument is a '|'-separated list of -mappings, each in the @code{@var{input_idx}.@var{in_channel}-@var{out_channel}} -form. @var{input_idx} is the 0-based index of the input stream. @var{in_channel} -can be either the name of the input channel (e.g. FL for front left) or its -index in the specified input stream. @var{out_channel} is the name of the output -channel. -@end table - -The filter will attempt to guess the mappings when those are not specified -explicitly. It does so by first trying to find an unused matching input channel -and if that fails it picks the first unused input channel. - -E.g. to join 3 inputs (with properly set channel layouts) -@example -ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex join=inputs=3 OUTPUT -@end example - -To build a 5.1 output from 6 single-channel streams: -@example -ffmpeg -i fl -i fr -i fc -i sl -i sr -i lfe -filter_complex -'join=inputs=6:channel_layout=5.1:map=0.0-FL|1.0-FR|2.0-FC|3.0-SL|4.0-SR|5.0-LFE' -out -@end example - -@section ladspa - -Load a LADSPA (Linux Audio Developer's Simple Plugin API) plugin. - -To enable compilation of this filter you need to configure FFmpeg with -@code{--enable-ladspa}. - -@table @option -@item file, f -Specifies the name of LADSPA plugin library to load. If the environment -variable @env{LADSPA_PATH} is defined, the LADSPA plugin is searched in -each one of the directories specified by the colon separated list in -@env{LADSPA_PATH}, otherwise in the standard LADSPA paths, which are in -this order: @file{HOME/.ladspa/lib/}, @file{/usr/local/lib/ladspa/}, -@file{/usr/lib/ladspa/}. - -@item plugin, p -Specifies the plugin within the library. Some libraries contain only -one plugin, but others contain many of them. If this is not set filter -will list all available plugins within the specified library. - -@item controls, c -Set the '|' separated list of controls which are zero or more floating point -values that determine the behavior of the loaded plugin (for example delay, -threshold or gain). -Controls need to be defined using the following syntax: -c0=@var{value0}|c1=@var{value1}|c2=@var{value2}|..., where -@var{valuei} is the value set on the @var{i}-th control. 
-If @option{controls} is set to @code{help}, all available controls and -their valid ranges are printed. - -@item sample_rate, s -Specify the sample rate, default to 44100. Only used if plugin have -zero inputs. - -@item nb_samples, n -Set the number of samples per channel per each output frame, default -is 1024. Only used if plugin have zero inputs. - -@item duration, d -Set the minimum duration of the sourced audio. See the function -@code{av_parse_time()} for the accepted format, also check the "Time duration" -section in the ffmpeg-utils manual. -Note that the resulting duration may be greater than the specified duration, -as the generated audio is always cut at the end of a complete frame. -If not specified, or the expressed duration is negative, the audio is -supposed to be generated forever. -Only used if plugin have zero inputs. - -@end table - -@subsection Examples - -@itemize -@item -List all available plugins within amp (LADSPA example plugin) library: -@example -ladspa=file=amp -@end example - -@item -List all available controls and their valid ranges for @code{vcf_notch} -plugin from @code{VCF} library: -@example -ladspa=f=vcf:p=vcf_notch:c=help -@end example - -@item -Simulate low quality audio equipment using @code{Computer Music Toolkit} (CMT) -plugin library: -@example -ladspa=file=cmt:plugin=lofi:controls=c0=22|c1=12|c2=12 -@end example - -@item -Add reverberation to the audio using TAP-plugins -(Tom's Audio Processing plugins): -@example -ladspa=file=tap_reverb:tap_reverb -@end example - -@item -Generate white noise, with 0.2 amplitude: -@example -ladspa=file=cmt:noise_source_white:c=c0=.2 -@end example - -@item -Generate 20 bpm clicks using plugin @code{C* Click - Metronome} from the -@code{C* Audio Plugin Suite} (CAPS) library: -@example -ladspa=file=caps:Click:c=c1=20' -@end example - -@item -Apply @code{C* Eq10X2 - Stereo 10-band equaliser} effect: -@example -ladspa=caps:Eq10X2:c=c0=-48|c9=-24|c3=12|c4=2 -@end example -@end itemize - -@subsection Commands - -This filter supports the following commands: -@table @option -@item cN -Modify the @var{N}-th control value. - -If the specified value is not valid, it is ignored and prior one is kept. -@end table - -@section lowpass - -Apply a low-pass filter with 3dB point frequency. -The filter can be either single-pole or double-pole (the default). -The filter roll off at 6dB per pole per octave (20dB per pole per decade). - -The filter accepts the following options: - -@table @option -@item frequency, f -Set frequency in Hz. Default is 500. - -@item poles, p -Set number of poles. Default is 2. - -@item width_type -Set method to specify band-width of filter. -@table @option -@item h -Hz -@item q -Q-Factor -@item o -octave -@item s -slope -@end table - -@item width, w -Specify the band-width of a filter in width_type units. -Applies only to double-pole filter. -The default is 0.707q and gives a Butterworth response. -@end table - -@section pan - -Mix channels with specific gain levels. The filter accepts the output -channel layout followed by a set of channels definitions. - -This filter is also designed to remap efficiently the channels of an audio -stream. - -The filter accepts parameters of the form: -"@var{l}:@var{outdef}:@var{outdef}:..." 
- -@table @option -@item l -output channel layout or number of channels - -@item outdef -output channel specification, of the form: -"@var{out_name}=[@var{gain}*]@var{in_name}[+[@var{gain}*]@var{in_name}...]" - -@item out_name -output channel to define, either a channel name (FL, FR, etc.) or a channel -number (c0, c1, etc.) - -@item gain -multiplicative coefficient for the channel, 1 leaving the volume unchanged - -@item in_name -input channel to use, see out_name for details; it is not possible to mix -named and numbered input channels -@end table - -If the `=' in a channel specification is replaced by `<', then the gains for -that specification will be renormalized so that the total is 1, thus -avoiding clipping noise. - -@subsection Mixing examples - -For example, if you want to down-mix from stereo to mono, but with a bigger -factor for the left channel: -@example -pan=1:c0=0.9*c0+0.1*c1 -@end example - -A customized down-mix to stereo that works automatically for 3-, 4-, 5- and -7-channels surround: -@example -pan=stereo: FL < FL + 0.5*FC + 0.6*BL + 0.6*SL : FR < FR + 0.5*FC + 0.6*BR + 0.6*SR -@end example - -Note that @command{ffmpeg} integrates a default down-mix (and up-mix) system -that should be preferred (see "-ac" option) unless you have very specific -needs. - -@subsection Remapping examples - -The channel remapping will be effective if, and only if: - -@itemize -@item gain coefficients are zeroes or ones, -@item only one input per channel output, -@end itemize - -If all these conditions are satisfied, the filter will notify the user ("Pure -channel mapping detected"), and use an optimized and lossless method to do the -remapping. - -For example, if you have a 5.1 source and want a stereo audio stream by -dropping the extra channels: -@example -pan="stereo: c0=FL : c1=FR" -@end example - -Given the same source, you can also switch front left and front right channels -and keep the input channel layout: -@example -pan="5.1: c0=c1 : c1=c0 : c2=c2 : c3=c3 : c4=c4 : c5=c5" -@end example - -If the input is a stereo audio stream, you can mute the front left channel (and -still keep the stereo channel layout) with: -@example -pan="stereo:c1=c1" -@end example - -Still with a stereo audio stream input, you can copy the right channel in both -front left and right: -@example -pan="stereo: c0=FR : c1=FR" -@end example - -@section replaygain - -ReplayGain scanner filter. This filter takes an audio stream as an input and -outputs it unchanged. -At end of filtering it displays @code{track_gain} and @code{track_peak}. - -@section resample - -Convert the audio sample format, sample rate and channel layout. This filter is -not meant to be used directly. - -@section silencedetect - -Detect silence in an audio stream. - -This filter logs a message when it detects that the input audio volume is less -or equal to a noise tolerance value for a duration greater or equal to the -minimum detected noise duration. - -The printed times and duration are expressed in seconds. - -The filter accepts the following options: - -@table @option -@item duration, d -Set silence duration until notification (default is 2 seconds). - -@item noise, n -Set noise tolerance. Can be specified in dB (in case "dB" is appended to the -specified value) or amplitude ratio. Default is -60dB, or 0.001. 
-@end table - -@subsection Examples - -@itemize -@item -Detect 5 seconds of silence with -50dB noise tolerance: -@example -silencedetect=n=-50dB:d=5 -@end example - -@item -Complete example with @command{ffmpeg} to detect silence with 0.0001 noise -tolerance in @file{silence.mp3}: -@example -ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null - -@end example -@end itemize - -@section treble - -Boost or cut treble (upper) frequencies of the audio using a two-pole -shelving filter with a response similar to that of a standard -hi-fi's tone-controls. This is also known as shelving equalisation (EQ). - -The filter accepts the following options: - -@table @option -@item gain, g -Give the gain at whichever is the lower of ~22 kHz and the -Nyquist frequency. Its useful range is about -20 (for a large cut) -to +20 (for a large boost). Beware of clipping when using a positive gain. - -@item frequency, f -Set the filter's central frequency and so can be used -to extend or reduce the frequency range to be boosted or cut. -The default value is @code{3000} Hz. - -@item width_type -Set method to specify band-width of filter. -@table @option -@item h -Hz -@item q -Q-Factor -@item o -octave -@item s -slope -@end table - -@item width, w -Determine how steep is the filter's shelf transition. -@end table - -@section volume - -Adjust the input audio volume. - -The filter accepts the following options: - -@table @option - -@item volume -Set audio volume expression. - -Output values are clipped to the maximum value. - -The output audio volume is given by the relation: -@example -@var{output_volume} = @var{volume} * @var{input_volume} -@end example - -Default value for @var{volume} is "1.0". - -@item precision -Set the mathematical precision. - -This determines which input sample formats will be allowed, which affects the -precision of the volume scaling. - -@table @option -@item fixed -8-bit fixed-point; limits input sample format to U8, S16, and S32. -@item float -32-bit floating-point; limits input sample format to FLT. (default) -@item double -64-bit floating-point; limits input sample format to DBL. -@end table - -@item eval -Set when the volume expression is evaluated. - -It accepts the following values: -@table @samp -@item once -only evaluate expression once during the filter initialization, or -when the @samp{volume} command is sent - -@item frame -evaluate expression for each incoming frame -@end table - -Default value is @samp{once}. -@end table - -The volume expression can contain the following parameters. - -@table @option -@item n -frame number (starting at zero) -@item nb_channels -number of channels -@item nb_consumed_samples -number of samples consumed by the filter -@item nb_samples -number of samples in the current frame -@item pos -original frame position in the file -@item pts -frame PTS -@item sample_rate -sample rate -@item startpts -PTS at start of stream -@item startt -time at start of stream -@item t -frame time -@item tb -timestamp timebase -@item volume -last set volume value -@end table - -Note that when @option{eval} is set to @samp{once} only the -@var{sample_rate} and @var{tb} variables are available, all other -variables will evaluate to NAN. - -@subsection Commands - -This filter supports the following commands: -@table @option -@item volume -Modify the volume expression. -The command accepts the same syntax of the corresponding option. - -If the specified expression is not valid, it is kept at its current -value. 
@end table

@subsection Examples

@itemize
@item
Halve the input audio volume:
@example
volume=volume=0.5
volume=volume=1/2
volume=volume=-6.0206dB
@end example

In all the above examples the named key for @option{volume} can be
omitted, for example:
@example
volume=0.5
@end example

@item
Increase input audio power by 6 decibels using fixed-point precision:
@example
volume=volume=6dB:precision=fixed
@end example

@item
Fade volume after time 10 with an annihilation period of 5 seconds:
@example
volume='if(lt(t,10),1,max(1-(t-10)/5,0))':eval=frame
@end example
@end itemize

@section volumedetect

Detect the volume of the input audio.

The filter has no parameters. The input is not modified. Statistics about
the volume will be printed in the log when the end of the input stream is
reached.

In particular it will show the mean volume (root mean square), maximum
volume (on a per-sample basis), and the beginning of a histogram of the
registered volume values (from the maximum value to a cumulated 1/1000 of
the samples).

All volumes are in decibels relative to the maximum PCM value.

@subsection Examples

Here is an excerpt of the output:
@example
[Parsed_volumedetect_0 @ 0xa23120] mean_volume: -27 dB
[Parsed_volumedetect_0 @ 0xa23120] max_volume: -4 dB
[Parsed_volumedetect_0 @ 0xa23120] histogram_4db: 6
[Parsed_volumedetect_0 @ 0xa23120] histogram_5db: 62
[Parsed_volumedetect_0 @ 0xa23120] histogram_6db: 286
[Parsed_volumedetect_0 @ 0xa23120] histogram_7db: 1042
[Parsed_volumedetect_0 @ 0xa23120] histogram_8db: 2551
[Parsed_volumedetect_0 @ 0xa23120] histogram_9db: 4609
[Parsed_volumedetect_0 @ 0xa23120] histogram_10db: 8409
@end example

This means that:
@itemize
@item
The mean square energy is approximately -27 dB, or 10^-2.7.
@item
The largest sample is at -4 dB, or more precisely between -4 dB and -5 dB.
@item
There are 6 samples at -4 dB, 62 at -5 dB, 286 at -6 dB, etc.
@end itemize

In other words, raising the volume by +4 dB does not cause any clipping,
raising it by +5 dB causes clipping for 6 samples, etc.

@c man end AUDIO FILTERS

@chapter Audio Sources
@c man begin AUDIO SOURCES

Below is a description of the currently available audio sources.

@section abuffer

Buffer audio frames, and make them available to the filter chain.

This source is mainly intended for programmatic use, in particular
through the interface defined in @file{libavfilter/asrc_abuffer.h}.

It accepts the following named parameters:

@table @option

@item time_base
The time base which will be used for timestamps of submitted frames. It must be
either a floating-point number or in @var{numerator}/@var{denominator} form.

@item sample_rate
The sample rate of the incoming audio buffers.

@item sample_fmt
The sample format of the incoming audio buffers.
Either a sample format name or its corresponding integer representation from
the enum AVSampleFormat in @file{libavutil/samplefmt.h}.

@item channel_layout
The channel layout of the incoming audio buffers.
Either a channel layout name from channel_layout_map in
@file{libavutil/channel_layout.c} or its corresponding integer representation
from the AV_CH_LAYOUT_* macros in @file{libavutil/channel_layout.h}.

@item channels
The number of channels of the incoming audio buffers.
If both @var{channels} and @var{channel_layout} are specified, then they
must be consistent.
- -@end table - -@subsection Examples - -@example -abuffer=sample_rate=44100:sample_fmt=s16p:channel_layout=stereo -@end example - -will instruct the source to accept planar 16bit signed stereo at 44100Hz. -Since the sample format with name "s16p" corresponds to the number -6 and the "stereo" channel layout corresponds to the value 0x3, this is -equivalent to: -@example -abuffer=sample_rate=44100:sample_fmt=6:channel_layout=0x3 -@end example - -@section aevalsrc - -Generate an audio signal specified by an expression. - -This source accepts in input one or more expressions (one for each -channel), which are evaluated and used to generate a corresponding -audio signal. - -This source accepts the following options: - -@table @option -@item exprs -Set the '|'-separated expressions list for each separate channel. In case the -@option{channel_layout} option is not specified, the selected channel layout -depends on the number of provided expressions. Otherwise the last -specified expression is applied to the remaining output channels. - -@item channel_layout, c -Set the channel layout. The number of channels in the specified layout -must be equal to the number of specified expressions. - -@item duration, d -Set the minimum duration of the sourced audio. See the function -@code{av_parse_time()} for the accepted format. -Note that the resulting duration may be greater than the specified -duration, as the generated audio is always cut at the end of a -complete frame. - -If not specified, or the expressed duration is negative, the audio is -supposed to be generated forever. - -@item nb_samples, n -Set the number of samples per channel per each output frame, -default to 1024. - -@item sample_rate, s -Specify the sample rate, default to 44100. -@end table - -Each expression in @var{exprs} can contain the following constants: - -@table @option -@item n -number of the evaluated sample, starting from 0 - -@item t -time of the evaluated sample expressed in seconds, starting from 0 - -@item s -sample rate - -@end table - -@subsection Examples - -@itemize -@item -Generate silence: -@example -aevalsrc=0 -@end example - -@item -Generate a sin signal with frequency of 440 Hz, set sample rate to -8000 Hz: -@example -aevalsrc="sin(440*2*PI*t):s=8000" -@end example - -@item -Generate a two channels signal, specify the channel layout (Front -Center + Back Center) explicitly: -@example -aevalsrc="sin(420*2*PI*t)|cos(430*2*PI*t):c=FC|BC" -@end example - -@item -Generate white noise: -@example -aevalsrc="-2+random(0)" -@end example - -@item -Generate an amplitude modulated signal: -@example -aevalsrc="sin(10*2*PI*t)*sin(880*2*PI*t)" -@end example - -@item -Generate 2.5 Hz binaural beats on a 360 Hz carrier: -@example -aevalsrc="0.1*sin(2*PI*(360-2.5/2)*t) | 0.1*sin(2*PI*(360+2.5/2)*t)" -@end example - -@end itemize - -@section anullsrc - -Null audio source, return unprocessed audio frames. It is mainly useful -as a template and to be employed in analysis / debugging tools, or as -the source for filters which ignore the input data (for example the sox -synth filter). - -This source accepts the following options: - -@table @option - -@item channel_layout, cl - -Specify the channel layout, and can be either an integer or a string -representing a channel layout. The default value of @var{channel_layout} -is "stereo". - -Check the channel_layout_map definition in -@file{libavutil/channel_layout.c} for the mapping between strings and -channel layout values. 
- -@item sample_rate, r -Specify the sample rate, and defaults to 44100. - -@item nb_samples, n -Set the number of samples per requested frames. - -@end table - -@subsection Examples - -@itemize -@item -Set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO. -@example -anullsrc=r=48000:cl=4 -@end example - -@item -Do the same operation with a more obvious syntax: -@example -anullsrc=r=48000:cl=mono -@end example -@end itemize - -All the parameters need to be explicitly defined. - -@section flite - -Synthesize a voice utterance using the libflite library. - -To enable compilation of this filter you need to configure FFmpeg with -@code{--enable-libflite}. - -Note that the flite library is not thread-safe. - -The filter accepts the following options: - -@table @option - -@item list_voices -If set to 1, list the names of the available voices and exit -immediately. Default value is 0. - -@item nb_samples, n -Set the maximum number of samples per frame. Default value is 512. - -@item textfile -Set the filename containing the text to speak. - -@item text -Set the text to speak. - -@item voice, v -Set the voice to use for the speech synthesis. Default value is -@code{kal}. See also the @var{list_voices} option. -@end table - -@subsection Examples - -@itemize -@item -Read from file @file{speech.txt}, and synthetize the text using the -standard flite voice: -@example -flite=textfile=speech.txt -@end example - -@item -Read the specified text selecting the @code{slt} voice: -@example -flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt -@end example - -@item -Input text to ffmpeg: -@example -ffmpeg -f lavfi -i flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt -@end example - -@item -Make @file{ffplay} speak the specified text, using @code{flite} and -the @code{lavfi} device: -@example -ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.' -@end example -@end itemize - -For more information about libflite, check: -@url{http://www.speech.cs.cmu.edu/flite/} - -@section sine - -Generate an audio signal made of a sine wave with amplitude 1/8. - -The audio signal is bit-exact. - -The filter accepts the following options: - -@table @option - -@item frequency, f -Set the carrier frequency. Default is 440 Hz. - -@item beep_factor, b -Enable a periodic beep every second with frequency @var{beep_factor} times -the carrier frequency. Default is 0, meaning the beep is disabled. - -@item sample_rate, r -Specify the sample rate, default is 44100. - -@item duration, d -Specify the duration of the generated audio stream. - -@item samples_per_frame -Set the number of samples per output frame, default is 1024. -@end table - -@subsection Examples - -@itemize - -@item -Generate a simple 440 Hz sine wave: -@example -sine -@end example - -@item -Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds: -@example -sine=220:4:d=5 -sine=f=220:b=4:d=5 -sine=frequency=220:beep_factor=4:duration=5 -@end example - -@end itemize - -@c man end AUDIO SOURCES - -@chapter Audio Sinks -@c man begin AUDIO SINKS - -Below is a description of the currently available audio sinks. - -@section abuffersink - -Buffer audio frames, and make them available to the end of filter chain. - -This sink is mainly intended for programmatic use, in particular -through the interface defined in @file{libavfilter/buffersink.h} -or the options system. 
- -It accepts a pointer to an AVABufferSinkContext structure, which -defines the incoming buffers' formats, to be passed as the opaque -parameter to @code{avfilter_init_filter} for initialization. - -@section anullsink - -Null audio sink, do absolutely nothing with the input audio. It is -mainly useful as a template and to be employed in analysis / debugging -tools. - -@c man end AUDIO SINKS - -@chapter Video Filters -@c man begin VIDEO FILTERS - -When you configure your FFmpeg build, you can disable any of the -existing filters using @code{--disable-filters}. -The configure output will show the video filters included in your -build. - -Below is a description of the currently available video filters. - -@section alphaextract - -Extract the alpha component from the input as a grayscale video. This -is especially useful with the @var{alphamerge} filter. - -@section alphamerge - -Add or replace the alpha component of the primary input with the -grayscale value of a second input. This is intended for use with -@var{alphaextract} to allow the transmission or storage of frame -sequences that have alpha in a format that doesn't support an alpha -channel. - -For example, to reconstruct full frames from a normal YUV-encoded video -and a separate video created with @var{alphaextract}, you might use: -@example -movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out] -@end example - -Since this filter is designed for reconstruction, it operates on frame -sequences without considering timestamps, and terminates when either -input reaches end of stream. This will cause problems if your encoding -pipeline drops frames. If you're trying to apply an image as an -overlay to a video stream, consider the @var{overlay} filter instead. - -@section ass - -Same as the @ref{subtitles} filter, except that it doesn't require libavcodec -and libavformat to work. On the other hand, it is limited to ASS (Advanced -Substation Alpha) subtitles files. - -@section bbox - -Compute the bounding box for the non-black pixels in the input frame -luminance plane. - -This filter computes the bounding box containing all the pixels with a -luminance value greater than the minimum allowed value. -The parameters describing the bounding box are printed on the filter -log. - -The filter accepts the following option: - -@table @option -@item min_val -Set the minimal luminance value. Default is @code{16}. -@end table - -@section blackdetect - -Detect video intervals that are (almost) completely black. Can be -useful to detect chapter transitions, commercials, or invalid -recordings. Output lines contains the time for the start, end and -duration of the detected black interval expressed in seconds. - -In order to display the output lines, you need to set the loglevel at -least to the AV_LOG_INFO value. - -The filter accepts the following options: - -@table @option -@item black_min_duration, d -Set the minimum detected black duration expressed in seconds. It must -be a non-negative floating point number. - -Default value is 2.0. - -@item picture_black_ratio_th, pic_th -Set the threshold for considering a picture "black". -Express the minimum value for the ratio: -@example -@var{nb_black_pixels} / @var{nb_pixels} -@end example - -for which a picture is considered black. -Default value is 0.98. - -@item pixel_black_th, pix_th -Set the threshold for considering a pixel "black". - -The threshold expresses the maximum pixel luminance value for which a -pixel is considered "black". 
The provided value is scaled according to -the following equation: -@example -@var{absolute_threshold} = @var{luminance_minimum_value} + @var{pixel_black_th} * @var{luminance_range_size} -@end example - -@var{luminance_range_size} and @var{luminance_minimum_value} depend on -the input video format, the range is [0-255] for YUV full-range -formats and [16-235] for YUV non full-range formats. - -Default value is 0.10. -@end table - -The following example sets the maximum pixel threshold to the minimum -value, and detects only black intervals of 2 or more seconds: -@example -blackdetect=d=2:pix_th=0.00 -@end example - -@section blackframe - -Detect frames that are (almost) completely black. Can be useful to -detect chapter transitions or commercials. Output lines consist of -the frame number of the detected frame, the percentage of blackness, -the position in the file if known or -1 and the timestamp in seconds. - -In order to display the output lines, you need to set the loglevel at -least to the AV_LOG_INFO value. - -The filter accepts the following options: - -@table @option - -@item amount -Set the percentage of the pixels that have to be below the threshold, defaults -to @code{98}. - -@item threshold, thresh -Set the threshold below which a pixel value is considered black, defaults to -@code{32}. - -@end table - -@section blend - -Blend two video frames into each other. - -It takes two input streams and outputs one stream, the first input is the -"top" layer and second input is "bottom" layer. -Output terminates when shortest input terminates. - -A description of the accepted options follows. - -@table @option -@item c0_mode -@item c1_mode -@item c2_mode -@item c3_mode -@item all_mode -Set blend mode for specific pixel component or all pixel components in case -of @var{all_mode}. Default value is @code{normal}. - -Available values for component modes are: -@table @samp -@item addition -@item and -@item average -@item burn -@item darken -@item difference -@item divide -@item dodge -@item exclusion -@item hardlight -@item lighten -@item multiply -@item negation -@item normal -@item or -@item overlay -@item phoenix -@item pinlight -@item reflect -@item screen -@item softlight -@item subtract -@item vividlight -@item xor -@end table - -@item c0_opacity -@item c1_opacity -@item c2_opacity -@item c3_opacity -@item all_opacity -Set blend opacity for specific pixel component or all pixel components in case -of @var{all_opacity}. Only used in combination with pixel component blend modes. - -@item c0_expr -@item c1_expr -@item c2_expr -@item c3_expr -@item all_expr -Set blend expression for specific pixel component or all pixel components in case -of @var{all_expr}. Note that related mode options will be ignored if those are set. - -The expressions can use the following variables: - -@table @option -@item N -The sequential number of the filtered frame, starting from @code{0}. - -@item X -@item Y -the coordinates of the current sample - -@item W -@item H -the width and height of currently filtered plane - -@item SW -@item SH -Width and height scale depending on the currently filtered plane. It is the -ratio between the corresponding luma plane number of pixels and the current -plane ones. E.g. for YUV4:2:0 the values are @code{1,1} for the luma plane, and -@code{0.5,0.5} for chroma planes. - -@item T -Time of the current frame, expressed in seconds. - -@item TOP, A -Value of pixel component at current location for first video frame (top layer). 
- -@item BOTTOM, B -Value of pixel component at current location for second video frame (bottom layer). -@end table - -@item shortest -Force termination when the shortest input terminates. Default is @code{0}. -@item repeatlast -Continue applying the last bottom frame after the end of the stream. A value of -@code{0} disable the filter after the last frame of the bottom layer is reached. -Default is @code{1}. -@end table - -@subsection Examples - -@itemize -@item -Apply transition from bottom layer to top layer in first 10 seconds: -@example -blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))' -@end example - -@item -Apply 1x1 checkerboard effect: -@example -blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)' -@end example - -@item -Apply uncover left effect: -@example -blend=all_expr='if(gte(N*SW+X,W),A,B)' -@end example - -@item -Apply uncover down effect: -@example -blend=all_expr='if(gte(Y-N*SH,0),A,B)' -@end example - -@item -Apply uncover up-left effect: -@example -blend=all_expr='if(gte(T*SH*40+Y,H)*gte((T*40*SW+X)*W/H,W),A,B)' -@end example -@end itemize - -@section boxblur - -Apply boxblur algorithm to the input video. - -The filter accepts the following options: - -@table @option - -@item luma_radius, lr -@item luma_power, lp -@item chroma_radius, cr -@item chroma_power, cp -@item alpha_radius, ar -@item alpha_power, ap - -@end table - -A description of the accepted options follows. - -@table @option -@item luma_radius, lr -@item chroma_radius, cr -@item alpha_radius, ar -Set an expression for the box radius in pixels used for blurring the -corresponding input plane. - -The radius value must be a non-negative number, and must not be -greater than the value of the expression @code{min(w,h)/2} for the -luma and alpha planes, and of @code{min(cw,ch)/2} for the chroma -planes. - -Default value for @option{luma_radius} is "2". If not specified, -@option{chroma_radius} and @option{alpha_radius} default to the -corresponding value set for @option{luma_radius}. - -The expressions can contain the following constants: -@table @option -@item w -@item h -the input width and height in pixels - -@item cw -@item ch -the input chroma image width and height in pixels - -@item hsub -@item vsub -horizontal and vertical chroma subsample values. For example for the -pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1. -@end table - -@item luma_power, lp -@item chroma_power, cp -@item alpha_power, ap -Specify how many times the boxblur filter is applied to the -corresponding plane. - -Default value for @option{luma_power} is 2. If not specified, -@option{chroma_power} and @option{alpha_power} default to the -corresponding value set for @option{luma_power}. - -A value of 0 will disable the effect. -@end table - -@subsection Examples - -@itemize -@item -Apply a boxblur filter with luma, chroma, and alpha radius -set to 2: -@example -boxblur=luma_radius=2:luma_power=1 -boxblur=2:1 -@end example - -@item -Set luma radius to 2, alpha and chroma radius to 0: -@example -boxblur=2:1:cr=0:ar=0 -@end example - -@item -Set luma and chroma radius to a fraction of the video dimension: -@example -boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chroma_power=1 -@end example -@end itemize - -@section colorbalance -Modify intensity of primary colors (red, green and blue) of input frames. - -The filter allows an input frame to be adjusted in the shadows, midtones or highlights -regions for the red-cyan, green-magenta or blue-yellow balance. 
- -A positive adjustment value shifts the balance towards the primary color, a negative -value towards the complementary color. - -The filter accepts the following options: - -@table @option -@item rs -@item gs -@item bs -Adjust red, green and blue shadows (darkest pixels). - -@item rm -@item gm -@item bm -Adjust red, green and blue midtones (medium pixels). - -@item rh -@item gh -@item bh -Adjust red, green and blue highlights (brightest pixels). - -Allowed ranges for options are @code{[-1.0, 1.0]}. Defaults are @code{0}. -@end table - -@subsection Examples - -@itemize -@item -Add red color cast to shadows: -@example -colorbalance=rs=.3 -@end example -@end itemize - -@section colorchannelmixer - -Adjust video input frames by re-mixing color channels. - -This filter modifies a color channel by adding the values associated to -the other channels of the same pixels. For example if the value to -modify is red, the output value will be: -@example -@var{red}=@var{red}*@var{rr} + @var{blue}*@var{rb} + @var{green}*@var{rg} + @var{alpha}*@var{ra} -@end example - -The filter accepts the following options: - -@table @option -@item rr -@item rg -@item rb -@item ra -Adjust contribution of input red, green, blue and alpha channels for output red channel. -Default is @code{1} for @var{rr}, and @code{0} for @var{rg}, @var{rb} and @var{ra}. - -@item gr -@item gg -@item gb -@item ga -Adjust contribution of input red, green, blue and alpha channels for output green channel. -Default is @code{1} for @var{gg}, and @code{0} for @var{gr}, @var{gb} and @var{ga}. - -@item br -@item bg -@item bb -@item ba -Adjust contribution of input red, green, blue and alpha channels for output blue channel. -Default is @code{1} for @var{bb}, and @code{0} for @var{br}, @var{bg} and @var{ba}. - -@item ar -@item ag -@item ab -@item aa -Adjust contribution of input red, green, blue and alpha channels for output alpha channel. -Default is @code{1} for @var{aa}, and @code{0} for @var{ar}, @var{ag} and @var{ab}. - -Allowed ranges for options are @code{[-2.0, 2.0]}. -@end table - -@subsection Examples - -@itemize -@item -Convert source to grayscale: -@example -colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3 -@end example -@item -Simulate sepia tones: -@example -colorchannelmixer=.393:.769:.189:0:.349:.686:.168:0:.272:.534:.131 -@end example -@end itemize - -@section colormatrix - -Convert color matrix. - -The filter accepts the following options: - -@table @option -@item src -@item dst -Specify the source and destination color matrix. Both values must be -specified. - -The accepted values are: -@table @samp -@item bt709 -BT.709 - -@item bt601 -BT.601 - -@item smpte240m -SMPTE-240M - -@item fcc -FCC -@end table -@end table - -For example to convert from BT.601 to SMPTE-240M, use the command: -@example -colormatrix=bt601:smpte240m -@end example - -@section copy - -Copy the input source unchanged to the output. Mainly useful for -testing purposes. - -@section crop - -Crop the input video to given dimensions. - -The filter accepts the following options: - -@table @option -@item w, out_w -Width of the output video. It defaults to @code{iw}. -This expression is evaluated only once during the filter -configuration. - -@item h, out_h -Height of the output video. It defaults to @code{ih}. -This expression is evaluated only once during the filter -configuration. - -@item x -Horizontal position, in the input video, of the left edge of the output video. -It defaults to @code{(in_w-out_w)/2}. -This expression is evaluated per-frame. 
- -@item y -Vertical position, in the input video, of the top edge of the output video. -It defaults to @code{(in_h-out_h)/2}. -This expression is evaluated per-frame. - -@item keep_aspect -If set to 1 will force the output display aspect ratio -to be the same of the input, by changing the output sample aspect -ratio. It defaults to 0. -@end table - -The @var{out_w}, @var{out_h}, @var{x}, @var{y} parameters are -expressions containing the following constants: - -@table @option -@item x -@item y -the computed values for @var{x} and @var{y}. They are evaluated for -each new frame. - -@item in_w -@item in_h -the input width and height - -@item iw -@item ih -same as @var{in_w} and @var{in_h} - -@item out_w -@item out_h -the output (cropped) width and height - -@item ow -@item oh -same as @var{out_w} and @var{out_h} - -@item a -same as @var{iw} / @var{ih} - -@item sar -input sample aspect ratio - -@item dar -input display aspect ratio, it is the same as (@var{iw} / @var{ih}) * @var{sar} - -@item hsub -@item vsub -horizontal and vertical chroma subsample values. For example for the -pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1. - -@item n -the number of input frame, starting from 0 - -@item pos -the position in the file of the input frame, NAN if unknown - -@item t -timestamp expressed in seconds, NAN if the input timestamp is unknown - -@end table - -The expression for @var{out_w} may depend on the value of @var{out_h}, -and the expression for @var{out_h} may depend on @var{out_w}, but they -cannot depend on @var{x} and @var{y}, as @var{x} and @var{y} are -evaluated after @var{out_w} and @var{out_h}. - -The @var{x} and @var{y} parameters specify the expressions for the -position of the top-left corner of the output (non-cropped) area. They -are evaluated for each frame. If the evaluated value is not valid, it -is approximated to the nearest valid value. - -The expression for @var{x} may depend on @var{y}, and the expression -for @var{y} may depend on @var{x}. - -@subsection Examples - -@itemize -@item -Crop area with size 100x100 at position (12,34). 
-@example
-crop=100:100:12:34
-@end example
-
-Using named options, the example above becomes:
-@example
-crop=w=100:h=100:x=12:y=34
-@end example
-
-@item
-Crop the central input area with size 100x100:
-@example
-crop=100:100
-@end example
-
-@item
-Crop the central input area with size 2/3 of the input video:
-@example
-crop=2/3*in_w:2/3*in_h
-@end example
-
-@item
-Crop the input video central square:
-@example
-crop=out_w=in_h
-crop=in_h
-@end example
-
-@item
-Delimit the rectangle with the top-left corner placed at position
-100:100 and the right-bottom corner corresponding to the right-bottom
-corner of the input image:
-@example
-crop=in_w-100:in_h-100:100:100
-@end example
-
-@item
-Crop 10 pixels from the left and right borders, and 20 pixels from
-the top and bottom borders:
-@example
-crop=in_w-2*10:in_h-2*20
-@end example
-
-@item
-Keep only the bottom right quarter of the input image:
-@example
-crop=in_w/2:in_h/2:in_w/2:in_h/2
-@end example
-
-@item
-Crop height for getting Greek harmony:
-@example
-crop=in_w:1/PHI*in_w
-@end example
-
-@item
-Apply trembling effect:
-@example
-crop=in_w/2:in_h/2:(in_w-out_w)/2+((in_w-out_w)/2)*sin(n/10):(in_h-out_h)/2 +((in_h-out_h)/2)*sin(n/7)
-@end example
-
-@item
-Apply erratic camera effect depending on timestamp:
-@example
-crop=in_w/2:in_h/2:(in_w-out_w)/2+((in_w-out_w)/2)*sin(t*10):(in_h-out_h)/2 +((in_h-out_h)/2)*sin(t*13)
-@end example
-
-@item
-Set x depending on the value of y:
-@example
-crop=in_w/2:in_h/2:y:10+10*sin(n/10)
-@end example
-@end itemize
-
-@section cropdetect
-
-Auto-detect crop size.
-
-Calculate the necessary cropping parameters and print the recommended
-parameters through the logging system. The detected dimensions
-correspond to the non-black area of the input video.
-
-The filter accepts the following options:
-
-@table @option
-
-@item limit
-Set the black value threshold, which can be optionally specified
-from nothing (0) to everything (255). An intensity value greater
-than the set value is considered non-black. Default value is 24.
-
-@item round
-Set the value which the width/height should be divisible by. The
-offset is automatically adjusted to center the video. Use 2 to get
-only even dimensions (needed for 4:2:2 video). 16 is best when
-encoding to most video codecs. Default value is 16.
-
-@item reset_count, reset
-Set the counter that determines after how many frames cropdetect will
-reset the previously detected largest video area and start over to
-detect the current optimal crop area. Default value is 0.
-
-This can be useful when channel logos distort the video area. 0
-indicates 'never reset', and returns the largest area encountered during
-playback.
-@end table
-
-@anchor{curves}
-@section curves
-
-Apply color adjustments using curves.
-
-This filter is similar to the Adobe Photoshop and GIMP curves tools. Each
-component (red, green and blue) has its values defined by @var{N} key points
-tied to each other using a smooth curve. The x-axis represents the pixel
-values from the input frame, and the y-axis the new pixel values to be set for
-the output frame.
-
-By default, a component curve is defined by the two points @var{(0;0)} and
-@var{(1;1)}. This creates a straight line where each original pixel value is
-"adjusted" to its own value, which means no change to the image.
-
-The filter allows you to redefine these two points and add some more. A new
-curve (using a natural cubic spline interpolation) will be defined to pass
-smoothly through all these new coordinates.
-The newly defined points need to be
-strictly increasing over the x-axis, and their @var{x} and @var{y} values must
-be in the @var{[0;1]} interval. If the computed curves happen to go outside
-the @var{[0;1]} interval, the values will be clipped accordingly.
-
-If there is no key point defined in @code{x=0}, the filter will automatically
-insert a @var{(0;0)} point. In the same way, if there is no key point defined
-in @code{x=1}, the filter will automatically insert a @var{(1;1)} point.
-
-The filter accepts the following options:
-
-@table @option
-@item preset
-Select one of the available color presets. This option can be used in addition
-to the @option{r}, @option{g}, @option{b} parameters; in this case, the latter
-options take priority over the preset values.
-Available presets are:
-@table @samp
-@item none
-@item color_negative
-@item cross_process
-@item darker
-@item increase_contrast
-@item lighter
-@item linear_contrast
-@item medium_contrast
-@item negative
-@item strong_contrast
-@item vintage
-@end table
-Default is @code{none}.
-@item master, m
-Set the master key points. These points will define a second pass mapping. It
-is sometimes called a "luminance" or "value" mapping. It can be used with
-@option{r}, @option{g}, @option{b} or @option{all} since it acts like a
-post-processing LUT.
-@item red, r
-Set the key points for the red component.
-@item green, g
-Set the key points for the green component.
-@item blue, b
-Set the key points for the blue component.
-@item all
-Set the key points for all components (not including master).
-Can be used in addition to the other key points component
-options. In this case, the unset component(s) will fall back on this
-@option{all} setting.
-@item psfile
-Specify a Photoshop curves file (@code{.asv}) to import the settings from.
-@end table
-
-To avoid some filtergraph syntax conflicts, each key point list needs to be
-defined using the following syntax: @code{x0/y0 x1/y1 x2/y2 ...}.
-
-@subsection Examples
-
-@itemize
-@item
-Slightly increase the middle level of blue:
-@example
-curves=blue='0.5/0.58'
-@end example
-
-@item
-Vintage effect:
-@example
-curves=r='0/0.11 .42/.51 1/0.95':g='0.50/0.48':b='0/0.22 .49/.44 1/0.8'
-@end example
-Here we obtain the following coordinates for each component:
-@table @var
-@item red
-@code{(0;0.11) (0.42;0.51) (1;0.95)}
-@item green
-@code{(0;0) (0.50;0.48) (1;1)}
-@item blue
-@code{(0;0.22) (0.49;0.44) (1;0.80)}
-@end table
-
-@item
-The previous example can also be achieved with the associated built-in preset:
-@example
-curves=preset=vintage
-@end example
-
-@item
-Or simply:
-@example
-curves=vintage
-@end example
-
-@item
-Use a Photoshop preset and redefine the points of the green component:
-@example
-curves=psfile='MyCurvesPresets/purple.asv':green='0.45/0.53'
-@end example
-@end itemize
-
-@section dctdnoiz
-
-Denoise frames using 2D DCT (frequency domain filtering).
-
-This filter is not designed for real time and can be extremely slow.
-
-The filter accepts the following options:
-
-@table @option
-@item sigma, s
-Set the noise sigma constant.
-
-This @var{sigma} defines a hard threshold of @code{3 * sigma}; every DCT
-coefficient (absolute value) below this threshold will be dropped.
-
-If you need more advanced filtering, see @option{expr}.
-
-Default is @code{0}.
-
-@item overlap
-Set the number of overlapping pixels for each block. Each block is of size
-@code{16x16}.
Since the filter can be slow, you may want to reduce this value, -at the cost of a less effective filter and the risk of various artefacts. - -If the overlapping value doesn't allow to process the whole input width or -height, a warning will be displayed and according borders won't be denoised. - -Default value is @code{15}. - -@item expr, e -Set the coefficient factor expression. - -For each coefficient of a DCT block, this expression will be evaluated as a -multiplier value for the coefficient. - -If this is option is set, the @option{sigma} option will be ignored. - -The absolute value of the coefficient can be accessed through the @var{c} -variable. -@end table - -@subsection Examples - -Apply a denoise with a @option{sigma} of @code{4.5}: -@example -dctdnoiz=4.5 -@end example - -The same operation can be achieved using the expression system: -@example -dctdnoiz=e='gte(c, 4.5*3)' -@end example - -@anchor{decimate} -@section decimate - -Drop duplicated frames at regular intervals. - -The filter accepts the following options: - -@table @option -@item cycle -Set the number of frames from which one will be dropped. Setting this to -@var{N} means one frame in every batch of @var{N} frames will be dropped. -Default is @code{5}. - -@item dupthresh -Set the threshold for duplicate detection. If the difference metric for a frame -is less than or equal to this value, then it is declared as duplicate. Default -is @code{1.1} - -@item scthresh -Set scene change threshold. Default is @code{15}. - -@item blockx -@item blocky -Set the size of the x and y-axis blocks used during metric calculations. -Larger blocks give better noise suppression, but also give worse detection of -small movements. Must be a power of two. Default is @code{32}. - -@item ppsrc -Mark main input as a pre-processed input and activate clean source input -stream. This allows the input to be pre-processed with various filters to help -the metrics calculation while keeping the frame selection lossless. When set to -@code{1}, the first stream is for the pre-processed input, and the second -stream is the clean source from where the kept frames are chosen. Default is -@code{0}. - -@item chroma -Set whether or not chroma is considered in the metric calculations. Default is -@code{1}. -@end table - -@section delogo - -Suppress a TV station logo by a simple interpolation of the surrounding -pixels. Just set a rectangle covering the logo and watch it disappear -(and sometimes something even uglier appear - your mileage may vary). - -This filter accepts the following options: -@table @option - -@item x -@item y -Specify the top left corner coordinates of the logo. They must be -specified. - -@item w -@item h -Specify the width and height of the logo to clear. They must be -specified. - -@item band, t -Specify the thickness of the fuzzy edge of the rectangle (added to -@var{w} and @var{h}). The default value is 4. - -@item show -When set to 1, a green rectangle is drawn on the screen to simplify -finding the right @var{x}, @var{y}, @var{w}, and @var{h} parameters. -The default value is 0. - -The rectangle is drawn on the outermost pixels which will be (partly) -replaced with interpolated values. The values of the next pixels -immediately outside this rectangle in each direction will be used to -compute the interpolated pixel values inside the rectangle. 
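-
-For instance, a sketch (with purely illustrative coordinates) that first
-displays the rectangle so it can be fine-tuned before the actual run:
-@example
-delogo=x=0:y=0:w=100:h=77:show=1
-@end example
-Once the placement looks right, drop @option{show} (or set it back to 0)
-for the final pass.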
- -@end table - -@subsection Examples - -@itemize -@item -Set a rectangle covering the area with top left corner coordinates 0,0 -and size 100x77, setting a band of size 10: -@example -delogo=x=0:y=0:w=100:h=77:band=10 -@end example - -@end itemize - -@section deshake - -Attempt to fix small changes in horizontal and/or vertical shift. This -filter helps remove camera shake from hand-holding a camera, bumping a -tripod, moving on a vehicle, etc. - -The filter accepts the following options: - -@table @option - -@item x -@item y -@item w -@item h -Specify a rectangular area where to limit the search for motion -vectors. -If desired the search for motion vectors can be limited to a -rectangular area of the frame defined by its top left corner, width -and height. These parameters have the same meaning as the drawbox -filter which can be used to visualise the position of the bounding -box. - -This is useful when simultaneous movement of subjects within the frame -might be confused for camera motion by the motion vector search. - -If any or all of @var{x}, @var{y}, @var{w} and @var{h} are set to -1 -then the full frame is used. This allows later options to be set -without specifying the bounding box for the motion vector search. - -Default - search the whole frame. - -@item rx -@item ry -Specify the maximum extent of movement in x and y directions in the -range 0-64 pixels. Default 16. - -@item edge -Specify how to generate pixels to fill blanks at the edge of the -frame. Available values are: -@table @samp -@item blank, 0 -Fill zeroes at blank locations -@item original, 1 -Original image at blank locations -@item clamp, 2 -Extruded edge value at blank locations -@item mirror, 3 -Mirrored edge at blank locations -@end table -Default value is @samp{mirror}. - -@item blocksize -Specify the blocksize to use for motion search. Range 4-128 pixels, -default 8. - -@item contrast -Specify the contrast threshold for blocks. Only blocks with more than -the specified contrast (difference between darkest and lightest -pixels) will be considered. Range 1-255, default 125. - -@item search -Specify the search strategy. Available values are: -@table @samp -@item exhaustive, 0 -Set exhaustive search -@item less, 1 -Set less exhaustive search. -@end table -Default value is @samp{exhaustive}. - -@item filename -If set then a detailed log of the motion search is written to the -specified file. - -@item opencl -If set to 1, specify using OpenCL capabilities, only available if -FFmpeg was configured with @code{--enable-opencl}. Default value is 0. - -@end table - -@section drawbox - -Draw a colored box on the input image. - -This filter accepts the following options: - -@table @option -@item x -@item y -The expressions which specify the top left corner coordinates of the box. Default to 0. - -@item width, w -@item height, h -The expressions which specify the width and height of the box, if 0 they are interpreted as -the input width and height. Default to 0. - -@item color, c -Specify the color of the box to write. For the general syntax of this option, -check the "Color" section in the ffmpeg-utils manual. If the special -value @code{invert} is used, the box edge color is the same as the -video with inverted luma. - -@item thickness, t -The expression which sets the thickness of the box edge. Default value is @code{3}. - -See below for the list of accepted constants. 
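-
-For instance, a minimal sketch (the values are only illustrative) that
-scales the border thickness with the input height, using the constants
-described below:
-@example
-drawbox=x=10:y=10:w=iw-20:h=ih-20:t=ih/100:c=red
-@end example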
-@end table - -The parameters for @var{x}, @var{y}, @var{w} and @var{h} and @var{t} are expressions containing the -following constants: - -@table @option -@item dar -The input display aspect ratio, it is the same as (@var{w} / @var{h}) * @var{sar}. - -@item hsub -@item vsub -horizontal and vertical chroma subsample values. For example for the -pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1. - -@item in_h, ih -@item in_w, iw -The input width and height. - -@item sar -The input sample aspect ratio. - -@item x -@item y -The x and y offset coordinates where the box is drawn. - -@item w -@item h -The width and height of the drawn box. - -@item t -The thickness of the drawn box. - -These constants allow the @var{x}, @var{y}, @var{w}, @var{h} and @var{t} expressions to refer to -each other, so you may for example specify @code{y=x/dar} or @code{h=w/dar}. - -@end table - -@subsection Examples - -@itemize -@item -Draw a black box around the edge of the input image: -@example -drawbox -@end example - -@item -Draw a box with color red and an opacity of 50%: -@example -drawbox=10:20:200:60:red@@0.5 -@end example - -The previous example can be specified as: -@example -drawbox=x=10:y=20:w=200:h=60:color=red@@0.5 -@end example - -@item -Fill the box with pink color: -@example -drawbox=x=10:y=10:w=100:h=100:color=pink@@0.5:t=max -@end example - -@item -Draw a 2-pixel red 2.40:1 mask: -@example -drawbox=x=-t:y=0.5*(ih-iw/2.4)-t:w=iw+t*2:h=iw/2.4+t*2:t=2:c=red -@end example -@end itemize - -@section drawgrid - -Draw a grid on the input image. - -This filter accepts the following options: - -@table @option -@item x -@item y -The expressions which specify the coordinates of some point of grid intersection (meant to configure offset). Both default to 0. - -@item width, w -@item height, h -The expressions which specify the width and height of the grid cell, if 0 they are interpreted as the -input width and height, respectively, minus @code{thickness}, so image gets -framed. Default to 0. - -@item color, c -Specify the color of the grid. For the general syntax of this option, -check the "Color" section in the ffmpeg-utils manual. If the special -value @code{invert} is used, the grid color is the same as the -video with inverted luma. - -@item thickness, t -The expression which sets the thickness of the grid line. Default value is @code{1}. - -See below for the list of accepted constants. -@end table - -The parameters for @var{x}, @var{y}, @var{w} and @var{h} and @var{t} are expressions containing the -following constants: - -@table @option -@item dar -The input display aspect ratio, it is the same as (@var{w} / @var{h}) * @var{sar}. - -@item hsub -@item vsub -horizontal and vertical chroma subsample values. For example for the -pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1. - -@item in_h, ih -@item in_w, iw -The input grid cell width and height. - -@item sar -The input sample aspect ratio. - -@item x -@item y -The x and y coordinates of some point of grid intersection (meant to configure offset). - -@item w -@item h -The width and height of the drawn cell. - -@item t -The thickness of the drawn cell. - -These constants allow the @var{x}, @var{y}, @var{w}, @var{h} and @var{t} expressions to refer to -each other, so you may for example specify @code{y=x/dar} or @code{h=w/dar}. 
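-
-For instance, a sketch deriving the cell height from the cell width in
-exactly this way:
-@example
-drawgrid=w=iw/10:h=w/dar:t=2:c=white@@0.5
-@end example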
- -@end table - -@subsection Examples - -@itemize -@item -Draw a grid with cell 100x100 pixels, thickness 2 pixels, with color red and an opacity of 50%: -@example -drawgrid=width=100:height=100:thickness=2:color=red@@0.5 -@end example - -@item -Draw a white 3x3 grid with an opacity of 50%: -@example -drawgrid=w=iw/3:h=ih/3:t=2:c=white@@0.5 -@end example -@end itemize - -@anchor{drawtext} -@section drawtext - -Draw text string or text from specified file on top of video using the -libfreetype library. - -To enable compilation of this filter you need to configure FFmpeg with -@code{--enable-libfreetype}. - -@subsection Syntax - -The description of the accepted parameters follows. - -@table @option - -@item box -Used to draw a box around text using background color. -Value should be either 1 (enable) or 0 (disable). -The default value of @var{box} is 0. - -@item boxcolor -The color to be used for drawing box around text. For the syntax of this -option, check the "Color" section in the ffmpeg-utils manual. - -The default value of @var{boxcolor} is "white". - -@item expansion -Select how the @var{text} is expanded. Can be either @code{none}, -@code{strftime} (deprecated) or -@code{normal} (default). See the @ref{drawtext_expansion, Text expansion} section -below for details. - -@item fix_bounds -If true, check and fix text coords to avoid clipping. - -@item fontcolor -The color to be used for drawing fonts. For the syntax of this option, check -the "Color" section in the ffmpeg-utils manual. - -The default value of @var{fontcolor} is "black". - -@item fontfile -The font file to be used for drawing text. Path must be included. -This parameter is mandatory. - -@item fontsize -The font size to be used for drawing text. -The default value of @var{fontsize} is 16. - -@item ft_load_flags -Flags to be used for loading the fonts. - -The flags map the corresponding flags supported by libfreetype, and are -a combination of the following values: -@table @var -@item default -@item no_scale -@item no_hinting -@item render -@item no_bitmap -@item vertical_layout -@item force_autohint -@item crop_bitmap -@item pedantic -@item ignore_global_advance_width -@item no_recurse -@item ignore_transform -@item monochrome -@item linear_design -@item no_autohint -@end table - -Default value is "render". - -For more information consult the documentation for the FT_LOAD_* -libfreetype flags. - -@item shadowcolor -The color to be used for drawing a shadow behind the drawn text. For the -syntax of this option, check the "Color" section in the ffmpeg-utils manual. - -The default value of @var{shadowcolor} is "black". - -@item shadowx -@item shadowy -The x and y offsets for the text shadow position with respect to the -position of the text. They can be either positive or negative -values. Default value for both is "0". - -@item start_number -The starting frame number for the n/frame_num variable. The default value -is "0". - -@item tabsize -The size in number of spaces to use for rendering the tab. -Default value is 4. - -@item timecode -Set the initial timecode representation in "hh:mm:ss[:;.]ff" -format. It can be used with or without text parameter. @var{timecode_rate} -option must be specified. - -@item timecode_rate, rate, r -Set the timecode frame rate (timecode only). - -@item text -The text string to be drawn. The text must be a sequence of UTF-8 -encoded characters. -This parameter is mandatory if no file is specified with the parameter -@var{textfile}. - -@item textfile -A text file containing text to be drawn. 
The text must be a sequence -of UTF-8 encoded characters. - -This parameter is mandatory if no text string is specified with the -parameter @var{text}. - -If both @var{text} and @var{textfile} are specified, an error is thrown. - -@item reload -If set to 1, the @var{textfile} will be reloaded before each frame. -Be sure to update it atomically, or it may be read partially, or even fail. - -@item x -@item y -The expressions which specify the offsets where text will be drawn -within the video frame. They are relative to the top/left border of the -output image. - -The default value of @var{x} and @var{y} is "0". - -See below for the list of accepted constants and functions. -@end table - -The parameters for @var{x} and @var{y} are expressions containing the -following constants and functions: - -@table @option -@item dar -input display aspect ratio, it is the same as (@var{w} / @var{h}) * @var{sar} - -@item hsub -@item vsub -horizontal and vertical chroma subsample values. For example for the -pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1. - -@item line_h, lh -the height of each text line - -@item main_h, h, H -the input height - -@item main_w, w, W -the input width - -@item max_glyph_a, ascent -the maximum distance from the baseline to the highest/upper grid -coordinate used to place a glyph outline point, for all the rendered -glyphs. -It is a positive value, due to the grid's orientation with the Y axis -upwards. - -@item max_glyph_d, descent -the maximum distance from the baseline to the lowest grid coordinate -used to place a glyph outline point, for all the rendered glyphs. -This is a negative value, due to the grid's orientation, with the Y axis -upwards. - -@item max_glyph_h -maximum glyph height, that is the maximum height for all the glyphs -contained in the rendered text, it is equivalent to @var{ascent} - -@var{descent}. - -@item max_glyph_w -maximum glyph width, that is the maximum width for all the glyphs -contained in the rendered text - -@item n -the number of input frame, starting from 0 - -@item rand(min, max) -return a random number included between @var{min} and @var{max} - -@item sar -input sample aspect ratio - -@item t -timestamp expressed in seconds, NAN if the input timestamp is unknown - -@item text_h, th -the height of the rendered text - -@item text_w, tw -the width of the rendered text - -@item x -@item y -the x and y offset coordinates where the text is drawn. - -These parameters allow the @var{x} and @var{y} expressions to refer -each other, so you can for example specify @code{y=x/dar}. -@end table - -If libavfilter was built with @code{--enable-fontconfig}, then -@option{fontfile} can be a fontconfig pattern or omitted. - -@anchor{drawtext_expansion} -@subsection Text expansion - -If @option{expansion} is set to @code{strftime}, -the filter recognizes strftime() sequences in the provided text and -expands them accordingly. Check the documentation of strftime(). This -feature is deprecated. - -If @option{expansion} is set to @code{none}, the text is printed verbatim. - -If @option{expansion} is set to @code{normal} (which is the default), -the following expansion mechanism is used. - -The backslash character '\', followed by any character, always expands to -the second character. - -Sequence of the form @code{%@{...@}} are expanded. The text between the -braces is a function name, possibly followed by arguments separated by ':'. -If the arguments contain special characters or delimiters (':' or '@}'), -they should be escaped. 
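-
-As a small illustration (any valid font file can be substituted), the
-following sketch overlays the frame number and picture type using two of
-the functions listed below:
-@example
-drawtext="fontfile=FreeSerif.ttf: text='frame %@{n@} (%@{pict_type@})'"
-@end example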
- -Note that they probably must also be escaped as the value for the -@option{text} option in the filter argument string and as the filter -argument in the filtergraph description, and possibly also for the shell, -that makes up to four levels of escaping; using a text file avoids these -problems. - -The following functions are available: - -@table @command - -@item expr, e -The expression evaluation result. - -It must take one argument specifying the expression to be evaluated, -which accepts the same constants and functions as the @var{x} and -@var{y} values. Note that not all constants should be used, for -example the text size is not known when evaluating the expression, so -the constants @var{text_w} and @var{text_h} will have an undefined -value. - -@item gmtime -The time at which the filter is running, expressed in UTC. -It can accept an argument: a strftime() format string. - -@item localtime -The time at which the filter is running, expressed in the local time zone. -It can accept an argument: a strftime() format string. - -@item metadata -Frame metadata. It must take one argument specifying metadata key. - -@item n, frame_num -The frame number, starting from 0. - -@item pict_type -A 1 character description of the current picture type. - -@item pts -The timestamp of the current frame, in seconds, with microsecond accuracy. - -@end table - -@subsection Examples - -@itemize -@item -Draw "Test Text" with font FreeSerif, using the default values for the -optional parameters. - -@example -drawtext="fontfile=/usr/share/fonts/truetype/freefont/FreeSerif.ttf: text='Test Text'" -@end example - -@item -Draw 'Test Text' with font FreeSerif of size 24 at position x=100 -and y=50 (counting from the top-left corner of the screen), text is -yellow with a red box around it. Both the text and the box have an -opacity of 20%. - -@example -drawtext="fontfile=/usr/share/fonts/truetype/freefont/FreeSerif.ttf: text='Test Text':\ - x=100: y=50: fontsize=24: fontcolor=yellow@@0.2: box=1: boxcolor=red@@0.2" -@end example - -Note that the double quotes are not necessary if spaces are not used -within the parameter list. - -@item -Show the text at the center of the video frame: -@example -drawtext="fontsize=30:fontfile=FreeSerif.ttf:text='hello world':x=(w-text_w)/2:y=(h-text_h-line_h)/2" -@end example - -@item -Show a text line sliding from right to left in the last row of the video -frame. The file @file{LONG_LINE} is assumed to contain a single line -with no newlines. -@example -drawtext="fontsize=15:fontfile=FreeSerif.ttf:text=LONG_LINE:y=h-line_h:x=-50*t" -@end example - -@item -Show the content of file @file{CREDITS} off the bottom of the frame and scroll up. -@example -drawtext="fontsize=20:fontfile=FreeSerif.ttf:textfile=CREDITS:y=h-20*t" -@end example - -@item -Draw a single green letter "g", at the center of the input video. -The glyph baseline is placed at half screen height. -@example -drawtext="fontsize=60:fontfile=FreeSerif.ttf:fontcolor=green:text=g:x=(w-max_glyph_w)/2:y=h/2-ascent" -@end example - -@item -Show text for 1 second every 3 seconds: -@example -drawtext="fontfile=FreeSerif.ttf:fontcolor=white:x=100:y=x/dar:enable=lt(mod(t\,3)\,1):text='blink'" -@end example - -@item -Use fontconfig to set the font. Note that the colons need to be escaped. 
-@example -drawtext='fontfile=Linux Libertine O-40\:style=Semibold:text=FFmpeg' -@end example - -@item -Print the date of a real-time encoding (see strftime(3)): -@example -drawtext='fontfile=FreeSans.ttf:text=%@{localtime:%a %b %d %Y@}' -@end example - -@end itemize - -For more information about libfreetype, check: -@url{http://www.freetype.org/}. - -For more information about fontconfig, check: -@url{http://freedesktop.org/software/fontconfig/fontconfig-user.html}. - -@section edgedetect - -Detect and draw edges. The filter uses the Canny Edge Detection algorithm. - -The filter accepts the following options: - -@table @option -@item low -@item high -Set low and high threshold values used by the Canny thresholding -algorithm. - -The high threshold selects the "strong" edge pixels, which are then -connected through 8-connectivity with the "weak" edge pixels selected -by the low threshold. - -@var{low} and @var{high} threshold values must be choosen in the range -[0,1], and @var{low} should be lesser or equal to @var{high}. - -Default value for @var{low} is @code{20/255}, and default value for @var{high} -is @code{50/255}. -@end table - -Example: -@example -edgedetect=low=0.1:high=0.4 -@end example - -@section extractplanes - -Extract color channel components from input video stream into -separate grayscale video streams. - -The filter accepts the following option: - -@table @option -@item planes -Set plane(s) to extract. - -Available values for planes are: -@table @samp -@item y -@item u -@item v -@item a -@item r -@item g -@item b -@end table - -Choosing planes not available in the input will result in an error. -That means you cannot select @code{r}, @code{g}, @code{b} planes -with @code{y}, @code{u}, @code{v} planes at same time. -@end table - -@subsection Examples - -@itemize -@item -Extract luma, u and v color channel component from input video frame -into 3 grayscale outputs: -@example -ffmpeg -i video.avi -filter_complex 'extractplanes=y+u+v[y][u][v]' -map '[y]' y.avi -map '[u]' u.avi -map '[v]' v.avi -@end example -@end itemize - -@section elbg - -Apply a posterize effect using the ELBG (Enhanced LBG) algorithm. - -For each input image, the filter will compute the optimal mapping from -the input to the output given the codebook length, that is the number -of distinct output colors. - -This filter accepts the following options. - -@table @option -@item codebook_length, l -Set codebook length. The value must be a positive integer, and -represents the number of distinct output colors. Default value is 256. - -@item nb_steps, n -Set the maximum number of iterations to apply for computing the optimal -mapping. The higher the value the better the result and the higher the -computation time. Default value is 1. - -@item seed, s -Set a random seed, must be an integer included between 0 and -UINT32_MAX. If not specified, or if explicitly set to -1, the filter -will try to use a good random seed on a best effort basis. -@end table - -@section fade - -Apply fade-in/out effect to input video. - -This filter accepts the following options: - -@table @option -@item type, t -The effect type -- can be either "in" for fade-in, or "out" for a fade-out -effect. -Default is @code{in}. - -@item start_frame, s -Specify the number of the start frame for starting to apply the fade -effect. Default is 0. - -@item nb_frames, n -The number of frames for which the fade effect has to last. 
At the end of the -fade-in effect the output video will have the same intensity as the input video, -at the end of the fade-out transition the output video will be filled with the -selected @option{color}. -Default is 25. - -@item alpha -If set to 1, fade only alpha channel, if one exists on the input. -Default value is 0. - -@item start_time, st -Specify the timestamp (in seconds) of the frame to start to apply the fade -effect. If both start_frame and start_time are specified, the fade will start at -whichever comes last. Default is 0. - -@item duration, d -The number of seconds for which the fade effect has to last. At the end of the -fade-in effect the output video will have the same intensity as the input video, -at the end of the fade-out transition the output video will be filled with the -selected @option{color}. -If both duration and nb_frames are specified, duration is used. Default is 0. - -@item color, c -Specify the color of the fade. Default is "black". -@end table - -@subsection Examples - -@itemize -@item -Fade in first 30 frames of video: -@example -fade=in:0:30 -@end example - -The command above is equivalent to: -@example -fade=t=in:s=0:n=30 -@end example - -@item -Fade out last 45 frames of a 200-frame video: -@example -fade=out:155:45 -fade=type=out:start_frame=155:nb_frames=45 -@end example - -@item -Fade in first 25 frames and fade out last 25 frames of a 1000-frame video: -@example -fade=in:0:25, fade=out:975:25 -@end example - -@item -Make first 5 frames yellow, then fade in from frame 5-24: -@example -fade=in:5:20:color=yellow -@end example - -@item -Fade in alpha over first 25 frames of video: -@example -fade=in:0:25:alpha=1 -@end example - -@item -Make first 5.5 seconds black, then fade in for 0.5 seconds: -@example -fade=t=in:st=5.5:d=0.5 -@end example - -@end itemize - -@section field - -Extract a single field from an interlaced image using stride -arithmetic to avoid wasting CPU time. The output frames are marked as -non-interlaced. - -The filter accepts the following options: - -@table @option -@item type -Specify whether to extract the top (if the value is @code{0} or -@code{top}) or the bottom field (if the value is @code{1} or -@code{bottom}). -@end table - -@section fieldmatch - -Field matching filter for inverse telecine. It is meant to reconstruct the -progressive frames from a telecined stream. The filter does not drop duplicated -frames, so to achieve a complete inverse telecine @code{fieldmatch} needs to be -followed by a decimation filter such as @ref{decimate} in the filtergraph. - -The separation of the field matching and the decimation is notably motivated by -the possibility of inserting a de-interlacing filter fallback between the two. -If the source has mixed telecined and real interlaced content, -@code{fieldmatch} will not be able to match fields for the interlaced parts. -But these remaining combed frames will be marked as interlaced, and thus can be -de-interlaced by a later filter such as @ref{yadif} before decimation. - -In addition to the various configuration options, @code{fieldmatch} can take an -optional second stream, activated through the @option{ppsrc} option. If -enabled, the frames reconstruction will be based on the fields and frames from -this second stream. This allows the first input to be pre-processed in order to -help the various algorithms of the filter, while keeping the output lossless -(assuming the fields are matched properly). Typically, a field-aware denoiser, -or brightness/contrast adjustments can help. 
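-
-A minimal sketch of such a two-input setup could look like the following
-(the file names and the pre-processing chain, here a simple denoiser, are
-only illustrative):
-@example
-ffmpeg -i in.mkv -filter_complex "[0:v] split [a][b]; [a] hqdn3d [pp]; [pp][b] fieldmatch=ppsrc=1, decimate" out.mkv
-@end example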
-
-Note that this filter uses the same algorithms as TIVTC/TFM (AviSynth project)
-and VIVTC/VFM (VapourSynth project). The latter is a lightweight clone of TFM,
-on which @code{fieldmatch} is based. While the semantics and usage are very
-close, some behaviour and option names can differ.
-
-The filter accepts the following options:
-
-@table @option
-@item order
-Specify the assumed field order of the input stream. Available values are:
-
-@table @samp
-@item auto
-Auto detect parity (use FFmpeg's internal parity value).
-@item bff
-Assume bottom field first.
-@item tff
-Assume top field first.
-@end table
-
-Note that it is sometimes recommended not to trust the parity announced by the
-stream.
-
-Default value is @var{auto}.
-
-@item mode
-Set the matching mode or strategy to use. @option{pc} mode is the safest in the
-sense that it won't risk creating jerkiness due to duplicate frames when
-possible, but if there are bad edits or blended fields it will end up
-outputting combed frames when a good match might actually exist. On the other
-hand, @option{pcn_ub} mode is the most risky in terms of creating jerkiness,
-but will almost always find a good frame if there is one. The other values are
-all somewhere in between @option{pc} and @option{pcn_ub} in terms of risking
-jerkiness and creating duplicate frames versus finding good matches in sections
-with bad edits, orphaned fields, blended fields, etc.
-
-More details about p/c/n/u/b are available in the @ref{p/c/n/u/b meaning} section.
-
-Available values are:
-
-@table @samp
-@item pc
-2-way matching (p/c)
-@item pc_n
-2-way matching, and trying 3rd match if still combed (p/c + n)
-@item pc_u
-2-way matching, and trying 3rd match (same order) if still combed (p/c + u)
-@item pc_n_ub
-2-way matching, trying 3rd match if still combed, and trying 4th/5th matches if
-still combed (p/c + n + u/b)
-@item pcn
-3-way matching (p/c/n)
-@item pcn_ub
-3-way matching, and trying 4th/5th matches if all 3 of the original matches are
-detected as combed (p/c/n + u/b)
-@end table
-
-The parentheses at the end indicate the matches that would be used for that
-mode assuming @option{order}=@var{tff} (and @option{field} on @var{auto} or
-@var{top}).
-
-In terms of speed, @option{pc} mode is by far the fastest and @option{pcn_ub} is
-the slowest.
-
-Default value is @var{pc_n}.
-
-@item ppsrc
-Mark the main input stream as a pre-processed input, and enable the secondary
-input stream as the clean source to pick the fields from. See the filter
-introduction for more details. It is similar to the @option{clip2} feature from
-VFM/TFM.
-
-Default value is @code{0} (disabled).
-
-@item field
-Set the field to match from. It is recommended to set this to the same value as
-@option{order} unless you experience matching failures with that setting. In
-certain circumstances changing the field that is used to match from can have a
-large impact on matching performance. Available values are:
-
-@table @samp
-@item auto
-Automatic (same value as @option{order}).
-@item bottom
-Match from the bottom field.
-@item top
-Match from the top field.
-@end table
-
-Default value is @var{auto}.
-
-@item mchroma
-Set whether or not chroma is included during the match comparisons. In most
-cases it is recommended to leave this enabled. You should set this to @code{0}
-only if your clip has bad chroma problems such as heavy rainbowing or other
-artifacts. Setting this to @code{0} could also be used to speed things up at
-the cost of some accuracy.
-
-Default value is @code{1}.
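-
-For instance, a sketch that skips chroma during the match comparisons on a
-source with heavy rainbowing:
-@example
-fieldmatch=order=tff:mchroma=0, yadif=deint=interlaced, decimate
-@end example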
-
-@item y0
-@item y1
-These define an exclusion band which excludes the lines between @option{y0} and
-@option{y1} from being included in the field matching decision. An exclusion
-band can be used to ignore subtitles, a logo, or other things that may
-interfere with the matching. @option{y0} sets the starting scan line and
-@option{y1} sets the ending line; all lines in between @option{y0} and
-@option{y1} (including @option{y0} and @option{y1}) will be ignored. Setting
-@option{y0} and @option{y1} to the same value will disable the feature.
-@option{y0} and @option{y1} default to @code{0}.
-
-@item scthresh
-Set the scene change detection threshold as a percentage of maximum change on
-the luma plane. Good values are in the @code{[8.0, 14.0]} range. Scene change
-detection is only relevant when @option{combmatch}=@var{sc}. The range for
-@option{scthresh} is @code{[0.0, 100.0]}.
-
-Default value is @code{12.0}.
-
-@item combmatch
-When @option{combmatch} is not @var{none}, @code{fieldmatch} will take into
-account the combed scores of matches when deciding what match to use as the
-final match. Available values are:
-
-@table @samp
-@item none
-No final matching based on combed scores.
-@item sc
-Combed scores are only used when a scene change is detected.
-@item full
-Use combed scores all the time.
-@end table
-
-Default is @var{sc}.
-
-@item combdbg
-Force @code{fieldmatch} to calculate the combed metrics for certain matches and
-print them. This setting is known as @option{micout} in TFM/VFM vocabulary.
-Available values are:
-
-@table @samp
-@item none
-No forced calculation.
-@item pcn
-Force p/c/n calculations.
-@item pcnub
-Force p/c/n/u/b calculations.
-@end table
-
-Default value is @var{none}.
-
-@item cthresh
-This is the area combing threshold used for combed frame detection. This
-essentially controls how "strong" or "visible" combing must be to be detected.
-Larger values mean combing must be more visible and smaller values mean combing
-can be less visible or strong and still be detected. Valid settings are from
-@code{-1} (every pixel will be detected as combed) to @code{255} (no pixel will
-be detected as combed). This is basically a pixel difference value. A good
-range is @code{[8, 12]}.
-
-Default value is @code{9}.
-
-@item chroma
-Set whether or not chroma is considered in the combed frame decision. Only
-disable this if your source has chroma problems (rainbowing, etc.) that are
-causing problems for the combed frame detection with chroma enabled. Actually,
-using @option{chroma}=@var{0} is usually more reliable, except for the case
-where there is chroma-only combing in the source.
-
-Default value is @code{0}.
-
-@item blockx
-@item blocky
-Respectively set the x-axis and y-axis size of the window used during combed
-frame detection. This has to do with the size of the area in which
-@option{combpel} pixels are required to be detected as combed for a frame to be
-declared combed. See the @option{combpel} parameter description for more info.
-Possible values are any number that is a power of 2 starting at 4 and going up
-to 512.
-
-Default value is @code{16}.
-
-@item combpel
-The number of combed pixels inside any of the @option{blocky} by
-@option{blockx} size blocks on the frame for the frame to be detected as
-combed. While @option{cthresh} controls how "visible" the combing must be, this
-setting controls "how much" combing there must be in any localized area (a
-window defined by the @option{blockx} and @option{blocky} settings) on the frame.
Minimum value is @code{0} and maximum is @code{blocky x blockx} (at -which point no frames will ever be detected as combed). This setting is known -as @option{MI} in TFM/VFM vocabulary. - -Default value is @code{80}. -@end table - -@anchor{p/c/n/u/b meaning} -@subsection p/c/n/u/b meaning - -@subsubsection p/c/n - -We assume the following telecined stream: - -@example -Top fields: 1 2 2 3 4 -Bottom fields: 1 2 3 4 4 -@end example - -The numbers correspond to the progressive frame the fields relate to. Here, the -first two frames are progressive, the 3rd and 4th are combed, and so on. - -When @code{fieldmatch} is configured to run a matching from bottom -(@option{field}=@var{bottom}) this is how this input stream get transformed: - -@example -Input stream: - T 1 2 2 3 4 - B 1 2 3 4 4 <-- matching reference - -Matches: c c n n c - -Output stream: - T 1 2 3 4 4 - B 1 2 3 4 4 -@end example - -As a result of the field matching, we can see that some frames get duplicated. -To perform a complete inverse telecine, you need to rely on a decimation filter -after this operation. See for instance the @ref{decimate} filter. - -The same operation now matching from top fields (@option{field}=@var{top}) -looks like this: - -@example -Input stream: - T 1 2 2 3 4 <-- matching reference - B 1 2 3 4 4 - -Matches: c c p p c - -Output stream: - T 1 2 2 3 4 - B 1 2 2 3 4 -@end example - -In these examples, we can see what @var{p}, @var{c} and @var{n} mean; -basically, they refer to the frame and field of the opposite parity: - -@itemize -@item @var{p} matches the field of the opposite parity in the previous frame -@item @var{c} matches the field of the opposite parity in the current frame -@item @var{n} matches the field of the opposite parity in the next frame -@end itemize - -@subsubsection u/b - -The @var{u} and @var{b} matching are a bit special in the sense that they match -from the opposite parity flag. In the following examples, we assume that we are -currently matching the 2nd frame (Top:2, bottom:2). According to the match, a -'x' is placed above and below each matched fields. - -With bottom matching (@option{field}=@var{bottom}): -@example -Match: c p n b u - - x x x x x - Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2 - Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3 - x x x x x - -Output frames: - 2 1 2 2 2 - 2 2 2 1 3 -@end example - -With top matching (@option{field}=@var{top}): -@example -Match: c p n b u - - x x x x x - Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2 - Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3 - x x x x x - -Output frames: - 2 2 2 1 2 - 2 1 3 2 2 -@end example - -@subsection Examples - -Simple IVTC of a top field first telecined stream: -@example -fieldmatch=order=tff:combmatch=none, decimate -@end example - -Advanced IVTC, with fallback on @ref{yadif} for still combed frames: -@example -fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate -@end example - -@section fieldorder - -Transform the field order of the input video. - -This filter accepts the following options: - -@table @option - -@item order -Output field order. Valid values are @var{tff} for top field first or @var{bff} -for bottom field first. -@end table - -Default value is @samp{tff}. - -Transformation is achieved by shifting the picture content up or down -by one line, and filling the remaining line with appropriate picture content. -This method is consistent with most broadcast field order converters. 
- -If the input video is not flagged as being interlaced, or it is already -flagged as being of the required output field order then this filter does -not alter the incoming video. - -This filter is very useful when converting to or from PAL DV material, -which is bottom field first. - -For example: -@example -ffmpeg -i in.vob -vf "fieldorder=bff" out.dv -@end example - -@section fifo - -Buffer input images and send them when they are requested. - -This filter is mainly useful when auto-inserted by the libavfilter -framework. - -The filter does not take parameters. - -@anchor{format} -@section format - -Convert the input video to one of the specified pixel formats. -Libavfilter will try to pick one that is supported for the input to -the next filter. - -This filter accepts the following parameters: -@table @option - -@item pix_fmts -A '|'-separated list of pixel format names, for example -"pix_fmts=yuv420p|monow|rgb24". - -@end table - -@subsection Examples - -@itemize -@item -Convert the input video to the format @var{yuv420p} -@example -format=pix_fmts=yuv420p -@end example - -Convert the input video to any of the formats in the list -@example -format=pix_fmts=yuv420p|yuv444p|yuv410p -@end example -@end itemize - -@anchor{fps} -@section fps - -Convert the video to specified constant frame rate by duplicating or dropping -frames as necessary. - -This filter accepts the following named parameters: -@table @option - -@item fps -Desired output frame rate. The default is @code{25}. - -@item round -Rounding method. - -Possible values are: -@table @option -@item zero -zero round towards 0 -@item inf -round away from 0 -@item down -round towards -infinity -@item up -round towards +infinity -@item near -round to nearest -@end table -The default is @code{near}. - -@item start_time -Assume the first PTS should be the given value, in seconds. This allows for -padding/trimming at the start of stream. By default, no assumption is made -about the first frame's expected PTS, so no padding or trimming is done. -For example, this could be set to 0 to pad the beginning with duplicates of -the first frame if a video stream starts after the audio stream or to trim any -frames with a negative PTS. - -@end table - -Alternatively, the options can be specified as a flat string: -@var{fps}[:@var{round}]. - -See also the @ref{setpts} filter. - -@subsection Examples - -@itemize -@item -A typical usage in order to set the fps to 25: -@example -fps=fps=25 -@end example - -@item -Sets the fps to 24, using abbreviation and rounding method to round to nearest: -@example -fps=fps=film:round=near -@end example -@end itemize - -@section framestep - -Select one frame every N-th frame. - -This filter accepts the following option: -@table @option -@item step -Select frame after every @code{step} frames. -Allowed values are positive integers higher than 0. Default value is @code{1}. -@end table - -@anchor{frei0r} -@section frei0r - -Apply a frei0r effect to the input video. - -To enable compilation of this filter you need to install the frei0r -header and configure FFmpeg with @code{--enable-frei0r}. - -This filter accepts the following options: - -@table @option - -@item filter_name -The name to the frei0r effect to load. 
-If the environment variable
-@env{FREI0R_PATH} is defined, the frei0r effect is searched for in each of the
-directories specified by the colon-separated list in @env{FREI0R_PATH},
-otherwise in the standard frei0r paths, which are in this order:
-@file{HOME/.frei0r-1/lib/}, @file{/usr/local/lib/frei0r-1/},
-@file{/usr/lib/frei0r-1/}.
-
-@item filter_params
-A '|'-separated list of parameters to pass to the frei0r effect.
-
-@end table
-
-A frei0r effect parameter can be a boolean (whose values are specified
-with "y" and "n"), a double, a color (specified by the syntax
-@var{R}/@var{G}/@var{B} (@var{R}, @var{G}, and @var{B} being float
-numbers from 0.0 to 1.0) or by a color description specified in the "Color"
-section in the ffmpeg-utils manual), a position (specified by the syntax @var{X}/@var{Y},
-@var{X} and @var{Y} being float numbers) and a string.
-
-The number and kind of parameters depend on the loaded effect. If an
-effect parameter is not specified, the default value is set.
-
-@subsection Examples
-
-@itemize
-@item
-Apply the distort0r effect, set the first two double parameters:
-@example
-frei0r=filter_name=distort0r:filter_params=0.5|0.01
-@end example
-
-@item
-Apply the colordistance effect, take a color as first parameter:
-@example
-frei0r=colordistance:0.2/0.3/0.4
-frei0r=colordistance:violet
-frei0r=colordistance:0x112233
-@end example
-
-@item
-Apply the perspective effect, specify the top left and top right image
-positions:
-@example
-frei0r=perspective:0.2/0.2|0.8/0.2
-@end example
-@end itemize
-
-For more information see:
-@url{http://frei0r.dyne.org}
-
-@section geq
-
-The filter accepts the following options:
-
-@table @option
-@item lum_expr, lum
-Set the luminance expression.
-@item cb_expr, cb
-Set the chrominance blue expression.
-@item cr_expr, cr
-Set the chrominance red expression.
-@item alpha_expr, a
-Set the alpha expression.
-@item red_expr, r
-Set the red expression.
-@item green_expr, g
-Set the green expression.
-@item blue_expr, b
-Set the blue expression.
-@end table
-
-The colorspace is selected according to the specified options. If one
-of the @option{lum_expr}, @option{cb_expr}, or @option{cr_expr}
-options is specified, the filter will automatically select a YCbCr
-colorspace. If one of the @option{red_expr}, @option{green_expr}, or
-@option{blue_expr} options is specified, it will select an RGB
-colorspace.
-
-If one of the chrominance expressions is not defined, it falls back on the other
-one. If no alpha expression is specified it will evaluate to an opaque value.
-If neither chrominance expression is specified, they will evaluate
-to the luminance expression.
-
-The expressions can use the following variables and functions:
-
-@table @option
-@item N
-The sequential number of the filtered frame, starting from @code{0}.
-
-@item X
-@item Y
-The coordinates of the current sample.
-
-@item W
-@item H
-The width and height of the image.
-
-@item SW
-@item SH
-Width and height scale depending on the currently filtered plane. It is the
-ratio between the dimensions of the current plane and those of the luma
-plane. E.g. for YUV4:2:0 the values are @code{1,1} for the luma plane, and
-@code{0.5,0.5} for chroma planes.
-
-@item T
-Time of the current frame, expressed in seconds.
-
-@item p(x, y)
-Return the value of the pixel at location (@var{x},@var{y}) of the current
-plane.
-
-@item lum(x, y)
-Return the value of the pixel at location (@var{x},@var{y}) of the luminance
-plane.
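-
-For instance, a minimal sketch that averages each luma sample with its four
-neighbours while passing chroma through unchanged (the chroma access
-functions are described next):
-@example
-geq=lum='(lum(X,Y)+lum(X-1,Y)+lum(X+1,Y)+lum(X,Y-1)+lum(X,Y+1))/5':cb='cb(X,Y)':cr='cr(X,Y)'
-@end example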
- -@item cb(x, y) -Return the value of the pixel at location (@var{x},@var{y}) of the -blue-difference chroma plane. Return 0 if there is no such plane. - -@item cr(x, y) -Return the value of the pixel at location (@var{x},@var{y}) of the -red-difference chroma plane. Return 0 if there is no such plane. - -@item r(x, y) -@item g(x, y) -@item b(x, y) -Return the value of the pixel at location (@var{x},@var{y}) of the -red/green/blue component. Return 0 if there is no such component. - -@item alpha(x, y) -Return the value of the pixel at location (@var{x},@var{y}) of the alpha -plane. Return 0 if there is no such plane. -@end table - -For functions, if @var{x} and @var{y} are outside the area, the value will be -automatically clipped to the closer edge. - -@subsection Examples - -@itemize -@item -Flip the image horizontally: -@example -geq=p(W-X\,Y) -@end example - -@item -Generate a bidimensional sine wave, with angle @code{PI/3} and a -wavelength of 100 pixels: -@example -geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128 -@end example - -@item -Generate a fancy enigmatic moving light: -@example -nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128 -@end example - -@item -Generate a quick emboss effect: -@example -format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2' -@end example - -@item -Modify RGB components depending on pixel position: -@example -geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)' -@end example -@end itemize - -@section gradfun - -Fix the banding artifacts that are sometimes introduced into nearly flat -regions by truncation to 8bit color depth. -Interpolate the gradients that should go where the bands are, and -dither them. - -This filter is designed for playback only. Do not use it prior to -lossy compression, because compression tends to lose the dither and -bring back the bands. - -This filter accepts the following options: - -@table @option - -@item strength -The maximum amount by which the filter will change any one pixel. Also the -threshold for detecting nearly flat regions. Acceptable values range from .51 to -64, default value is 1.2, out-of-range values will be clipped to the valid -range. - -@item radius -The neighborhood to fit the gradient to. A larger radius makes for smoother -gradients, but also prevents the filter from modifying the pixels near detailed -regions. Acceptable values are 8-32, default value is 16, out-of-range values -will be clipped to the valid range. - -@end table - -Alternatively, the options can be specified as a flat string: -@var{strength}[:@var{radius}] - -@subsection Examples - -@itemize -@item -Apply the filter with a @code{3.5} strength and radius of @code{8}: -@example -gradfun=3.5:8 -@end example - -@item -Specify radius, omitting the strength (which will fall-back to the default -value): -@example -gradfun=radius=8 -@end example - -@end itemize - -@anchor{haldclut} -@section haldclut - -Apply a Hald CLUT to a video stream. - -First input is the video stream to process, and second one is the Hald CLUT. -The Hald CLUT input can be a simple picture or a complete video stream. - -The filter accepts the following options: - -@table @option -@item shortest -Force termination when the shortest input terminates. Default is @code{0}. -@item repeatlast -Continue applying the last CLUT after the end of the stream. A value of -@code{0} disable the filter after the last frame of the CLUT is reached. -Default is @code{1}. 
-@end table - -@code{haldclut} also has the same interpolation options as @ref{lut3d} (both -filters share the same internals). - -More information about the Hald CLUT can be found on Eskil Steenberg's website -(Hald CLUT author) at @url{http://www.quelsolaar.com/technology/clut.html}. - -@subsection Workflow examples - -@subsubsection Hald CLUT video stream - -Generate an identity Hald CLUT stream altered with various effects: -@example -ffmpeg -f lavfi -i @ref{haldclutsrc}=8 -vf "hue=H=2*PI*t:s=sin(2*PI*t)+1, curves=cross_process" -t 10 -c:v ffv1 clut.nut -@end example - -Note: make sure you use a lossless codec. - -Then use it with @code{haldclut} to apply it on some random stream: -@example -ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 20 mandelclut.mkv -@end example - -The Hald CLUT will be applied to the 10 first seconds (duration of -@file{clut.nut}), then the latest picture of that CLUT stream will be applied -to the remaining frames of the @code{mandelbrot} stream. - -@subsubsection Hald CLUT with preview - -A Hald CLUT is supposed to be a squared image of @code{Level*Level*Level} by -@code{Level*Level*Level} pixels. For a given Hald CLUT, FFmpeg will select the -biggest possible square starting at the top left of the picture. The remaining -padding pixels (bottom or right) will be ignored. This area can be used to add -a preview of the Hald CLUT. - -Typically, the following generated Hald CLUT will be supported by the -@code{haldclut} filter: - -@example -ffmpeg -f lavfi -i @ref{haldclutsrc}=8 -vf " - pad=iw+320 [padded_clut]; - smptebars=s=320x256, split [a][b]; - [padded_clut][a] overlay=W-320:h, curves=color_negative [main]; - [main][b] overlay=W-320" -frames:v 1 clut.png -@end example - -It contains the original and a preview of the effect of the CLUT: SMPTE color -bars are displayed on the right-top, and below the same color bars processed by -the color changes. - -Then, the effect of this Hald CLUT can be visualized with: -@example -ffplay input.mkv -vf "movie=clut.png, [in] haldclut" -@end example - -@section hflip - -Flip the input video horizontally. - -For example to horizontally flip the input video with @command{ffmpeg}: -@example -ffmpeg -i in.avi -vf "hflip" out.avi -@end example - -@section histeq -This filter applies a global color histogram equalization on a -per-frame basis. - -It can be used to correct video that has a compressed range of pixel -intensities. The filter redistributes the pixel intensities to -equalize their distribution across the intensity range. It may be -viewed as an "automatically adjusting contrast filter". This filter is -useful only for correcting degraded or poorly captured source -video. - -The filter accepts the following options: - -@table @option -@item strength -Determine the amount of equalization to be applied. As the strength -is reduced, the distribution of pixel intensities more-and-more -approaches that of the input frame. The value must be a float number -in the range [0,1] and defaults to 0.200. - -@item intensity -Set the maximum intensity that can generated and scale the output -values appropriately. The strength should be set as desired and then -the intensity can be limited if needed to avoid washing-out. The value -must be a float number in the range [0,1] and defaults to 0.210. - -@item antibanding -Set the antibanding level. If enabled the filter will randomly vary -the luminance of output pixels by a small amount to avoid banding of -the histogram. 
Possible values are @code{none}, @code{weak} or
-@code{strong}. It defaults to @code{none}.
-@end table
-
-@section histogram
-
-Compute and draw a color distribution histogram for the input video.
-
-The computed histogram is a representation of the distribution of color
-components in an image.
-
-The filter accepts the following options:
-
-@table @option
-@item mode
-Set histogram mode.
-
-It accepts the following values:
-@table @samp
-@item levels
-Standard histogram that displays the distribution of the color components in
-an image. Displays a color graph for each color component. Shows the
-distribution of the Y, U, V, A or R, G, B components, depending on input
-format, in the current frame. Below each graph is a color component scale
-meter.
-
-@item color
-Chroma values in a vectorscope: the brighter a point, the more pixels of the
-image have that chroma value.
-Displays chroma values (U/V color placement) in a two-dimensional graph
-(which is called a vectorscope). It can be used to read off the hue and
-saturation of the current frame. At the same time it is a histogram.
-The whiter a pixel in the vectorscope, the more pixels of the input frame
-correspond to that pixel (that is, the more pixels have this chroma value).
-The V component is displayed on the horizontal (X) axis, with the leftmost
-side being V = 0 and the rightmost side being V = 255.
-The U component is displayed on the vertical (Y) axis, with the top
-representing U = 0 and the bottom representing U = 255.
-
-The position of a white pixel in the graph corresponds to the chroma value
-of a pixel of the input clip. So the graph can be used to read off the
-hue (color flavor) and the saturation (the dominance of the hue in the color).
-As the hue of a color changes, it moves around the square. At the center of
-the square, the saturation is zero, which means that the corresponding pixel
-has no color. If you increase the amount of a specific color, while leaving
-the other colors unchanged, the saturation increases, and you move towards
-the edge of the square.
-
-@item color2
-Chroma values in a vectorscope, similar to @code{color} but the actual chroma
-values are displayed.
-
-@item waveform
-Per row/column color component graph. In row mode, the left side of the graph
-represents color component value 0 and the right side represents value 255.
-In column mode, the top side represents value 0 and the bottom side
-represents value 255.
-@end table
-Default value is @code{levels}.
-
-@item level_height
-Set the height of the level in @code{levels}. Default value is @code{200}.
-Allowed range is [50, 2048].
-
-@item scale_height
-Set the height of the color scale in @code{levels}. Default value is @code{12}.
-Allowed range is [0, 40].
-
-@item step
-Set the step for @code{waveform} mode. Smaller values are useful to find out
-how many values of the same luminance are distributed across input
-rows/columns. Default value is @code{10}. Allowed range is [1, 255].
-
-@item waveform_mode
-Set the mode for @code{waveform}. Can be either @code{row} or @code{column}.
-Default is @code{row}.
-
-@item waveform_mirror
-Set the mirroring mode for @code{waveform}. @code{0} means unmirrored, @code{1}
-means mirrored. In mirrored mode, higher values will be represented on the left
-side for @code{row} mode and at the top for @code{column} mode. Default is
-@code{0} (unmirrored).
-
-@item display_mode
-Set the display mode for @code{waveform} and @code{levels}.
-It accepts the following values: -@table @samp -@item parade -Display separate graph for the color components side by side in -@code{row} waveform mode or one below other in @code{column} waveform mode -for @code{waveform} histogram mode. For @code{levels} histogram mode -per color component graphs are placed one bellow other. - -This display mode in @code{waveform} histogram mode makes it easy to spot -color casts in the highlights and shadows of an image, by comparing the -contours of the top and the bottom of each waveform. -Since whites, grays, and blacks are characterized by -exactly equal amounts of red, green, and blue, neutral areas of the -picture should display three waveforms of roughly equal width/height. -If not, the correction is easy to make by making adjustments to level the -three waveforms. - -@item overlay -Presents information that's identical to that in the @code{parade}, except -that the graphs representing color components are superimposed directly -over one another. - -This display mode in @code{waveform} histogram mode can make it easier to spot -the relative differences or similarities in overlapping areas of the color -components that are supposed to be identical, such as neutral whites, grays, -or blacks. -@end table -Default is @code{parade}. - -@item levels_mode -Set mode for @code{levels}. Can be either @code{linear}, or @code{logarithmic}. -Default is @code{linear}. -@end table - -@subsection Examples - -@itemize - -@item -Calculate and draw histogram: -@example -ffplay -i input -vf histogram -@end example - -@end itemize - -@anchor{hqdn3d} -@section hqdn3d - -High precision/quality 3d denoise filter. This filter aims to reduce -image noise producing smooth images and making still images really -still. It should enhance compressibility. - -It accepts the following optional parameters: - -@table @option -@item luma_spatial -a non-negative float number which specifies spatial luma strength, -defaults to 4.0 - -@item chroma_spatial -a non-negative float number which specifies spatial chroma strength, -defaults to 3.0*@var{luma_spatial}/4.0 - -@item luma_tmp -a float number which specifies luma temporal strength, defaults to -6.0*@var{luma_spatial}/4.0 - -@item chroma_tmp -a float number which specifies chroma temporal strength, defaults to -@var{luma_tmp}*@var{chroma_spatial}/@var{luma_spatial} -@end table - -@section hue - -Modify the hue and/or the saturation of the input. - -This filter accepts the following options: - -@table @option -@item h -Specify the hue angle as a number of degrees. It accepts an expression, -and defaults to "0". - -@item s -Specify the saturation in the [-10,10] range. It accepts an expression and -defaults to "1". - -@item H -Specify the hue angle as a number of radians. It accepts an -expression, and defaults to "0". - -@item b -Specify the brightness in the [-10,10] range. It accepts an expression and -defaults to "0". -@end table - -@option{h} and @option{H} are mutually exclusive, and can't be -specified at the same time. 
-
-The @option{b}, @option{h}, @option{H} and @option{s} option values are
-expressions containing the following constants:
-
-@table @option
-@item n
-frame count of the input frame starting from 0
-
-@item pts
-presentation timestamp of the input frame expressed in time base units
-
-@item r
-frame rate of the input video, NAN if the input frame rate is unknown
-
-@item t
-timestamp expressed in seconds, NAN if the input timestamp is unknown
-
-@item tb
-time base of the input video
-@end table
-
-@subsection Examples
-
-@itemize
-@item
-Set the hue to 90 degrees and the saturation to 1.0:
-@example
-hue=h=90:s=1
-@end example
-
-@item
-Same command but expressing the hue in radians:
-@example
-hue=H=PI/2:s=1
-@end example
-
-@item
-Rotate hue and make the saturation swing between 0
-and 2 over a period of 1 second:
-@example
-hue="H=2*PI*t: s=sin(2*PI*t)+1"
-@end example
-
-@item
-Apply a 3-second saturation fade-in effect starting at 0:
-@example
-hue="s=min(t/3\,1)"
-@end example
-
-The general fade-in expression can be written as:
-@example
-hue="s=max(0\, min(1\, (t-START)/DURATION))"
-@end example
-
-@item
-Apply a 3-second saturation fade-out effect starting at 5 seconds:
-@example
-hue="s=max(0\, min(1\, (8-t)/3))"
-@end example
-
-The general fade-out expression can be written as:
-@example
-hue="s=max(0\, min(1\, (START+DURATION-t)/DURATION))"
-@end example
-
-@end itemize
-
-@subsection Commands
-
-This filter supports the following commands:
-@table @option
-@item b
-@item s
-@item h
-@item H
-Modify the hue and/or the saturation and/or brightness of the input video.
-The command accepts the same syntax as the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-@end table
-
-@section idet
-
-Detect video interlacing type.
-
-This filter tries to detect if the input is interlaced or progressive, and
-whether it is top or bottom field first.
-
-The filter accepts the following options:
-
-@table @option
-@item intl_thres
-Set the interlacing threshold.
-@item prog_thres
-Set the progressive threshold.
-@end table
-
-@section il
-
-Deinterleave or interleave fields.
-
-This filter allows you to process interlaced image fields without
-deinterlacing them. Deinterleaving splits the input frame into 2
-fields (so-called half pictures). Odd lines are moved to the top
-half of the output image, even lines to the bottom half.
-You can process (filter) them independently and then re-interleave them.
-
-The filter accepts the following options:
-
-@table @option
-@item luma_mode, l
-@item chroma_mode, c
-@item alpha_mode, a
-Available values for @var{luma_mode}, @var{chroma_mode} and
-@var{alpha_mode} are:
-
-@table @samp
-@item none
-Do nothing.
-
-@item deinterleave, d
-Deinterleave fields, placing one above the other.
-
-@item interleave, i
-Interleave fields. Reverse the effect of deinterleaving.
-@end table
-Default value is @code{none}.
-
-@item luma_swap, ls
-@item chroma_swap, cs
-@item alpha_swap, as
-Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is @code{0}.
-@end table
-
-@section interlace
-
-Simple interlacing filter from progressive content. This interleaves upper (or
-lower) lines from odd frames with lower (or upper) lines from even frames,
-halving the frame rate and preserving image height.
-
-@example
-   Original        Original             New Frame
-   Frame 'j'      Frame 'j+1'             (tff)
-  ==========      ===========       ==================
-    Line 0  -------------------->    Frame 'j' Line 0
-    Line 1          Line 1  ---->   Frame 'j+1' Line 1
-    Line 2 --------------------->    Frame 'j' Line 2
-    Line 3          Line 3  ---->   Frame 'j+1' Line 3
-     ...               ...                   ...
-New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
-@end example
-
-It accepts the following optional parameters:
-
-@table @option
-@item scan
-Determines whether the interlaced frame is taken from the even (tff - default)
-or odd (bff) lines of the progressive frame.
-
-@item lowpass
-Enable (default) or disable the vertical lowpass filter to avoid twitter
-interlacing and reduce moire patterns.
-@end table
-
-@section kerndeint
-
-Deinterlace input video by applying Donald Graft's adaptive kernel
-deinterlacing. It works on the interlaced parts of a video to produce
-progressive frames.
-
-The description of the accepted parameters follows.
-
-@table @option
-@item thresh
-Set the threshold which affects the filter's tolerance when
-determining if a pixel line must be processed. It must be an integer
-in the range [0,255] and defaults to 10. A value of 0 will result in
-applying the process to every pixel.
-
-@item map
-Paint pixels exceeding the threshold value to white if set to 1.
-Default is 0.
-
-@item order
-Set the field order. Swap fields if set to 1, leave fields alone if
-0. Default is 0.
-
-@item sharp
-Enable additional sharpening if set to 1. Default is 0.
-
-@item twoway
-Enable twoway sharpening if set to 1. Default is 0.
-@end table
-
-@subsection Examples
-
-@itemize
-@item
-Apply default values:
-@example
-kerndeint=thresh=10:map=0:order=0:sharp=0:twoway=0
-@end example
-
-@item
-Enable additional sharpening:
-@example
-kerndeint=sharp=1
-@end example
-
-@item
-Paint processed pixels in white:
-@example
-kerndeint=map=1
-@end example
-@end itemize
-
-@anchor{lut3d}
-@section lut3d
-
-Apply a 3D LUT to an input video.
-
-The filter accepts the following options:
-
-@table @option
-@item file
-Set the 3D LUT file name.
-
-Currently supported formats:
-@table @samp
-@item 3dl
-AfterEffects
-@item cube
-Iridas
-@item dat
-DaVinci
-@item m3d
-Pandora
-@end table
-@item interp
-Select interpolation mode.
-
-Available values are:
-
-@table @samp
-@item nearest
-Use values from the nearest defined point.
-@item trilinear
-Interpolate values using the 8 points defining a cube.
-@item tetrahedral
-Interpolate values using a tetrahedron.
-@end table
-@end table
-
-@section lut, lutrgb, lutyuv
-
-Compute a look-up table for binding each pixel component input value
-to an output value, and apply it to the input video.
-
-@var{lutyuv} applies a lookup table to a YUV input video, @var{lutrgb}
-to an RGB input video.
-
-These filters accept the following options:
-@table @option
-@item c0
-set first pixel component expression
-@item c1
-set second pixel component expression
-@item c2
-set third pixel component expression
-@item c3
-set fourth pixel component expression, corresponds to the alpha component
-
-@item r
-set red component expression
-@item g
-set green component expression
-@item b
-set blue component expression
-@item a
-set alpha component expression
-
-@item y
-set Y/luminance component expression
-@item u
-set U/Cb component expression
-@item v
-set V/Cr component expression
-@end table
-
-Each of them specifies the expression to use for computing the lookup table for
-the corresponding pixel component values.
- -The exact component associated to each of the @var{c*} options depends on the -format in input. - -The @var{lut} filter requires either YUV or RGB pixel formats in input, -@var{lutrgb} requires RGB pixel formats in input, and @var{lutyuv} requires YUV. - -The expressions can contain the following constants and functions: - -@table @option -@item w -@item h -the input width and height - -@item val -input value for the pixel component - -@item clipval -the input value clipped in the @var{minval}-@var{maxval} range - -@item maxval -maximum value for the pixel component - -@item minval -minimum value for the pixel component - -@item negval -the negated value for the pixel component value clipped in the -@var{minval}-@var{maxval} range , it corresponds to the expression -"maxval-clipval+minval" - -@item clip(val) -the computed value in @var{val} clipped in the -@var{minval}-@var{maxval} range - -@item gammaval(gamma) -the computed gamma correction value of the pixel component value -clipped in the @var{minval}-@var{maxval} range, corresponds to the -expression -"pow((clipval-minval)/(maxval-minval)\,@var{gamma})*(maxval-minval)+minval" - -@end table - -All expressions default to "val". - -@subsection Examples - -@itemize -@item -Negate input video: -@example -lutrgb="r=maxval+minval-val:g=maxval+minval-val:b=maxval+minval-val" -lutyuv="y=maxval+minval-val:u=maxval+minval-val:v=maxval+minval-val" -@end example - -The above is the same as: -@example -lutrgb="r=negval:g=negval:b=negval" -lutyuv="y=negval:u=negval:v=negval" -@end example - -@item -Negate luminance: -@example -lutyuv=y=negval -@end example - -@item -Remove chroma components, turns the video into a graytone image: -@example -lutyuv="u=128:v=128" -@end example - -@item -Apply a luma burning effect: -@example -lutyuv="y=2*val" -@end example - -@item -Remove green and blue components: -@example -lutrgb="g=0:b=0" -@end example - -@item -Set a constant alpha channel value on input: -@example -format=rgba,lutrgb=a="maxval-minval/2" -@end example - -@item -Correct luminance gamma by a 0.5 factor: -@example -lutyuv=y=gammaval(0.5) -@end example - -@item -Discard least significant bits of luma: -@example -lutyuv=y='bitand(val, 128+64+32)' -@end example -@end itemize - -@section mergeplanes - -Merge color channel components from several video streams. - -The filter accepts up to 4 input streams, and merge selected input -planes to the output video. - -This filter accepts the following options: -@table @option -@item mapping -Set input to output plane mapping. Default is @code{0}. - -The mappings is specified as a bitmap. It should be specified as a -hexadecimal number in the form 0xAa[Bb[Cc[Dd]]]. 'Aa' describes the -mapping for the first plane of the output stream. 'A' sets the number of -the input stream to use (from 0 to 3), and 'a' the plane number of the -corresponding input to use (from 0 to 3). The rest of the mappings is -similar, 'Bb' describes the mapping for the output stream second -plane, 'Cc' describes the mapping for the output stream third plane and -'Dd' describes the mapping for the output stream fourth plane. - -@item format -Set output pixel format. Default is @code{yuva444p}. 
-@end table - -@subsection Examples - -@itemize -@item -Merge three gray video streams of same width and height into single video stream: -@example -[a0][a1][a2]mergeplanes=0x001020:yuv444p -@end example - -@item -Merge 1st yuv444p stream and 2nd gray video stream into yuva444p video stream: -@example -[a0][a1]mergeplanes=0x00010210:yuva444p -@end example - -@item -Swap Y and A plane in yuva444p stream: -@example -format=yuva444p,mergeplanes=0x03010200:yuva444p -@end example - -@item -Swap U and V plane in yuv420p stream: -@example -format=yuv420p,mergeplanes=0x000201:yuv420p -@end example - -@item -Cast a rgb24 clip to yuv444p: -@example -format=rgb24,mergeplanes=0x000102:yuv444p -@end example -@end itemize - -@section mcdeint - -Apply motion-compensation deinterlacing. - -It needs one field per frame as input and must thus be used together -with yadif=1/3 or equivalent. - -This filter accepts the following options: -@table @option -@item mode -Set the deinterlacing mode. - -It accepts one of the following values: -@table @samp -@item fast -@item medium -@item slow -use iterative motion estimation -@item extra_slow -like @samp{slow}, but use multiple reference frames. -@end table -Default value is @samp{fast}. - -@item parity -Set the picture field parity assumed for the input video. It must be -one of the following values: - -@table @samp -@item 0, tff -assume top field first -@item 1, bff -assume bottom field first -@end table - -Default value is @samp{bff}. - -@item qp -Set per-block quantization parameter (QP) used by the internal -encoder. - -Higher values should result in a smoother motion vector field but less -optimal individual vectors. Default value is 1. -@end table - -@section mp - -Apply an MPlayer filter to the input video. - -This filter provides a wrapper around some of the filters of -MPlayer/MEncoder. - -This wrapper is considered experimental. Some of the wrapped filters -may not work properly and we may drop support for them, as they will -be implemented natively into FFmpeg. Thus you should avoid -depending on them when writing portable scripts. - -The filter accepts the parameters: -@var{filter_name}[:=]@var{filter_params} - -@var{filter_name} is the name of a supported MPlayer filter, -@var{filter_params} is a string containing the parameters accepted by -the named filter. - -The list of the currently supported filters follows: -@table @var -@item eq2 -@item eq -@item fspp -@item ilpack -@item pp7 -@item softpulldown -@item uspp -@end table - -The parameter syntax and behavior for the listed filters are the same -of the corresponding MPlayer filters. For detailed instructions check -the "VIDEO FILTERS" section in the MPlayer manual. - -@subsection Examples - -@itemize -@item -Adjust gamma, brightness, contrast: -@example -mp=eq2=1.0:2:0.5 -@end example -@end itemize - -See also mplayer(1), @url{http://www.mplayerhq.hu/}. - -@section mpdecimate - -Drop frames that do not differ greatly from the previous frame in -order to reduce frame rate. - -The main use of this filter is for very-low-bitrate encoding -(e.g. streaming over dialup modem), but it could in theory be used for -fixing movies that were inverse-telecined incorrectly. - -A description of the accepted options follows. - -@table @option -@item max -Set the maximum number of consecutive frames which can be dropped (if -positive), or the minimum interval between dropped frames (if -negative). If the value is 0, the frame is dropped unregarding the -number of previous sequentially dropped frames. 
- -Default value is 0. - -@item hi -@item lo -@item frac -Set the dropping threshold values. - -Values for @option{hi} and @option{lo} are for 8x8 pixel blocks and -represent actual pixel value differences, so a threshold of 64 -corresponds to 1 unit of difference for each pixel, or the same spread -out differently over the block. - -A frame is a candidate for dropping if no 8x8 blocks differ by more -than a threshold of @option{hi}, and if no more than @option{frac} blocks (1 -meaning the whole image) differ by more than a threshold of @option{lo}. - -Default value for @option{hi} is 64*12, default value for @option{lo} is -64*5, and default value for @option{frac} is 0.33. -@end table - - -@section negate - -Negate input video. - -This filter accepts an integer in input, if non-zero it negates the -alpha component (if available). The default value in input is 0. - -@section noformat - -Force libavfilter not to use any of the specified pixel formats for the -input to the next filter. - -This filter accepts the following parameters: -@table @option - -@item pix_fmts -A '|'-separated list of pixel format names, for example -"pix_fmts=yuv420p|monow|rgb24". - -@end table - -@subsection Examples - -@itemize -@item -Force libavfilter to use a format different from @var{yuv420p} for the -input to the vflip filter: -@example -noformat=pix_fmts=yuv420p,vflip -@end example - -@item -Convert the input video to any of the formats not contained in the list: -@example -noformat=yuv420p|yuv444p|yuv410p -@end example -@end itemize - -@section noise - -Add noise on video input frame. - -The filter accepts the following options: - -@table @option -@item all_seed -@item c0_seed -@item c1_seed -@item c2_seed -@item c3_seed -Set noise seed for specific pixel component or all pixel components in case -of @var{all_seed}. Default value is @code{123457}. - -@item all_strength, alls -@item c0_strength, c0s -@item c1_strength, c1s -@item c2_strength, c2s -@item c3_strength, c3s -Set noise strength for specific pixel component or all pixel components in case -@var{all_strength}. Default value is @code{0}. Allowed range is [0, 100]. - -@item all_flags, allf -@item c0_flags, c0f -@item c1_flags, c1f -@item c2_flags, c2f -@item c3_flags, c3f -Set pixel component flags or set flags for all components if @var{all_flags}. -Available values for component flags are: -@table @samp -@item a -averaged temporal noise (smoother) -@item p -mix random noise with a (semi)regular pattern -@item t -temporal noise (noise pattern changes between frames) -@item u -uniform noise (gaussian otherwise) -@end table -@end table - -@subsection Examples - -Add temporal and uniform noise to input video: -@example -noise=alls=20:allf=t+u -@end example - -@section null - -Pass the video source unchanged to the output. - -@section ocv - -Apply video transform using libopencv. - -To enable this filter install libopencv library and headers and -configure FFmpeg with @code{--enable-libopencv}. - -This filter accepts the following parameters: - -@table @option - -@item filter_name -The name of the libopencv filter to apply. - -@item filter_params -The parameters to pass to the libopencv filter. If not specified the default -values are assumed. - -@end table - -Refer to the official libopencv documentation for more precise -information: -@url{http://opencv.willowgarage.com/documentation/c/image_filtering.html} - -Follows the list of supported libopencv filters. 
- -@anchor{dilate} -@subsection dilate - -Dilate an image by using a specific structuring element. -This filter corresponds to the libopencv function @code{cvDilate}. - -It accepts the parameters: @var{struct_el}|@var{nb_iterations}. - -@var{struct_el} represents a structuring element, and has the syntax: -@var{cols}x@var{rows}+@var{anchor_x}x@var{anchor_y}/@var{shape} - -@var{cols} and @var{rows} represent the number of columns and rows of -the structuring element, @var{anchor_x} and @var{anchor_y} the anchor -point, and @var{shape} the shape for the structuring element, and -can be one of the values "rect", "cross", "ellipse", "custom". - -If the value for @var{shape} is "custom", it must be followed by a -string of the form "=@var{filename}". The file with name -@var{filename} is assumed to represent a binary image, with each -printable character corresponding to a bright pixel. When a custom -@var{shape} is used, @var{cols} and @var{rows} are ignored, the number -or columns and rows of the read file are assumed instead. - -The default value for @var{struct_el} is "3x3+0x0/rect". - -@var{nb_iterations} specifies the number of times the transform is -applied to the image, and defaults to 1. - -Follow some example: -@example -# use the default values -ocv=dilate - -# dilate using a structuring element with a 5x5 cross, iterate two times -ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2 - -# read the shape from the file diamond.shape, iterate two times -# the file diamond.shape may contain a pattern of characters like this: -# * -# *** -# ***** -# *** -# * -# the specified cols and rows are ignored (but not the anchor point coordinates) -ocv=dilate:0x0+2x2/custom=diamond.shape|2 -@end example - -@subsection erode - -Erode an image by using a specific structuring element. -This filter corresponds to the libopencv function @code{cvErode}. - -The filter accepts the parameters: @var{struct_el}:@var{nb_iterations}, -with the same syntax and semantics as the @ref{dilate} filter. - -@subsection smooth - -Smooth the input video. - -The filter takes the following parameters: -@var{type}|@var{param1}|@var{param2}|@var{param3}|@var{param4}. - -@var{type} is the type of smooth filter to apply, and can be one of -the following values: "blur", "blur_no_scale", "median", "gaussian", -"bilateral". The default value is "gaussian". - -@var{param1}, @var{param2}, @var{param3}, and @var{param4} are -parameters whose meanings depend on smooth type. @var{param1} and -@var{param2} accept integer positive values or 0, @var{param3} and -@var{param4} accept float values. - -The default value for @var{param1} is 3, the default value for the -other parameters is 0. - -These parameters correspond to the parameters assigned to the -libopencv function @code{cvSmooth}. - -@anchor{overlay} -@section overlay - -Overlay one video on top of another. - -It takes two inputs and one output, the first input is the "main" -video on which the second input is overlayed. - -This filter accepts the following parameters: - -A description of the accepted options follows. - -@table @option -@item x -@item y -Set the expression for the x and y coordinates of the overlayed video -on the main video. Default value is "0" for both expressions. In case -the expression is invalid, it is set to a huge value (meaning that the -overlay will not be displayed within the output visible area). - -@item eval -Set when the expressions for @option{x}, and @option{y} are evaluated. 
- -It accepts the following values: -@table @samp -@item init -only evaluate expressions once during the filter initialization or -when a command is processed - -@item frame -evaluate expressions for each incoming frame -@end table - -Default value is @samp{frame}. - -@item shortest -If set to 1, force the output to terminate when the shortest input -terminates. Default value is 0. - -@item format -Set the format for the output video. - -It accepts the following values: -@table @samp -@item yuv420 -force YUV420 output - -@item yuv444 -force YUV444 output - -@item rgb -force RGB output -@end table - -Default value is @samp{yuv420}. - -@item rgb @emph{(deprecated)} -If set to 1, force the filter to accept inputs in the RGB -color space. Default value is 0. This option is deprecated, use -@option{format} instead. - -@item repeatlast -If set to 1, force the filter to draw the last overlay frame over the -main input until the end of the stream. A value of 0 disables this -behavior. Default value is 1. -@end table - -The @option{x}, and @option{y} expressions can contain the following -parameters. - -@table @option -@item main_w, W -@item main_h, H -main input width and height - -@item overlay_w, w -@item overlay_h, h -overlay input width and height - -@item x -@item y -the computed values for @var{x} and @var{y}. They are evaluated for -each new frame. - -@item hsub -@item vsub -horizontal and vertical chroma subsample values of the output -format. For example for the pixel format "yuv422p" @var{hsub} is 2 and -@var{vsub} is 1. - -@item n -the number of input frame, starting from 0 - -@item pos -the position in the file of the input frame, NAN if unknown - -@item t -timestamp expressed in seconds, NAN if the input timestamp is unknown -@end table - -Note that the @var{n}, @var{pos}, @var{t} variables are available only -when evaluation is done @emph{per frame}, and will evaluate to NAN -when @option{eval} is set to @samp{init}. - -Be aware that frames are taken from each input video in timestamp -order, hence, if their initial timestamps differ, it is a good idea -to pass the two inputs through a @var{setpts=PTS-STARTPTS} filter to -have them begin in the same zero timestamp, as it does the example for -the @var{movie} filter. - -You can chain together more overlays but you should test the -efficiency of such approach. - -@subsection Commands - -This filter supports the following commands: -@table @option -@item x -@item y -Modify the x and y of the overlay input. -The command accepts the same syntax of the corresponding option. - -If the specified expression is not valid, it is kept at its current -value. 
-@end table - -@subsection Examples - -@itemize -@item -Draw the overlay at 10 pixels from the bottom right corner of the main -video: -@example -overlay=main_w-overlay_w-10:main_h-overlay_h-10 -@end example - -Using named options the example above becomes: -@example -overlay=x=main_w-overlay_w-10:y=main_h-overlay_h-10 -@end example - -@item -Insert a transparent PNG logo in the bottom left corner of the input, -using the @command{ffmpeg} tool with the @code{-filter_complex} option: -@example -ffmpeg -i input -i logo -filter_complex 'overlay=10:main_h-overlay_h-10' output -@end example - -@item -Insert 2 different transparent PNG logos (second logo on bottom -right corner) using the @command{ffmpeg} tool: -@example -ffmpeg -i input -i logo1 -i logo2 -filter_complex 'overlay=x=10:y=H-h-10,overlay=x=W-w-10:y=H-h-10' output -@end example - -@item -Add a transparent color layer on top of the main video, @code{WxH} -must specify the size of the main input to the overlay filter: -@example -color=color=red@@.3:size=WxH [over]; [in][over] overlay [out] -@end example - -@item -Play an original video and a filtered version (here with the deshake -filter) side by side using the @command{ffplay} tool: -@example -ffplay input.avi -vf 'split[a][b]; [a]pad=iw*2:ih[src]; [b]deshake[filt]; [src][filt]overlay=w' -@end example - -The above command is the same as: -@example -ffplay input.avi -vf 'split[b], pad=iw*2[src], [b]deshake, [src]overlay=w' -@end example - -@item -Make a sliding overlay appearing from the left to the right top part of the -screen starting since time 2: -@example -overlay=x='if(gte(t,2), -w+(t-2)*20, NAN)':y=0 -@end example - -@item -Compose output by putting two input videos side to side: -@example -ffmpeg -i left.avi -i right.avi -filter_complex " -nullsrc=size=200x100 [background]; -[0:v] setpts=PTS-STARTPTS, scale=100x100 [left]; -[1:v] setpts=PTS-STARTPTS, scale=100x100 [right]; -[background][left] overlay=shortest=1 [background+left]; -[background+left][right] overlay=shortest=1:x=100 [left+right] -" -@end example - -@item -Chain several overlays in cascade: -@example -nullsrc=s=200x200 [bg]; -testsrc=s=100x100, split=4 [in0][in1][in2][in3]; -[in0] lutrgb=r=0, [bg] overlay=0:0 [mid0]; -[in1] lutrgb=g=0, [mid0] overlay=100:0 [mid1]; -[in2] lutrgb=b=0, [mid1] overlay=0:100 [mid2]; -[in3] null, [mid2] overlay=100:100 [out0] -@end example - -@end itemize - -@section owdenoise - -Apply Overcomplete Wavelet denoiser. - -The filter accepts the following options: - -@table @option -@item depth -Set depth. - -Larger depth values will denoise lower frequency components more, but -slow down filtering. - -Must be an int in the range 8-16, default is @code{8}. - -@item luma_strength, ls -Set luma strength. - -Must be a double value in the range 0-1000, default is @code{1.0}. - -@item chroma_strength, cs -Set chroma strength. - -Must be a double value in the range 0-1000, default is @code{1.0}. -@end table - -@section pad - -Add paddings to the input image, and place the original input at the -given coordinates @var{x}, @var{y}. - -This filter accepts the following parameters: - -@table @option -@item width, w -@item height, h -Specify an expression for the size of the output image with the -paddings added. If the value for @var{width} or @var{height} is 0, the -corresponding input size is used for the output. - -The @var{width} expression can reference the value set by the -@var{height} expression, and vice versa. - -The default value of @var{width} and @var{height} is 0. 
- -@item x -@item y -Specify an expression for the offsets where to place the input image -in the padded area with respect to the top/left border of the output -image. - -The @var{x} expression can reference the value set by the @var{y} -expression, and vice versa. - -The default value of @var{x} and @var{y} is 0. - -@item color -Specify the color of the padded area. For the syntax of this option, -check the "Color" section in the ffmpeg-utils manual. - -The default value of @var{color} is "black". -@end table - -The value for the @var{width}, @var{height}, @var{x}, and @var{y} -options are expressions containing the following constants: - -@table @option -@item in_w -@item in_h -the input video width and height - -@item iw -@item ih -same as @var{in_w} and @var{in_h} - -@item out_w -@item out_h -the output width and height, that is the size of the padded area as -specified by the @var{width} and @var{height} expressions - -@item ow -@item oh -same as @var{out_w} and @var{out_h} - -@item x -@item y -x and y offsets as specified by the @var{x} and @var{y} -expressions, or NAN if not yet specified - -@item a -same as @var{iw} / @var{ih} - -@item sar -input sample aspect ratio - -@item dar -input display aspect ratio, it is the same as (@var{iw} / @var{ih}) * @var{sar} - -@item hsub -@item vsub -horizontal and vertical chroma subsample values. For example for the -pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1. -@end table - -@subsection Examples - -@itemize -@item -Add paddings with color "violet" to the input video. Output video -size is 640x480, the top-left corner of the input video is placed at -column 0, row 40: -@example -pad=640:480:0:40:violet -@end example - -The example above is equivalent to the following command: -@example -pad=width=640:height=480:x=0:y=40:color=violet -@end example - -@item -Pad the input to get an output with dimensions increased by 3/2, -and put the input video at the center of the padded area: -@example -pad="3/2*iw:3/2*ih:(ow-iw)/2:(oh-ih)/2" -@end example - -@item -Pad the input to get a squared output with size equal to the maximum -value between the input width and height, and put the input video at -the center of the padded area: -@example -pad="max(iw\,ih):ow:(ow-iw)/2:(oh-ih)/2" -@end example - -@item -Pad the input to get a final w/h ratio of 16:9: -@example -pad="ih*16/9:ih:(ow-iw)/2:(oh-ih)/2" -@end example - -@item -In case of anamorphic video, in order to set the output display aspect -correctly, it is necessary to use @var{sar} in the expression, -according to the relation: -@example -(ih * X / ih) * sar = output_dar -X = output_dar / sar -@end example - -Thus the previous example needs to be modified to: -@example -pad="ih*16/9/sar:ih:(ow-iw)/2:(oh-ih)/2" -@end example - -@item -Double output size and put the input video in the bottom-right -corner of the output padded area: -@example -pad="2*iw:2*ih:ow-iw:oh-ih" -@end example -@end itemize - -@section perspective - -Correct perspective of video not recorded perpendicular to the screen. - -A description of the accepted parameters follows. - -@table @option -@item x0 -@item y0 -@item x1 -@item y1 -@item x2 -@item y2 -@item x3 -@item y3 -Set coordinates expression for top left, top right, bottom left and bottom right corners. -Default values are @code{0:0:W:0:0:H:W:H} with which perspective will remain unchanged. - -The expressions can use the following variables: - -@table @option -@item W -@item H -the width and height of video frame. 
-@end table - -@item interpolation -Set interpolation for perspective correction. - -It accepts the following values: -@table @samp -@item linear -@item cubic -@end table - -Default value is @samp{linear}. -@end table - -@section phase - -Delay interlaced video by one field time so that the field order changes. - -The intended use is to fix PAL movies that have been captured with the -opposite field order to the film-to-video transfer. - -A description of the accepted parameters follows. - -@table @option -@item mode -Set phase mode. - -It accepts the following values: -@table @samp -@item t -Capture field order top-first, transfer bottom-first. -Filter will delay the bottom field. - -@item b -Capture field order bottom-first, transfer top-first. -Filter will delay the top field. - -@item p -Capture and transfer with the same field order. This mode only exists -for the documentation of the other options to refer to, but if you -actually select it, the filter will faithfully do nothing. - -@item a -Capture field order determined automatically by field flags, transfer -opposite. -Filter selects among @samp{t} and @samp{b} modes on a frame by frame -basis using field flags. If no field information is available, -then this works just like @samp{u}. - -@item u -Capture unknown or varying, transfer opposite. -Filter selects among @samp{t} and @samp{b} on a frame by frame basis by -analyzing the images and selecting the alternative that produces best -match between the fields. - -@item T -Capture top-first, transfer unknown or varying. -Filter selects among @samp{t} and @samp{p} using image analysis. - -@item B -Capture bottom-first, transfer unknown or varying. -Filter selects among @samp{b} and @samp{p} using image analysis. - -@item A -Capture determined by field flags, transfer unknown or varying. -Filter selects among @samp{t}, @samp{b} and @samp{p} using field flags and -image analysis. If no field information is available, then this works just -like @samp{U}. This is the default mode. - -@item U -Both capture and transfer unknown or varying. -Filter selects among @samp{t}, @samp{b} and @samp{p} using image analysis only. -@end table -@end table - -@section pixdesctest - -Pixel format descriptor test filter, mainly useful for internal -testing. The output video should be equal to the input video. - -For example: -@example -format=monow, pixdesctest -@end example - -can be used to test the monowhite pixel format descriptor definition. - -@section pp - -Enable the specified chain of postprocessing subfilters using libpostproc. This -library should be automatically selected with a GPL build (@code{--enable-gpl}). -Subfilters must be separated by '/' and can be disabled by prepending a '-'. -Each subfilter and some options have a short and a long name that can be used -interchangeably, i.e. dr/dering are the same. - -The filters accept the following options: - -@table @option -@item subfilters -Set postprocessing subfilters string. -@end table - -All subfilters share common options to determine their scope: - -@table @option -@item a/autoq -Honor the quality commands for this subfilter. - -@item c/chrom -Do chrominance filtering, too (default). - -@item y/nochrom -Do luminance filtering only (no chrominance). - -@item n/noluma -Do chrominance filtering only (no luminance). -@end table - -These options can be appended after the subfilter name, separated by a '|'. 
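-
-For instance, to run the horizontal deblocking subfilter on the luminance
-plane only, a scope option can be appended like this (a small syntax
-illustration; the available subfilters themselves are listed below):
-@example
-pp=hb|y
-@end example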
- -Available subfilters are: - -@table @option -@item hb/hdeblock[|difference[|flatness]] -Horizontal deblocking filter -@table @option -@item difference -Difference factor where higher values mean more deblocking (default: @code{32}). -@item flatness -Flatness threshold where lower values mean more deblocking (default: @code{39}). -@end table - -@item vb/vdeblock[|difference[|flatness]] -Vertical deblocking filter -@table @option -@item difference -Difference factor where higher values mean more deblocking (default: @code{32}). -@item flatness -Flatness threshold where lower values mean more deblocking (default: @code{39}). -@end table - -@item ha/hadeblock[|difference[|flatness]] -Accurate horizontal deblocking filter -@table @option -@item difference -Difference factor where higher values mean more deblocking (default: @code{32}). -@item flatness -Flatness threshold where lower values mean more deblocking (default: @code{39}). -@end table - -@item va/vadeblock[|difference[|flatness]] -Accurate vertical deblocking filter -@table @option -@item difference -Difference factor where higher values mean more deblocking (default: @code{32}). -@item flatness -Flatness threshold where lower values mean more deblocking (default: @code{39}). -@end table -@end table - -The horizontal and vertical deblocking filters share the difference and -flatness values so you cannot set different horizontal and vertical -thresholds. - -@table @option -@item h1/x1hdeblock -Experimental horizontal deblocking filter - -@item v1/x1vdeblock -Experimental vertical deblocking filter - -@item dr/dering -Deringing filter - -@item tn/tmpnoise[|threshold1[|threshold2[|threshold3]]], temporal noise reducer -@table @option -@item threshold1 -larger -> stronger filtering -@item threshold2 -larger -> stronger filtering -@item threshold3 -larger -> stronger filtering -@end table - -@item al/autolevels[:f/fullyrange], automatic brightness / contrast correction -@table @option -@item f/fullyrange -Stretch luminance to @code{0-255}. -@end table - -@item lb/linblenddeint -Linear blend deinterlacing filter that deinterlaces the given block by -filtering all lines with a @code{(1 2 1)} filter. - -@item li/linipoldeint -Linear interpolating deinterlacing filter that deinterlaces the given block by -linearly interpolating every second line. - -@item ci/cubicipoldeint -Cubic interpolating deinterlacing filter deinterlaces the given block by -cubically interpolating every second line. - -@item md/mediandeint -Median deinterlacing filter that deinterlaces the given block by applying a -median filter to every second line. - -@item fd/ffmpegdeint -FFmpeg deinterlacing filter that deinterlaces the given block by filtering every -second line with a @code{(-1 4 2 4 -1)} filter. - -@item l5/lowpass5 -Vertically applied FIR lowpass deinterlacing filter that deinterlaces the given -block by filtering all lines with a @code{(-1 2 6 2 -1)} filter. - -@item fq/forceQuant[|quantizer] -Overrides the quantizer table from the input with the constant quantizer you -specify. 
-@table @option
-@item quantizer
-Quantizer to use
-@end table
-
-@item de/default
-Default pp filter combination (@code{hb|a,vb|a,dr|a})
-
-@item fa/fast
-Fast pp filter combination (@code{h1|a,v1|a,dr|a})
-
-@item ac
-High quality pp filter combination (@code{ha|a|128|7,va|a,dr|a})
-@end table
-
-@subsection Examples
-
-@itemize
-@item
-Apply horizontal and vertical deblocking, deringing and automatic
-brightness/contrast:
-@example
-pp=hb/vb/dr/al
-@end example
-
-@item
-Apply default filters without brightness/contrast correction:
-@example
-pp=de/-al
-@end example
-
-@item
-Apply default filters and temporal denoiser:
-@example
-pp=default/tmpnoise|1|2|3
-@end example
-
-@item
-Apply deblocking on luminance only, and switch vertical deblocking on or off
-automatically depending on available CPU time:
-@example
-pp=hb|y/vb|a
-@end example
-@end itemize
-
-@section psnr
-
-Obtain the average, maximum and minimum PSNR (Peak Signal to Noise
-Ratio) between two input videos.
-
-This filter takes two input videos; the first input is considered the
-"main" source and is passed unchanged to the output. The second input
-is used as a "reference" video for computing the PSNR.
-
-Both video inputs must have the same resolution and pixel format for
-this filter to work correctly. It also assumes that both inputs
-have the same number of frames, which are compared one by one.
-
-The obtained average PSNR is printed through the logging system.
-
-The filter stores the accumulated MSE (mean squared error) of each
-frame, and at the end of the processing it is averaged across all frames
-equally, and the following formula is applied to obtain the PSNR:
-
-@example
-PSNR = 10*log10(MAX^2/MSE)
-@end example
-
-Where MAX is the average of the maximum values of each component of the
-image.
-
-The description of the accepted parameters follows.
-
-@table @option
-@item stats_file, f
-If specified, the filter will use the named file to save the PSNR of
-each individual frame.
-@end table
-
-The file printed if @var{stats_file} is selected contains a sequence of
-key/value pairs of the form @var{key}:@var{value} for each compared
-pair of frames.
-
-A description of each shown parameter follows:
-
-@table @option
-@item n
-sequential number of the input frame, starting from 1
-
-@item mse_avg
-Mean Square Error pixel-by-pixel average difference of the compared
-frames, averaged over all the image components.
-
-@item mse_y, mse_u, mse_v, mse_r, mse_g, mse_b, mse_a
-Mean Square Error pixel-by-pixel average difference of the compared
-frames for the component specified by the suffix.
-
-@item psnr_y, psnr_u, psnr_v, psnr_r, psnr_g, psnr_b, psnr_a
-Peak Signal to Noise ratio of the compared frames for the component
-specified by the suffix.
-@end table
-
-For example:
-@example
-movie=ref_movie.mpg, setpts=PTS-STARTPTS [main];
-[main][ref] psnr="stats_file=stats.log" [out]
-@end example
-
-In this example the input file being processed is compared with the
-reference file @file{ref_movie.mpg}. The PSNR of each individual frame
-is stored in @file{stats.log}.
-
-@section pullup
-
-Pulldown reversal (inverse telecine) filter, capable of handling mixed
-hard-telecine, 24000/1001 fps progressive, and 30000/1001 fps progressive
-content.
-
-The pullup filter is designed to take advantage of future context in making
-its decisions.
This filter is stateless in the sense that it does not lock -onto a pattern to follow, but it instead looks forward to the following -fields in order to identify matches and rebuild progressive frames. - -To produce content with an even framerate, insert the fps filter after -pullup, use @code{fps=24000/1001} if the input frame rate is 29.97fps, -@code{fps=24} for 30fps and the (rare) telecined 25fps input. - -The filter accepts the following options: - -@table @option -@item jl -@item jr -@item jt -@item jb -These options set the amount of "junk" to ignore at the left, right, top, and -bottom of the image, respectively. Left and right are in units of 8 pixels, -while top and bottom are in units of 2 lines. -The default is 8 pixels on each side. - -@item sb -Set the strict breaks. Setting this option to 1 will reduce the chances of -filter generating an occasional mismatched frame, but it may also cause an -excessive number of frames to be dropped during high motion sequences. -Conversely, setting it to -1 will make filter match fields more easily. -This may help processing of video where there is slight blurring between -the fields, but may also cause there to be interlaced frames in the output. -Default value is @code{0}. - -@item mp -Set the metric plane to use. It accepts the following values: -@table @samp -@item l -Use luma plane. - -@item u -Use chroma blue plane. - -@item v -Use chroma red plane. -@end table - -This option may be set to use chroma plane instead of the default luma plane -for doing filter's computations. This may improve accuracy on very clean -source material, but more likely will decrease accuracy, especially if there -is chroma noise (rainbow effect) or any grayscale video. -The main purpose of setting @option{mp} to a chroma plane is to reduce CPU -load and make pullup usable in realtime on slow machines. -@end table - -For best results (without duplicated frames in the output file) it is -necessary to change the output frame rate. For example, to inverse -telecine NTSC input: -@example -ffmpeg -i input -vf pullup -r 24000/1001 ... -@end example - -@section removelogo - -Suppress a TV station logo, using an image file to determine which -pixels comprise the logo. It works by filling in the pixels that -comprise the logo with neighboring pixels. - -The filter accepts the following options: - -@table @option -@item filename, f -Set the filter bitmap file, which can be any image format supported by -libavformat. The width and height of the image file must match those of the -video stream being processed. -@end table - -Pixels in the provided bitmap image with a value of zero are not -considered part of the logo, non-zero pixels are considered part of -the logo. If you use white (255) for the logo and black (0) for the -rest, you will be safe. For making the filter bitmap, it is -recommended to take a screen capture of a black frame with the logo -visible, and then using a threshold filter followed by the erode -filter once or twice. - -If needed, little splotches can be fixed manually. Remember that if -logo pixels are not covered, the filter quality will be much -reduced. Marking too many pixels as part of the logo does not hurt as -much, but it will increase the amount of blurring needed to cover over -the image and will destroy more information than necessary, and extra -pixels will slow things down on a large logo. - -@section rotate - -Rotate video by an arbitrary angle expressed in radians. 
-
-The filter accepts the following options:
-
-@table @option
-@item angle, a
-Set an expression for the angle by which to rotate the input video
-clockwise, expressed as a number of radians. A negative value will
-result in a counter-clockwise rotation. By default it is set to "0".
-
-This expression is evaluated for each frame.
-
-@item out_w, ow
-Set the output width expression, default value is "iw".
-This expression is evaluated just once during configuration.
-
-@item out_h, oh
-Set the output height expression, default value is "ih".
-This expression is evaluated just once during configuration.
-
-@item bilinear
-Enable bilinear interpolation if set to 1; a value of 0 disables
-it. Default value is 1.
-
-@item fillcolor, c
-Set the color used to fill the output area not covered by the rotated
-image. For the general syntax of this option, check the "Color" section in the
-ffmpeg-utils manual. If the special value "none" is selected then no
-background is printed (useful for example if the background is never shown).
-
-Default value is "black".
-@end table
-
-The expressions for the angle and the output size can contain the
-following constants and functions:
-
-@table @option
-@item n
-sequential number of the input frame, starting from 0. It is always NAN
-before the first frame is filtered.
-
-@item t
-time in seconds of the input frame; it is set to 0 when the filter is
-configured. It is always NAN before the first frame is filtered.
-
-@item hsub
-@item vsub
-horizontal and vertical chroma subsample values. For example for the
-pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1.
-
-@item in_w, iw
-@item in_h, ih
-the input video width and height
-
-@item out_w, ow
-@item out_h, oh
-the output width and height, that is the size of the output area as
-specified by the @var{out_w} and @var{out_h} expressions
-
-@item rotw(a)
-@item roth(a)
-the minimal width/height required for completely containing the input
-video rotated by @var{a} radians.
-
-These are only available when computing the @option{out_w} and
-@option{out_h} expressions.
-@end table
-
-@subsection Examples
-
-@itemize
-@item
-Rotate the input by PI/6 radians clockwise:
-@example
-rotate=PI/6
-@end example
-
-@item
-Rotate the input by PI/6 radians counter-clockwise:
-@example
-rotate=-PI/6
-@end example
-
-@item
-Apply a constant rotation with period T, starting from an angle of PI/3:
-@example
-rotate=PI/3+2*PI*t/T
-@end example
-
-@item
-Make the input video rotation oscillate with a period of T
-seconds and an amplitude of A radians:
-@example
-rotate=A*sin(2*PI/T*t)
-@end example
-
-@item
-Rotate the video, with the output size chosen so that the whole rotating
-input video is always completely contained in the output:
-@example
-rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
-@end example
-
-@item
-Rotate the video, reducing the output size so that no background is ever
-shown:
-@example
-rotate=2*PI*t:ow='min(iw,ih)/sqrt(2)':oh=ow:c=none
-@end example
-@end itemize
-
-@subsection Commands
-
-The filter supports the following commands:
-
-@table @option
-@item a, angle
-Set the angle expression.
-The command accepts the same syntax as the corresponding option.
-
-If the specified expression is not valid, it is kept at its current
-value.
-@end table
-
-@section sab
-
-Apply Shape Adaptive Blur.
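-
-As a quick sketch of the syntax (the parameter values here are arbitrary and
-only meant to illustrate the options described below):
-@example
-sab=luma_radius=2:luma_strength=10
-@end example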
- -The filter accepts the following options: - -@table @option -@item luma_radius, lr -Set luma blur filter strength, must be a value in range 0.1-4.0, default -value is 1.0. A greater value will result in a more blurred image, and -in slower processing. - -@item luma_pre_filter_radius, lpfr -Set luma pre-filter radius, must be a value in the 0.1-2.0 range, default -value is 1.0. - -@item luma_strength, ls -Set luma maximum difference between pixels to still be considered, must -be a value in the 0.1-100.0 range, default value is 1.0. - -@item chroma_radius, cr -Set chroma blur filter strength, must be a value in range 0.1-4.0. A -greater value will result in a more blurred image, and in slower -processing. - -@item chroma_pre_filter_radius, cpfr -Set chroma pre-filter radius, must be a value in the 0.1-2.0 range. - -@item chroma_strength, cs -Set chroma maximum difference between pixels to still be considered, -must be a value in the 0.1-100.0 range. -@end table - -Each chroma option value, if not explicitly specified, is set to the -corresponding luma option value. - -@anchor{scale} -@section scale - -Scale (resize) the input video, using the libswscale library. - -The scale filter forces the output display aspect ratio to be the same -of the input, by changing the output sample aspect ratio. - -If the input image format is different from the format requested by -the next filter, the scale filter will convert the input to the -requested format. - -@subsection Options -The filter accepts the following options, or any of the options -supported by the libswscale scaler. - -See @ref{scaler_options,,the ffmpeg-scaler manual,ffmpeg-scaler} for -the complete list of scaler options. - -@table @option -@item width, w -@item height, h -Set the output video dimension expression. Default value is the input -dimension. - -If the value is 0, the input width is used for the output. - -If one of the values is -1, the scale filter will use a value that -maintains the aspect ratio of the input image, calculated from the -other specified dimension. If both of them are -1, the input size is -used - -See below for the list of accepted constants for use in the dimension -expression. - -@item interl -Set the interlacing mode. It accepts the following values: - -@table @samp -@item 1 -Force interlaced aware scaling. - -@item 0 -Do not apply interlaced scaling. - -@item -1 -Select interlaced aware scaling depending on whether the source frames -are flagged as interlaced or not. -@end table - -Default value is @samp{0}. - -@item flags -Set libswscale scaling flags. See -@ref{sws_flags,,the ffmpeg-scaler manual,ffmpeg-scaler} for the -complete list of values. If not explictly specified the filter applies -the default flags. - -@item size, s -Set the video size. For the syntax of this option, check the "Video size" -section in the ffmpeg-utils manual. - -@item in_color_matrix -@item out_color_matrix -Set in/output YCbCr color space type. - -This allows the autodetected value to be overridden as well as allows forcing -a specific value used for the output and encoder. - -If not specified, the color space type depends on the pixel format. - -Possible values: - -@table @samp -@item auto -Choose automatically. - -@item bt709 -Format conforming to International Telecommunication Union (ITU) -Recommendation BT.709. - -@item fcc -Set color space conforming to the United States Federal Communications -Commission (FCC) Code of Federal Regulations (CFR) Title 47 (2003) 73.682 (a). 
- -@item bt601 -Set color space conforming to: - -@itemize -@item -ITU Radiocommunication Sector (ITU-R) Recommendation BT.601 - -@item -ITU-R Rec. BT.470-6 (1998) Systems B, B1, and G - -@item -Society of Motion Picture and Television Engineers (SMPTE) ST 170:2004 - -@end itemize - -@item smpte240m -Set color space conforming to SMPTE ST 240:1999. -@end table - -@item in_range -@item out_range -Set in/output YCbCr sample range. - -This allows the autodetected value to be overridden as well as allows forcing -a specific value used for the output and encoder. If not specified, the -range depends on the pixel format. Possible values: - -@table @samp -@item auto -Choose automatically. - -@item jpeg/full/pc -Set full range (0-255 in case of 8-bit luma). - -@item mpeg/tv -Set "MPEG" range (16-235 in case of 8-bit luma). -@end table - -@item force_original_aspect_ratio -Enable decreasing or increasing output video width or height if necessary to -keep the original aspect ratio. Possible values: - -@table @samp -@item disable -Scale the video as specified and disable this feature. - -@item decrease -The output video dimensions will automatically be decreased if needed. - -@item increase -The output video dimensions will automatically be increased if needed. - -@end table - -One useful instance of this option is that when you know a specific device's -maximum allowed resolution, you can use this to limit the output video to -that, while retaining the aspect ratio. For example, device A allows -1280x720 playback, and your video is 1920x800. Using this option (set it to -decrease) and specifying 1280x720 to the command line makes the output -1280x533. - -Please note that this is a different thing than specifying -1 for @option{w} -or @option{h}, you still need to specify the output resolution for this option -to work. - -@end table - -The values of the @option{w} and @option{h} options are expressions -containing the following constants: - -@table @var -@item in_w -@item in_h -the input width and height - -@item iw -@item ih -same as @var{in_w} and @var{in_h} - -@item out_w -@item out_h -the output (scaled) width and height - -@item ow -@item oh -same as @var{out_w} and @var{out_h} - -@item a -same as @var{iw} / @var{ih} - -@item sar -input sample aspect ratio - -@item dar -input display aspect ratio. Calculated from @code{(iw / ih) * sar}. - -@item hsub -@item vsub -horizontal and vertical input chroma subsample values. For example for the -pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1. - -@item ohsub -@item ovsub -horizontal and vertical output chroma subsample values. For example for the -pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1. 
-@end table - -@subsection Examples - -@itemize -@item -Scale the input video to a size of 200x100: -@example -scale=w=200:h=100 -@end example - -This is equivalent to: -@example -scale=200:100 -@end example - -or: -@example -scale=200x100 -@end example - -@item -Specify a size abbreviation for the output size: -@example -scale=qcif -@end example - -which can also be written as: -@example -scale=size=qcif -@end example - -@item -Scale the input to 2x: -@example -scale=w=2*iw:h=2*ih -@end example - -@item -The above is the same as: -@example -scale=2*in_w:2*in_h -@end example - -@item -Scale the input to 2x with forced interlaced scaling: -@example -scale=2*iw:2*ih:interl=1 -@end example - -@item -Scale the input to half size: -@example -scale=w=iw/2:h=ih/2 -@end example - -@item -Increase the width, and set the height to the same size: -@example -scale=3/2*iw:ow -@end example - -@item -Seek for Greek harmony: -@example -scale=iw:1/PHI*iw -scale=ih*PHI:ih -@end example - -@item -Increase the height, and set the width to 3/2 of the height: -@example -scale=w=3/2*oh:h=3/5*ih -@end example - -@item -Increase the size, but make the size a multiple of the chroma -subsample values: -@example -scale="trunc(3/2*iw/hsub)*hsub:trunc(3/2*ih/vsub)*vsub" -@end example - -@item -Increase the width to a maximum of 500 pixels, keep the same input -aspect ratio: -@example -scale=w='min(500\, iw*3/2):h=-1' -@end example -@end itemize - -@section separatefields - -The @code{separatefields} takes a frame-based video input and splits -each frame into its components fields, producing a new half height clip -with twice the frame rate and twice the frame count. - -This filter use field-dominance information in frame to decide which -of each pair of fields to place first in the output. -If it gets it wrong use @ref{setfield} filter before @code{separatefields} filter. - -@section setdar, setsar - -The @code{setdar} filter sets the Display Aspect Ratio for the filter -output video. - -This is done by changing the specified Sample (aka Pixel) Aspect -Ratio, according to the following equation: -@example -@var{DAR} = @var{HORIZONTAL_RESOLUTION} / @var{VERTICAL_RESOLUTION} * @var{SAR} -@end example - -Keep in mind that the @code{setdar} filter does not modify the pixel -dimensions of the video frame. Also the display aspect ratio set by -this filter may be changed by later filters in the filterchain, -e.g. in case of scaling or if another "setdar" or a "setsar" filter is -applied. - -The @code{setsar} filter sets the Sample (aka Pixel) Aspect Ratio for -the filter output video. - -Note that as a consequence of the application of this filter, the -output display aspect ratio will change according to the equation -above. - -Keep in mind that the sample aspect ratio set by the @code{setsar} -filter may be changed by later filters in the filterchain, e.g. if -another "setsar" or a "setdar" filter is applied. - -The filters accept the following options: - -@table @option -@item r, ratio, dar (@code{setdar} only), sar (@code{setsar} only) -Set the aspect ratio used by the filter. - -The parameter can be a floating point number string, an expression, or -a string of the form @var{num}:@var{den}, where @var{num} and -@var{den} are the numerator and denominator of the aspect ratio. If -the parameter is not specified, it is assumed the value "0". -In case the form "@var{num}:@var{den}" is used, the @code{:} character -should be escaped. 
- -@item max -Set the maximum integer value to use for expressing numerator and -denominator when reducing the expressed aspect ratio to a rational. -Default value is @code{100}. - -@end table - -The parameter @var{sar} is an expression containing -the following constants: - -@table @option -@item E, PI, PHI -the corresponding mathematical approximated values for e -(euler number), pi (greek PI), phi (golden ratio) - -@item w, h -the input width and height - -@item a -same as @var{w} / @var{h} - -@item sar -input sample aspect ratio - -@item dar -input display aspect ratio, it is the same as (@var{w} / @var{h}) * @var{sar} - -@item hsub, vsub -horizontal and vertical chroma subsample values. For example for the -pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1. -@end table - -@subsection Examples - -@itemize - -@item -To change the display aspect ratio to 16:9, specify one of the following: -@example -setdar=dar=1.77777 -setdar=dar=16/9 -setdar=dar=1.77777 -@end example - -@item -To change the sample aspect ratio to 10:11, specify: -@example -setsar=sar=10/11 -@end example - -@item -To set a display aspect ratio of 16:9, and specify a maximum integer value of -1000 in the aspect ratio reduction, use the command: -@example -setdar=ratio=16/9:max=1000 -@end example - -@end itemize - -@anchor{setfield} -@section setfield - -Force field for the output video frame. - -The @code{setfield} filter marks the interlace type field for the -output frames. It does not change the input frame, but only sets the -corresponding property, which affects how the frame is treated by -following filters (e.g. @code{fieldorder} or @code{yadif}). - -The filter accepts the following options: - -@table @option - -@item mode -Available values are: - -@table @samp -@item auto -Keep the same field property. - -@item bff -Mark the frame as bottom-field-first. - -@item tff -Mark the frame as top-field-first. - -@item prog -Mark the frame as progressive. -@end table -@end table - -@section showinfo - -Show a line containing various information for each input video frame. -The input video is not modified. - -The shown line contains a sequence of key/value pairs of the form -@var{key}:@var{value}. - -A description of each shown parameter follows: - -@table @option -@item n -sequential number of the input frame, starting from 0 - -@item pts -Presentation TimeStamp of the input frame, expressed as a number of -time base units. The time base unit depends on the filter input pad. - -@item pts_time -Presentation TimeStamp of the input frame, expressed as a number of -seconds - -@item pos -position of the frame in the input stream, -1 if this information in -unavailable and/or meaningless (for example in case of synthetic video) - -@item fmt -pixel format name - -@item sar -sample aspect ratio of the input frame, expressed in the form -@var{num}/@var{den} - -@item s -size of the input frame. For the syntax of this option, check the "Video size" -section in the ffmpeg-utils manual. - -@item i -interlaced mode ("P" for "progressive", "T" for top field first, "B" -for bottom field first) - -@item iskey -1 if the frame is a key frame, 0 otherwise - -@item type -picture type of the input frame ("I" for an I-frame, "P" for a -P-frame, "B" for a B-frame, "?" for unknown type). -Check also the documentation of the @code{AVPictureType} enum and of -the @code{av_get_picture_type_char} function defined in -@file{libavutil/avutil.h}. 
- -@item checksum -Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame - -@item plane_checksum -Adler-32 checksum (printed in hexadecimal) of each plane of the input frame, -expressed in the form "[@var{c0} @var{c1} @var{c2} @var{c3}]" -@end table - -@anchor{smartblur} -@section smartblur - -Blur the input video without impacting the outlines. - -The filter accepts the following options: - -@table @option -@item luma_radius, lr -Set the luma radius. The option value must be a float number in -the range [0.1,5.0] that specifies the variance of the gaussian filter -used to blur the image (slower if larger). Default value is 1.0. - -@item luma_strength, ls -Set the luma strength. The option value must be a float number -in the range [-1.0,1.0] that configures the blurring. A value included -in [0.0,1.0] will blur the image whereas a value included in -[-1.0,0.0] will sharpen the image. Default value is 1.0. - -@item luma_threshold, lt -Set the luma threshold used as a coefficient to determine -whether a pixel should be blurred or not. The option value must be an -integer in the range [-30,30]. A value of 0 will filter all the image, -a value included in [0,30] will filter flat areas and a value included -in [-30,0] will filter edges. Default value is 0. - -@item chroma_radius, cr -Set the chroma radius. The option value must be a float number in -the range [0.1,5.0] that specifies the variance of the gaussian filter -used to blur the image (slower if larger). Default value is 1.0. - -@item chroma_strength, cs -Set the chroma strength. The option value must be a float number -in the range [-1.0,1.0] that configures the blurring. A value included -in [0.0,1.0] will blur the image whereas a value included in -[-1.0,0.0] will sharpen the image. Default value is 1.0. - -@item chroma_threshold, ct -Set the chroma threshold used as a coefficient to determine -whether a pixel should be blurred or not. The option value must be an -integer in the range [-30,30]. A value of 0 will filter all the image, -a value included in [0,30] will filter flat areas and a value included -in [-30,0] will filter edges. Default value is 0. -@end table - -If a chroma option is not explicitly set, the corresponding luma value -is set. - -@section stereo3d - -Convert between different stereoscopic image formats. - -The filters accept the following options: - -@table @option -@item in -Set stereoscopic image format of input. - -Available values for input image formats are: -@table @samp -@item sbsl -side by side parallel (left eye left, right eye right) - -@item sbsr -side by side crosseye (right eye left, left eye right) - -@item sbs2l -side by side parallel with half width resolution -(left eye left, right eye right) - -@item sbs2r -side by side crosseye with half width resolution -(right eye left, left eye right) - -@item abl -above-below (left eye above, right eye below) - -@item abr -above-below (right eye above, left eye below) - -@item ab2l -above-below with half height resolution -(left eye above, right eye below) - -@item ab2r -above-below with half height resolution -(right eye above, left eye below) - -@item al -alternating frames (left eye first, right eye second) - -@item ar -alternating frames (right eye first, left eye second) - -Default value is @samp{sbsl}. -@end table - -@item out -Set stereoscopic image format of output. 
- -Available values for output image formats are all the input formats as well as: -@table @samp -@item arbg -anaglyph red/blue gray -(red filter on left eye, blue filter on right eye) - -@item argg -anaglyph red/green gray -(red filter on left eye, green filter on right eye) - -@item arcg -anaglyph red/cyan gray -(red filter on left eye, cyan filter on right eye) - -@item arch -anaglyph red/cyan half colored -(red filter on left eye, cyan filter on right eye) - -@item arcc -anaglyph red/cyan color -(red filter on left eye, cyan filter on right eye) - -@item arcd -anaglyph red/cyan color optimized with the least squares projection of dubois -(red filter on left eye, cyan filter on right eye) - -@item agmg -anaglyph green/magenta gray -(green filter on left eye, magenta filter on right eye) - -@item agmh -anaglyph green/magenta half colored -(green filter on left eye, magenta filter on right eye) - -@item agmc -anaglyph green/magenta colored -(green filter on left eye, magenta filter on right eye) - -@item agmd -anaglyph green/magenta color optimized with the least squares projection of dubois -(green filter on left eye, magenta filter on right eye) - -@item aybg -anaglyph yellow/blue gray -(yellow filter on left eye, blue filter on right eye) - -@item aybh -anaglyph yellow/blue half colored -(yellow filter on left eye, blue filter on right eye) - -@item aybc -anaglyph yellow/blue colored -(yellow filter on left eye, blue filter on right eye) - -@item aybd -anaglyph yellow/blue color optimized with the least squares projection of dubois -(yellow filter on left eye, blue filter on right eye) - -@item irl -interleaved rows (left eye has top row, right eye starts on next row) - -@item irr -interleaved rows (right eye has top row, left eye starts on next row) - -@item ml -mono output (left eye only) - -@item mr -mono output (right eye only) -@end table - -Default value is @samp{arcd}. -@end table - -@subsection Examples - -@itemize -@item -Convert input video from side by side parallel to anaglyph yellow/blue dubois: -@example -stereo3d=sbsl:aybd -@end example - -@item -Convert input video from above bellow (left eye above, right eye below) to side by side crosseye. -@example -stereo3d=abl:sbsr -@end example -@end itemize - -@section spp - -Apply a simple postprocessing filter that compresses and decompresses the image -at several (or - in the case of @option{quality} level @code{6} - all) shifts -and average the results. - -The filter accepts the following options: - -@table @option -@item quality -Set quality. This option defines the number of levels for averaging. It accepts -an integer in the range 0-6. If set to @code{0}, the filter will have no -effect. A value of @code{6} means the higher quality. For each increment of -that value the speed drops by a factor of approximately 2. Default value is -@code{3}. - -@item qp -Force a constant quantization parameter. If not set, the filter will use the QP -from the video stream (if available). - -@item mode -Set thresholding mode. Available modes are: - -@table @samp -@item hard -Set hard thresholding (default). -@item soft -Set soft thresholding (better de-ringing effect, but likely blurrier). -@end table - -@item use_bframe_qp -Enable the use of the QP from the B-Frames if set to @code{1}. Using this -option may cause flicker since the B-Frames have often larger QP. Default is -@code{0} (not enabled). -@end table - -@anchor{subtitles} -@section subtitles - -Draw subtitles on top of input video using the libass library. 
- -To enable compilation of this filter you need to configure FFmpeg with -@code{--enable-libass}. This filter also requires a build with libavcodec and -libavformat to convert the passed subtitles file to ASS (Advanced Substation -Alpha) subtitles format. - -The filter accepts the following options: - -@table @option -@item filename, f -Set the filename of the subtitle file to read. It must be specified. - -@item original_size -Specify the size of the original video, the video for which the ASS file -was composed. For the syntax of this option, check the "Video size" section in -the ffmpeg-utils manual. Due to a misdesign in ASS aspect ratio arithmetic, -this is necessary to correctly scale the fonts if the aspect ratio has been -changed. - -@item charenc -Set subtitles input character encoding. @code{subtitles} filter only. Only -useful if not UTF-8. -@end table - -If the first key is not specified, it is assumed that the first value -specifies the @option{filename}. - -For example, to render the file @file{sub.srt} on top of the input -video, use the command: -@example -subtitles=sub.srt -@end example - -which is equivalent to: -@example -subtitles=filename=sub.srt -@end example - -@section super2xsai - -Scale the input by 2x and smooth using the Super2xSaI (Scale and -Interpolate) pixel art scaling algorithm. - -Useful for enlarging pixel art images without reducing sharpness. - -@section swapuv -Swap U & V plane. - -@section telecine - -Apply telecine process to the video. - -This filter accepts the following options: - -@table @option -@item first_field -@table @samp -@item top, t -top field first -@item bottom, b -bottom field first -The default value is @code{top}. -@end table - -@item pattern -A string of numbers representing the pulldown pattern you wish to apply. -The default value is @code{23}. -@end table - -@example -Some typical patterns: - -NTSC output (30i): -27.5p: 32222 -24p: 23 (classic) -24p: 2332 (preferred) -20p: 33 -18p: 334 -16p: 3444 - -PAL output (25i): -27.5p: 12222 -24p: 222222222223 ("Euro pulldown") -16.67p: 33 -16p: 33333334 -@end example - -@section thumbnail -Select the most representative frame in a given sequence of consecutive frames. - -The filter accepts the following options: - -@table @option -@item n -Set the frames batch size to analyze; in a set of @var{n} frames, the filter -will pick one of them, and then handle the next batch of @var{n} frames until -the end. Default is @code{100}. -@end table - -Since the filter keeps track of the whole frames sequence, a bigger @var{n} -value will result in a higher memory usage, so a high value is not recommended. - -@subsection Examples - -@itemize -@item -Extract one picture each 50 frames: -@example -thumbnail=50 -@end example - -@item -Complete example of a thumbnail creation with @command{ffmpeg}: -@example -ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png -@end example -@end itemize - -@section tile - -Tile several successive frames together. - -The filter accepts the following options: - -@table @option - -@item layout -Set the grid size (i.e. the number of lines and columns). For the syntax of -this option, check the "Video size" section in the ffmpeg-utils manual. - -@item nb_frames -Set the maximum number of frames to render in the given area. It must be less -than or equal to @var{w}x@var{h}. The default value is @code{0}, meaning all -the area will be used. - -@item margin -Set the outer border margin in pixels. - -@item padding -Set the inner border thickness (i.e. 
the number of pixels between frames). For
-more advanced padding options (such as having different values for the edges),
-refer to the pad video filter.
-
-@item color
-Specify the color of the unused area. For the syntax of this option, check the
-"Color" section in the ffmpeg-utils manual. The default value of @var{color}
-is "black".
-@end table
-
-@subsection Examples
-
-@itemize
-@item
-Produce 8x8 PNG tiles of all keyframes (@option{-skip_frame nokey}) in a movie:
-@example
-ffmpeg -skip_frame nokey -i file.avi -vf 'scale=128:72,tile=8x8' -an -vsync 0 keyframes%03d.png
-@end example
-The @option{-vsync 0} is necessary to prevent @command{ffmpeg} from
-duplicating each output frame to accommodate the originally detected frame
-rate.
-
-@item
-Display @code{5} pictures in an area of @code{3x2} frames,
-with @code{7} pixels between them, and @code{2} pixels of initial margin, using
-mixed flat and named options:
-@example
-tile=3x2:nb_frames=5:padding=7:margin=2
-@end example
-@end itemize
-
-@section tinterlace
-
-Perform various types of temporal field interlacing.
-
-Frames are counted starting from 1, so the first input frame is
-considered odd.
-
-The filter accepts the following options:
-
-@table @option
-
-@item mode
-Specify the mode of the interlacing. This option can also be specified
-as a value alone. See below for a list of values for this option.
-
-Available values are:
-
-@table @samp
-@item merge, 0
-Move odd frames into the upper field, even into the lower field,
-generating a double height frame at half frame rate.
-
-@item drop_odd, 1
-Only output even frames, odd frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-@item drop_even, 2
-Only output odd frames, even frames are dropped, generating a frame with
-unchanged height at half frame rate.
-
-@item pad, 3
-Expand each frame to full height, but pad alternate lines with black,
-generating a frame with double height at the same input frame rate.
-
-@item interleave_top, 4
-Interleave the upper field from odd frames with the lower field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-@item interleave_bottom, 5
-Interleave the lower field from odd frames with the upper field from
-even frames, generating a frame with unchanged height at half frame rate.
-
-@item interlacex2, 6
-Double frame rate with unchanged height. Frames are inserted each
-containing the second temporal field from the previous input frame and
-the first temporal field from the next input frame. This mode relies on
-the top_field_first flag. Useful for interlaced video displays with no
-field synchronisation.
-@end table
-
-Numeric values are deprecated but are accepted for backward
-compatibility reasons.
-
-Default mode is @code{merge}.
-
-@item flags
-Specify flags influencing the filter process.
-
-The following value is available for @var{flags}:
-
-@table @option
-@item low_pass_filter, vlfp
-Enable vertical low-pass filtering in the filter.
-Vertical low-pass filtering is required when creating an interlaced
-destination from a progressive source which contains high-frequency
-vertical detail. Filtering will reduce interlace 'twitter' and Moire
-patterning.
-
-Vertical low-pass filtering can only be enabled for @option{mode}
-@var{interleave_top} and @var{interleave_bottom}.
-
-@end table
-@end table
-
-@section transpose
-
-Transpose rows with columns in the input video and optionally flip it.
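-
-For example, to rotate the input by 90 degrees clockwise using the symbolic
-@samp{clock} value (a minimal illustration; the available values are
-described below):
-@example
-transpose=clock
-@end example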
- -This filter accepts the following options: - -@table @option - -@item dir -Specify the transposition direction. - -Can assume the following values: -@table @samp -@item 0, 4, cclock_flip -Rotate by 90 degrees counterclockwise and vertically flip (default), that is: -@example -L.R L.l -. . -> . . -l.r R.r -@end example - -@item 1, 5, clock -Rotate by 90 degrees clockwise, that is: -@example -L.R l.L -. . -> . . -l.r r.R -@end example - -@item 2, 6, cclock -Rotate by 90 degrees counterclockwise, that is: -@example -L.R R.r -. . -> . . -l.r L.l -@end example - -@item 3, 7, clock_flip -Rotate by 90 degrees clockwise and vertically flip, that is: -@example -L.R r.R -. . -> . . -l.r l.L -@end example -@end table - -For values between 4-7, the transposition is only done if the input -video geometry is portrait and not landscape. These values are -deprecated, the @code{passthrough} option should be used instead. - -Numerical values are deprecated, and should be dropped in favor of -symbolic constants. - -@item passthrough -Do not apply the transposition if the input geometry matches the one -specified by the specified value. It accepts the following values: -@table @samp -@item none -Always apply transposition. -@item portrait -Preserve portrait geometry (when @var{height} >= @var{width}). -@item landscape -Preserve landscape geometry (when @var{width} >= @var{height}). -@end table - -Default value is @code{none}. -@end table - -For example to rotate by 90 degrees clockwise and preserve portrait -layout: -@example -transpose=dir=1:passthrough=portrait -@end example - -The command above can also be specified as: -@example -transpose=1:portrait -@end example - -@section trim -Trim the input so that the output contains one continuous subpart of the input. - -This filter accepts the following options: -@table @option -@item start -Specify time of the start of the kept section, i.e. the frame with the -timestamp @var{start} will be the first frame in the output. - -@item end -Specify time of the first frame that will be dropped, i.e. the frame -immediately preceding the one with the timestamp @var{end} will be the last -frame in the output. - -@item start_pts -Same as @var{start}, except this option sets the start timestamp in timebase -units instead of seconds. - -@item end_pts -Same as @var{end}, except this option sets the end timestamp in timebase units -instead of seconds. - -@item duration -Specify maximum duration of the output. - -@item start_frame -Number of the first frame that should be passed to output. - -@item end_frame -Number of the first frame that should be dropped. -@end table - -@option{start}, @option{end}, @option{duration} are expressed as time -duration specifications, check the "Time duration" section in the -ffmpeg-utils manual. - -Note that the first two sets of the start/end options and the @option{duration} -option look at the frame timestamp, while the _frame variants simply count the -frames that pass through the filter. Also note that this filter does not modify -the timestamps. If you wish that the output timestamps start at zero, insert a -setpts filter after the trim filter. - -If multiple start or end options are set, this filter tries to be greedy and -keep all the frames that match at least one of the specified constraints. To keep -only the part that matches all the constraints at once, chain multiple trim -filters. - -The defaults are such that all the input is kept. So it is possible to set e.g. 
-just the end values to keep everything before the specified time. - -Examples: -@itemize -@item -drop everything except the second minute of input -@example -ffmpeg -i INPUT -vf trim=60:120 -@end example - -@item -keep only the first second -@example -ffmpeg -i INPUT -vf trim=duration=1 -@end example - -@end itemize - - -@section unsharp - -Sharpen or blur the input video. - -It accepts the following parameters: - -@table @option -@item luma_msize_x, lx -Set the luma matrix horizontal size. It must be an odd integer between -3 and 63, default value is 5. - -@item luma_msize_y, ly -Set the luma matrix vertical size. It must be an odd integer between 3 -and 63, default value is 5. - -@item luma_amount, la -Set the luma effect strength. It can be a float number, reasonable -values lay between -1.5 and 1.5. - -Negative values will blur the input video, while positive values will -sharpen it, a value of zero will disable the effect. - -Default value is 1.0. - -@item chroma_msize_x, cx -Set the chroma matrix horizontal size. It must be an odd integer -between 3 and 63, default value is 5. - -@item chroma_msize_y, cy -Set the chroma matrix vertical size. It must be an odd integer -between 3 and 63, default value is 5. - -@item chroma_amount, ca -Set the chroma effect strength. It can be a float number, reasonable -values lay between -1.5 and 1.5. - -Negative values will blur the input video, while positive values will -sharpen it, a value of zero will disable the effect. - -Default value is 0.0. - -@item opencl -If set to 1, specify using OpenCL capabilities, only available if -FFmpeg was configured with @code{--enable-opencl}. Default value is 0. - -@end table - -All parameters are optional and default to the equivalent of the -string '5:5:1.0:5:5:0.0'. - -@subsection Examples - -@itemize -@item -Apply strong luma sharpen effect: -@example -unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5 -@end example - -@item -Apply strong blur of both luma and chroma parameters: -@example -unsharp=7:7:-2:7:7:-2 -@end example -@end itemize - -@anchor{vidstabdetect} -@section vidstabdetect - -Analyze video stabilization/deshaking. Perform pass 1 of 2, see -@ref{vidstabtransform} for pass 2. - -This filter generates a file with relative translation and rotation -transform information about subsequent frames, which is then used by -the @ref{vidstabtransform} filter. - -To enable compilation of this filter you need to configure FFmpeg with -@code{--enable-libvidstab}. - -This filter accepts the following options: - -@table @option -@item result -Set the path to the file used to write the transforms information. -Default value is @file{transforms.trf}. - -@item shakiness -Set how shaky the video is and how quick the camera is. It accepts an -integer in the range 1-10, a value of 1 means little shakiness, a -value of 10 means strong shakiness. Default value is 5. - -@item accuracy -Set the accuracy of the detection process. It must be a value in the -range 1-15. A value of 1 means low accuracy, a value of 15 means high -accuracy. Default value is 9. - -@item stepsize -Set stepsize of the search process. The region around minimum is -scanned with 1 pixel resolution. Default value is 6. - -@item mincontrast -Set minimum contrast. Below this value a local measurement field is -discarded. Must be a floating point value in the range 0-1. Default -value is 0.3. - -@item tripod -Set reference frame number for tripod mode. 
- -If enabled, the motion of the frames is compared to a reference frame -in the filtered stream, identified by the specified number. The idea -is to compensate all movements in a more-or-less static scene and keep -the camera view absolutely still. - -If set to 0, it is disabled. The frames are counted starting from 1. - -@item show -Show fields and transforms in the resulting frames. It accepts an -integer in the range 0-2. Default value is 0, which disables any -visualization. -@end table - -@subsection Examples - -@itemize -@item -Use default values: -@example -vidstabdetect -@end example - -@item -Analyze strongly shaky movie and put the results in file -@file{mytransforms.trf}: -@example -vidstabdetect=shakiness=10:accuracy=15:result="mytransforms.trf" -@end example - -@item -Visualize the result of internal transformations in the resulting -video: -@example -vidstabdetect=show=1 -@end example - -@item -Analyze a video with medium shakiness using @command{ffmpeg}: -@example -ffmpeg -i input -vf vidstabdetect=shakiness=5:show=1 dummy.avi -@end example -@end itemize - -@anchor{vidstabtransform} -@section vidstabtransform - -Video stabilization/deshaking: pass 2 of 2, -see @ref{vidstabdetect} for pass 1. - -Read a file with transform information for each frame and -apply/compensate them. Together with the @ref{vidstabdetect} -filter this can be used to deshake videos. See also -@url{http://public.hronopik.de/vid.stab}. It is important to also use -the unsharp filter, see below. - -To enable compilation of this filter you need to configure FFmpeg with -@code{--enable-libvidstab}. - -This filter accepts the following options: - -@table @option - -@item input -path to the file used to read the transforms (default: @file{transforms.trf}) - -@item smoothing -number of frames (value*2 + 1) used for lowpass filtering the camera movements -(default: 10). For example a number of 10 means that 21 frames are used -(10 in the past and 10 in the future) to smoothen the motion in the -video. A larger values leads to a smoother video, but limits the -acceleration of the camera (pan/tilt movements). - -@item maxshift -maximal number of pixels to translate frames (default: -1 no limit) - -@item maxangle -maximal angle in radians (degree*PI/180) to rotate frames (default: -1 -no limit) - -@item crop -How to deal with borders that may be visible due to movement -compensation. Available values are: - -@table @samp -@item keep -keep image information from previous frame (default) -@item black -fill the border black -@end table - -@item invert -@table @samp -@item 0 -keep transforms normal (default) -@item 1 -invert transforms -@end table - -@item relative -consider transforms as -@table @samp -@item 0 -absolute -@item 1 -relative to previous frame (default) -@end table - -@item zoom -percentage to zoom (default: 0) -@table @samp -@item >0 -zoom in -@item <0 -zoom out -@end table - -@item optzoom -set optimal zooming to avoid borders -@table @samp -@item 0 -disabled -@item 1 -optimal static zoom value is determined (only very strong movements will lead to visible borders) (default) -@item 2 -optimal adaptive zoom value is determined (no borders will be visible) -@end table -Note that the value given at zoom is added to the one calculated -here. 
- -@item interpol -type of interpolation - -Available values are: -@table @samp -@item no -no interpolation -@item linear -linear only horizontal -@item bilinear -linear in both directions (default) -@item bicubic -cubic in both directions (slow) -@end table - -@item tripod -virtual tripod mode means that the video is stabilized such that the -camera stays stationary. Use also @code{tripod} option of -@ref{vidstabdetect}. -@table @samp -@item 0 -off (default) -@item 1 -virtual tripod mode: equivalent to @code{relative=0:smoothing=0} -@end table - -@end table - -@subsection Examples - -@itemize -@item -typical call with default default values: - (note the unsharp filter which is always recommended) -@example -ffmpeg -i inp.mpeg -vf vidstabtransform,unsharp=5:5:0.8:3:3:0.4 inp_stabilized.mpeg -@end example - -@item -zoom in a bit more and load transform data from a given file -@example -vidstabtransform=zoom=5:input="mytransforms.trf" -@end example - -@item -smoothen the video even more -@example -vidstabtransform=smoothing=30 -@end example - -@end itemize - -@section vflip - -Flip the input video vertically. - -For example, to vertically flip a video with @command{ffmpeg}: -@example -ffmpeg -i in.avi -vf "vflip" out.avi -@end example - -@section vignette - -Make or reverse a natural vignetting effect. - -The filter accepts the following options: - -@table @option -@item angle, a -Set lens angle expression as a number of radians. - -The value is clipped in the @code{[0,PI/2]} range. - -Default value: @code{"PI/5"} - -@item x0 -@item y0 -Set center coordinates expressions. Respectively @code{"w/2"} and @code{"h/2"} -by default. - -@item mode -Set forward/backward mode. - -Available modes are: -@table @samp -@item forward -The larger the distance from the central point, the darker the image becomes. - -@item backward -The larger the distance from the central point, the brighter the image becomes. -This can be used to reverse a vignette effect, though there is no automatic -detection to extract the lens @option{angle} and other settings (yet). It can -also be used to create a burning effect. -@end table - -Default value is @samp{forward}. - -@item eval -Set evaluation mode for the expressions (@option{angle}, @option{x0}, @option{y0}). - -It accepts the following values: -@table @samp -@item init -Evaluate expressions only once during the filter initialization. - -@item frame -Evaluate expressions for each incoming frame. This is way slower than the -@samp{init} mode since it requires all the scalers to be re-computed, but it -allows advanced dynamic expressions. -@end table - -Default value is @samp{init}. - -@item dither -Set dithering to reduce the circular banding effects. Default is @code{1} -(enabled). - -@item aspect -Set vignette aspect. This setting allows to adjust the shape of the vignette. -Setting this value to the SAR of the input will make a rectangular vignetting -following the dimensions of the video. - -Default is @code{1/1}. -@end table - -@subsection Expressions - -The @option{alpha}, @option{x0} and @option{y0} expressions can contain the -following parameters. 
- -@table @option -@item w -@item h -input width and height - -@item n -the number of input frame, starting from 0 - -@item pts -the PTS (Presentation TimeStamp) time of the filtered video frame, expressed in -@var{TB} units, NAN if undefined - -@item r -frame rate of the input video, NAN if the input frame rate is unknown - -@item t -the PTS (Presentation TimeStamp) of the filtered video frame, -expressed in seconds, NAN if undefined - -@item tb -time base of the input video -@end table - - -@subsection Examples - -@itemize -@item -Apply simple strong vignetting effect: -@example -vignette=PI/4 -@end example - -@item -Make a flickering vignetting: -@example -vignette='PI/4+random(1)*PI/50':eval=frame -@end example - -@end itemize - -@section w3fdif - -Deinterlace the input video ("w3fdif" stands for "Weston 3 Field -Deinterlacing Filter"). - -Based on the process described by Martin Weston for BBC R&D, and -implemented based on the de-interlace algorithm written by Jim -Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter -uses filter coefficients calculated by BBC R&D. - -There are two sets of filter coefficients, so called "simple": -and "complex". Which set of filter coefficients is used can -be set by passing an optional parameter: - -@table @option -@item filter -Set the interlacing filter coefficients. Accepts one of the following values: - -@table @samp -@item simple -Simple filter coefficient set. -@item complex -More-complex filter coefficient set. -@end table -Default value is @samp{complex}. - -@item deint -Specify which frames to deinterlace. Accept one of the following values: - -@table @samp -@item all -Deinterlace all frames, -@item interlaced -Only deinterlace frames marked as interlaced. -@end table - -Default value is @samp{all}. -@end table - -@anchor{yadif} -@section yadif - -Deinterlace the input video ("yadif" means "yet another deinterlacing -filter"). - -This filter accepts the following options: - - -@table @option - -@item mode -The interlacing mode to adopt, accepts one of the following values: - -@table @option -@item 0, send_frame -output 1 frame for each frame -@item 1, send_field -output 1 frame for each field -@item 2, send_frame_nospatial -like @code{send_frame} but skip spatial interlacing check -@item 3, send_field_nospatial -like @code{send_field} but skip spatial interlacing check -@end table - -Default value is @code{send_frame}. - -@item parity -The picture field parity assumed for the input interlaced video, accepts one of -the following values: - -@table @option -@item 0, tff -assume top field first -@item 1, bff -assume bottom field first -@item -1, auto -enable automatic detection -@end table - -Default value is @code{auto}. -If interlacing is unknown or decoder does not export this information, -top field first will be assumed. - -@item deint -Specify which frames to deinterlace. Accept one of the following -values: - -@table @option -@item 0, all -deinterlace all frames -@item 1, interlaced -only deinterlace frames marked as interlaced -@end table - -Default value is @code{all}. -@end table - -@c man end VIDEO FILTERS - -@chapter Video Sources -@c man begin VIDEO SOURCES - -Below is a description of the currently available video sources. - -@section buffer - -Buffer video frames, and make them available to the filter chain. - -This source is mainly intended for a programmatic use, in particular -through the interface defined in @file{libavfilter/vsrc_buffer.h}. 
- -This source accepts the following options: - -@table @option - -@item video_size -Specify the size (width and height) of the buffered video frames. For the -syntax of this option, check the "Video size" section in the ffmpeg-utils -manual. - -@item width -Input video width. - -@item height -Input video height. - -@item pix_fmt -A string representing the pixel format of the buffered video frames. -It may be a number corresponding to a pixel format, or a pixel format -name. - -@item time_base -Specify the timebase assumed by the timestamps of the buffered frames. - -@item frame_rate -Specify the frame rate expected for the video stream. - -@item pixel_aspect, sar -Specify the sample aspect ratio assumed by the video frames. - -@item sws_param -Specify the optional parameters to be used for the scale filter which -is automatically inserted when an input change is detected in the -input size or format. -@end table - -For example: -@example -buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1 -@end example - -will instruct the source to accept video frames with size 320x240 and -with format "yuv410p", assuming 1/24 as the timestamps timebase and -square pixels (1:1 sample aspect ratio). -Since the pixel format with name "yuv410p" corresponds to the number 6 -(check the enum AVPixelFormat definition in @file{libavutil/pixfmt.h}), -this example corresponds to: -@example -buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1 -@end example - -Alternatively, the options can be specified as a flat string, but this -syntax is deprecated: - -@var{width}:@var{height}:@var{pix_fmt}:@var{time_base.num}:@var{time_base.den}:@var{pixel_aspect.num}:@var{pixel_aspect.den}[:@var{sws_param}] - -@section cellauto - -Create a pattern generated by an elementary cellular automaton. - -The initial state of the cellular automaton can be defined through the -@option{filename}, and @option{pattern} options. If such options are -not specified an initial state is created randomly. - -At each new frame a new row in the video is filled with the result of -the cellular automaton next generation. The behavior when the whole -frame is filled is defined by the @option{scroll} option. - -This source accepts the following options: - -@table @option -@item filename, f -Read the initial cellular automaton state, i.e. the starting row, from -the specified file. -In the file, each non-whitespace character is considered an alive -cell, a newline will terminate the row, and further characters in the -file will be ignored. - -@item pattern, p -Read the initial cellular automaton state, i.e. the starting row, from -the specified string. - -Each non-whitespace character in the string is considered an alive -cell, a newline will terminate the row, and further characters in the -string will be ignored. - -@item rate, r -Set the video rate, that is the number of frames generated per second. -Default is 25. - -@item random_fill_ratio, ratio -Set the random fill ratio for the initial cellular automaton row. It -is a floating point number value ranging from 0 to 1, defaults to -1/PHI. - -This option is ignored when a file or a pattern is specified. - -@item random_seed, seed -Set the seed for filling randomly the initial row, must be an integer -included between 0 and UINT32_MAX. If not specified, or if explicitly -set to -1, the filter will try to use a good random seed on a best -effort basis. - -@item rule -Set the cellular automaton rule, it is a number ranging from 0 to 255. -Default value is 110. 
- -@item size, s -Set the size of the output video. For the syntax of this option, check -the "Video size" section in the ffmpeg-utils manual. - -If @option{filename} or @option{pattern} is specified, the size is set -by default to the width of the specified initial state row, and the -height is set to @var{width} * PHI. - -If @option{size} is set, it must contain the width of the specified -pattern string, and the specified pattern will be centered in the -larger row. - -If a filename or a pattern string is not specified, the size value -defaults to "320x518" (used for a randomly generated initial state). - -@item scroll -If set to 1, scroll the output upward when all the rows in the output -have been already filled. If set to 0, the new generated row will be -written over the top row just after the bottom row is filled. -Defaults to 1. - -@item start_full, full -If set to 1, completely fill the output with generated rows before -outputting the first frame. -This is the default behavior, for disabling set the value to 0. - -@item stitch -If set to 1, stitch the left and right row edges together. -This is the default behavior, for disabling set the value to 0. -@end table - -@subsection Examples - -@itemize -@item -Read the initial state from @file{pattern}, and specify an output of -size 200x400. -@example -cellauto=f=pattern:s=200x400 -@end example - -@item -Generate a random initial row with a width of 200 cells, with a fill -ratio of 2/3: -@example -cellauto=ratio=2/3:s=200x200 -@end example - -@item -Create a pattern generated by rule 18 starting by a single alive cell -centered on an initial row with width 100: -@example -cellauto=p=@@:s=100x400:full=0:rule=18 -@end example - -@item -Specify a more elaborated initial pattern: -@example -cellauto=p='@@@@ @@ @@@@':s=100x400:full=0:rule=18 -@end example - -@end itemize - -@section mandelbrot - -Generate a Mandelbrot set fractal, and progressively zoom towards the -point specified with @var{start_x} and @var{start_y}. - -This source accepts the following options: - -@table @option - -@item end_pts -Set the terminal pts value. Default value is 400. - -@item end_scale -Set the terminal scale value. -Must be a floating point value. Default value is 0.3. - -@item inner -Set the inner coloring mode, that is the algorithm used to draw the -Mandelbrot fractal internal region. - -It shall assume one of the following values: -@table @option -@item black -Set black mode. -@item convergence -Show time until convergence. -@item mincol -Set color based on point closest to the origin of the iterations. -@item period -Set period mode. -@end table - -Default value is @var{mincol}. - -@item bailout -Set the bailout value. Default value is 10.0. - -@item maxiter -Set the maximum of iterations performed by the rendering -algorithm. Default value is 7189. - -@item outer -Set outer coloring mode. -It shall assume one of following values: -@table @option -@item iteration_count -Set iteration cound mode. -@item normalized_iteration_count -set normalized iteration count mode. -@end table -Default value is @var{normalized_iteration_count}. - -@item rate, r -Set frame rate, expressed as number of frames per second. Default -value is "25". - -@item size, s -Set frame size. For the syntax of this option, check the "Video -size" section in the ffmpeg-utils manual. Default value is "640x480". - -@item start_scale -Set the initial scale value. Default value is 3.0. - -@item start_x -Set the initial x position. Must be a floating point value between --100 and 100. 
Default value is -0.743643887037158704752191506114774. - -@item start_y -Set the initial y position. Must be a floating point value between --100 and 100. Default value is -0.131825904205311970493132056385139. -@end table - -@section mptestsrc - -Generate various test patterns, as generated by the MPlayer test filter. - -The size of the generated video is fixed, and is 256x256. -This source is useful in particular for testing encoding features. - -This source accepts the following options: - -@table @option - -@item rate, r -Specify the frame rate of the sourced video, as the number of frames -generated per second. It has to be a string in the format -@var{frame_rate_num}/@var{frame_rate_den}, an integer number, a float -number or a valid video frame rate abbreviation. The default value is -"25". - -@item duration, d -Set the video duration of the sourced video. The accepted syntax is: -@example -[-]HH:MM:SS[.m...] -[-]S+[.m...] -@end example -See also the function @code{av_parse_time()}. - -If not specified, or the expressed duration is negative, the video is -supposed to be generated forever. - -@item test, t - -Set the number or the name of the test to perform. Supported tests are: -@table @option -@item dc_luma -@item dc_chroma -@item freq_luma -@item freq_chroma -@item amp_luma -@item amp_chroma -@item cbp -@item mv -@item ring1 -@item ring2 -@item all -@end table - -Default value is "all", which will cycle through the list of all tests. -@end table - -For example the following: -@example -testsrc=t=dc_luma -@end example - -will generate a "dc_luma" test pattern. - -@section frei0r_src - -Provide a frei0r source. - -To enable compilation of this filter you need to install the frei0r -header and configure FFmpeg with @code{--enable-frei0r}. - -This source accepts the following options: - -@table @option - -@item size -The size of the video to generate. For the syntax of this option, check the -"Video size" section in the ffmpeg-utils manual. - -@item framerate -Framerate of the generated video, may be a string of the form -@var{num}/@var{den} or a frame rate abbreviation. - -@item filter_name -The name to the frei0r source to load. For more information regarding frei0r and -how to set the parameters read the section @ref{frei0r} in the description of -the video filters. - -@item filter_params -A '|'-separated list of parameters to pass to the frei0r source. - -@end table - -For example, to generate a frei0r partik0l source with size 200x200 -and frame rate 10 which is overlayed on the overlay filter main input: -@example -frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay -@end example - -@section life - -Generate a life pattern. - -This source is based on a generalization of John Conway's life game. - -The sourced input represents a life grid, each pixel represents a cell -which can be in one of two possible states, alive or dead. Every cell -interacts with its eight neighbours, which are the cells that are -horizontally, vertically, or diagonally adjacent. - -At each interaction the grid evolves according to the adopted rule, -which specifies the number of neighbor alive cells which will make a -cell stay alive or born. The @option{rule} option allows to specify -the rule to adopt. - -This source accepts the following options: - -@table @option -@item filename, f -Set the file from which to read the initial grid state. 
In the file,
-each non-whitespace character is considered an alive cell, and a newline
-is used to delimit the end of each row.
-
-If this option is not specified, the initial grid is generated
-randomly.
-
-@item rate, r
-Set the video rate, that is the number of frames generated per second.
-Default is 25.
-
-@item random_fill_ratio, ratio
-Set the random fill ratio for the initial random grid. It is a
-floating point number ranging from 0 to 1, and defaults to 1/PHI.
-It is ignored when a file is specified.
-
-@item random_seed, seed
-Set the seed for filling the initial random grid, must be an integer
-between 0 and UINT32_MAX. If not specified, or if explicitly
-set to -1, the filter will try to use a good random seed on a best
-effort basis.
-
-@item rule
-Set the life rule.
-
-A rule can be specified with a code of the kind "S@var{NS}/B@var{NB}",
-where @var{NS} and @var{NB} are sequences of numbers in the range 0-8,
-@var{NS} specifies the number of alive neighbor cells which make a
-live cell stay alive, and @var{NB} the number of alive neighbor cells
-which make a dead cell become alive (i.e. be "born").
-"s" and "b" can be used in place of "S" and "B", respectively.
-
-Alternatively, a rule can be specified as an 18-bit integer. The 9
-high order bits are used to encode the next cell state if it is alive
-for each number of neighbor alive cells, the low order bits specify
-the rule for "birthing" new cells. Higher order bits encode a
-higher number of neighbor cells.
-For example the number 6153 = @code{(12<<9)+9} specifies a stay alive
-rule of 12 and a born rule of 9, which corresponds to "S23/B03".
-
-Default value is "S23/B3", which is the original Conway's game of life
-rule: it keeps a cell alive if it has 2 or 3 alive neighbor
-cells, and gives birth to a new cell if there are three alive cells around
-a dead cell.
-
-@item size, s
-Set the size of the output video. For the syntax of this option, check the
-"Video size" section in the ffmpeg-utils manual.
-
-If @option{filename} is specified, the size is set by default to the
-same size as the input file. If @option{size} is set, it must be able to
-contain the grid specified in the input file, and the initial grid defined in
-that file is centered in the larger resulting area.
-
-If a filename is not specified, the size value defaults to "320x240"
-(used for a randomly generated initial grid).
-
-@item stitch
-If set to 1, stitch the left and right grid edges together, and likewise
-the top and bottom edges. Defaults to 1.
-
-@item mold
-Set the cell mold speed. If set, a dead cell will go from @option{death_color} to
-@option{mold_color} with a step of @option{mold}. @option{mold} can have a
-value from 0 to 255.
-
-@item life_color
-Set the color of living (or newborn) cells.
-
-@item death_color
-Set the color of dead cells. If @option{mold} is set, this is the first color
-used to represent a dead cell.
-
-@item mold_color
-Set the mold color, for definitely dead and moldy cells.
-
-For the syntax of these three color options, check the "Color" section in the
-ffmpeg-utils manual.
-@end table - -@subsection Examples - -@itemize -@item -Read a grid from @file{pattern}, and center it on a grid of size -300x300 pixels: -@example -life=f=pattern:s=300x300 -@end example - -@item -Generate a random grid of size 200x200, with a fill ratio of 2/3: -@example -life=ratio=2/3:s=200x200 -@end example - -@item -Specify a custom rule for evolving a randomly generated grid: -@example -life=rule=S14/B34 -@end example - -@item -Full example with slow death effect (mold) using @command{ffplay}: -@example -ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16 -@end example -@end itemize - -@anchor{color} -@anchor{haldclutsrc} -@anchor{nullsrc} -@anchor{rgbtestsrc} -@anchor{smptebars} -@anchor{smptehdbars} -@anchor{testsrc} -@section color, haldclutsrc, nullsrc, rgbtestsrc, smptebars, smptehdbars, testsrc - -The @code{color} source provides an uniformly colored input. - -The @code{haldclutsrc} source provides an identity Hald CLUT. See also -@ref{haldclut} filter. - -The @code{nullsrc} source returns unprocessed video frames. It is -mainly useful to be employed in analysis / debugging tools, or as the -source for filters which ignore the input data. - -The @code{rgbtestsrc} source generates an RGB test pattern useful for -detecting RGB vs BGR issues. You should see a red, green and blue -stripe from top to bottom. - -The @code{smptebars} source generates a color bars pattern, based on -the SMPTE Engineering Guideline EG 1-1990. - -The @code{smptehdbars} source generates a color bars pattern, based on -the SMPTE RP 219-2002. - -The @code{testsrc} source generates a test video pattern, showing a -color pattern, a scrolling gradient and a timestamp. This is mainly -intended for testing purposes. - -The sources accept the following options: - -@table @option - -@item color, c -Specify the color of the source, only available in the @code{color} -source. For the syntax of this option, check the "Color" section in the -ffmpeg-utils manual. - -@item level -Specify the level of the Hald CLUT, only available in the @code{haldclutsrc} -source. A level of @code{N} generates a picture of @code{N*N*N} by @code{N*N*N} -pixels to be used as identity matrix for 3D lookup tables. Each component is -coded on a @code{1/(N*N)} scale. - -@item size, s -Specify the size of the sourced video. For the syntax of this option, check the -"Video size" section in the ffmpeg-utils manual. The default value is -"320x240". - -This option is not available with the @code{haldclutsrc} filter. - -@item rate, r -Specify the frame rate of the sourced video, as the number of frames -generated per second. It has to be a string in the format -@var{frame_rate_num}/@var{frame_rate_den}, an integer number, a float -number or a valid video frame rate abbreviation. The default value is -"25". - -@item sar -Set the sample aspect ratio of the sourced video. - -@item duration, d -Set the video duration of the sourced video. The accepted syntax is: -@example -[-]HH[:MM[:SS[.m...]]] -[-]S+[.m...] -@end example -See also the function @code{av_parse_time()}. - -If not specified, or the expressed duration is negative, the video is -supposed to be generated forever. - -@item decimals, n -Set the number of decimals to show in the timestamp, only available in the -@code{testsrc} source. - -The displayed timestamp value will correspond to the original -timestamp value multiplied by the power of 10 of the specified -value. Default value is 0. 
-@end table
-
-For example, the following:
-@example
-testsrc=duration=5.3:size=qcif:rate=10
-@end example
-
-will generate a video with a duration of 5.3 seconds, with a size of
-176x144 and a frame rate of 10 frames per second.
-
-The following graph description will generate a red source
-with an opacity of 0.2, with a size of "qcif" and a frame rate of 10
-frames per second:
-@example
-color=c=red@@0.2:s=qcif:r=10
-@end example
-
-If the input content is to be ignored, @code{nullsrc} can be used. The
-following command generates noise in the luminance plane by employing
-the @code{geq} filter:
-@example
-nullsrc=s=256x256, geq=random(1)*255:128:128
-@end example
-
-@subsection Commands
-
-The @code{color} source supports the following commands:
-
-@table @option
-@item c, color
-Set the color of the created image. Accepts the same syntax as the
-corresponding @option{color} option.
-@end table
-
-@c man end VIDEO SOURCES
-
-@chapter Video Sinks
-@c man begin VIDEO SINKS
-
-Below is a description of the currently available video sinks.
-
-@section buffersink
-
-Buffer video frames, and make them available to the end of the filter
-graph.
-
-This sink is mainly intended for programmatic use, in particular
-through the interface defined in @file{libavfilter/buffersink.h}
-or the options system.
-
-It accepts a pointer to an AVBufferSinkContext structure, which
-defines the incoming buffers' formats, to be passed as the opaque
-parameter to @code{avfilter_init_filter} for initialization.
-
-@section nullsink
-
-Null video sink: it does absolutely nothing with the input video. It is
-mainly useful as a template and to be employed in analysis / debugging
-tools.
-
-@c man end VIDEO SINKS
-
-@chapter Multimedia Filters
-@c man begin MULTIMEDIA FILTERS
-
-Below is a description of the currently available multimedia filters.
-
-@section avectorscope
-
-Convert input audio to a video output, representing the audio vector
-scope.
-
-The filter is used to measure the difference between channels of a stereo
-audio stream. A monaural signal, consisting of identical left and right
-signals, results in a straight vertical line. Any stereo separation is visible
-as a deviation from this line, creating a Lissajous figure.
-If a straight (or nearly straight) horizontal line appears, this
-indicates that the left and right channels are out of phase.
-
-The filter accepts the following options:
-
-@table @option
-@item mode, m
-Set the vectorscope mode.
-
-Available values are:
-@table @samp
-@item lissajous
-Lissajous rotated by 45 degrees.
-
-@item lissajous_xy
-Same as above but not rotated.
-@end table
-
-Default value is @samp{lissajous}.
-
-@item size, s
-Set the video size for the output. For the syntax of this option, check the "Video size"
-section in the ffmpeg-utils manual. Default value is @code{400x400}.
-
-@item rate, r
-Set the output frame rate. Default value is @code{25}.
-
-@item rc
-@item gc
-@item bc
-Specify the red, green and blue contrast. Default values are @code{40}, @code{160} and @code{80}.
-Allowed range is @code{[0, 255]}.
-
-@item rf
-@item gf
-@item bf
-Specify the red, green and blue fade. Default values are @code{15}, @code{10} and @code{5}.
-Allowed range is @code{[0, 255]}.
-
-@item zoom
-Set the zoom factor. Default value is @code{1}. Allowed range is @code{[1, 10]}.
-@end table
-
-@subsection Examples
-
-@itemize
-@item
-Complete example using @command{ffplay}:
-@example
-ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
-             [a] avectorscope=zoom=1.3:rc=2:gc=200:bc=10:rf=1:gf=8:bf=7 [out0]'
-@end example
-@end itemize
-
-@section concat
-
-Concatenate audio and video streams, joining them together one after the
-other.
-
-The filter works on segments of synchronized video and audio streams. All
-segments must have the same number of streams of each type, and that will
-also be the number of streams at output.
-
-The filter accepts the following options:
-
-@table @option
-
-@item n
-Set the number of segments. Default is 2.
-
-@item v
-Set the number of output video streams, that is also the number of video
-streams in each segment. Default is 1.
-
-@item a
-Set the number of output audio streams, that is also the number of audio
-streams in each segment. Default is 0.
-
-@item unsafe
-Activate unsafe mode: do not fail if segments have a different format.
-
-@end table
-
-The filter has @var{v}+@var{a} outputs: first @var{v} video outputs, then
-@var{a} audio outputs.
-
-There are @var{n}x(@var{v}+@var{a}) inputs: first the inputs for the first
-segment, in the same order as the outputs, then the inputs for the second
-segment, etc.
-
-Related streams do not always have exactly the same duration, for various
-reasons including codec frame size or sloppy authoring. For that reason,
-related synchronized streams (e.g. a video and its audio track) should be
-concatenated at once. The concat filter will use the duration of the longest
-stream in each segment (except the last one), and if necessary pad shorter
-audio streams with silence.
-
-For this filter to work correctly, all segments must start at timestamp 0.
-
-All corresponding streams must have the same parameters in all segments; the
-filtering system will automatically select a common pixel format for video
-streams, and a common sample format, sample rate and channel layout for
-audio streams, but other settings, such as resolution, must be converted
-explicitly by the user.
-
-Different frame rates are acceptable but will result in a variable frame rate
-at the output; be sure to configure the output file to handle it.
-
-@subsection Examples
-
-@itemize
-@item
-Concatenate an opening, an episode and an ending, all in a bilingual version
-(video in stream 0, audio in streams 1 and 2):
-@example
-ffmpeg -i opening.mkv -i episode.mkv -i ending.mkv -filter_complex \
-  '[0:0] [0:1] [0:2] [1:0] [1:1] [1:2] [2:0] [2:1] [2:2]
-   concat=n=3:v=1:a=2 [v] [a1] [a2]' \
-  -map '[v]' -map '[a1]' -map '[a2]' output.mkv
-@end example
-
-@item
-Concatenate two parts, handling audio and video separately, using the
-(a)movie sources, and adjusting the resolution:
-@example
-movie=part1.mp4, scale=512:288 [v1] ; amovie=part1.mp4 [a1] ;
-movie=part2.mp4, scale=512:288 [v2] ; amovie=part2.mp4 [a2] ;
-[v1] [v2] concat [outv] ; [a1] [a2] concat=v=0:a=1 [outa]
-@end example
-Note that a desync will happen at the stitch if the audio and video streams
-do not have exactly the same duration in the first file.
-
-@end itemize
-
-@section ebur128
-
-EBU R128 scanner filter. This filter takes an audio stream as input and outputs
-it unchanged. By default, it logs a message at a frequency of 10Hz with the
-Momentary loudness (identified by @code{M}), Short-term loudness (@code{S}),
-Integrated loudness (@code{I}) and Loudness Range (@code{LRA}).
-
-The filter also has a video output (see the @var{video} option) with a real
-time graph to observe the loudness evolution. The graph contains the logged
-message mentioned above, so it is no longer printed when this option is set,
-unless verbose logging is enabled. The main graphing area contains the
-short-term loudness (3 seconds of analysis), and the gauge on the right is for
-the momentary loudness (400 milliseconds).
-
-More information about the Loudness Recommendation EBU R128 can be found at
-@url{http://tech.ebu.ch/loudness}.
-
-The filter accepts the following options:
-
-@table @option
-
-@item video
-Activate the video output. The audio stream is passed unchanged whether this
-option is set or not. The video stream will be the first output stream if
-activated. Default is @code{0}.
-
-@item size
-Set the video size. This option is for video only. For the syntax of this
-option, check the "Video size" section in the ffmpeg-utils manual. Default
-and minimum resolution is @code{640x480}.
-
-@item meter
-Set the EBU scale meter. Default is @code{9}. Common values are @code{9} and
-@code{18}, respectively for EBU scale meter +9 and EBU scale meter +18. Any
-other integer value within this range is allowed.
-
-@item metadata
-Set metadata injection. If set to @code{1}, the audio input will be segmented
-into 100ms output frames, each of them containing various loudness information
-in metadata. All the metadata keys are prefixed with @code{lavfi.r128.}.
-
-Default is @code{0}.
-
-@item framelog
-Force the frame logging level.
-
-Available values are:
-@table @samp
-@item info
-information logging level
-@item verbose
-verbose logging level
-@end table
-
-By default, the logging level is set to @var{info}. If the @option{video} or
-the @option{metadata} options are set, it switches to @var{verbose}.
-@end table
-
-@subsection Examples
-
-@itemize
-@item
-Real-time graph using @command{ffplay}, with an EBU scale meter +18:
-@example
-ffplay -f lavfi -i "amovie=input.mp3,ebur128=video=1:meter=18 [out0][out1]"
-@end example
-
-@item
-Run an analysis with @command{ffmpeg}:
-@example
-ffmpeg -nostats -i input.mp3 -filter_complex ebur128 -f null -
-@end example
-@end itemize
-
-@section interleave, ainterleave
-
-Temporally interleave frames from several inputs.
-
-@code{interleave} works with video inputs, @code{ainterleave} with audio.
-
-These filters read frames from several inputs and send the oldest
-queued frame to the output.
-
-Input streams must have well defined, monotonically increasing frame
-timestamp values.
-
-In order to submit one frame to the output, these filters need to queue
-at least one frame for each input, so they cannot work if one input
-has not yet terminated and will not receive further frames.
-
-For example, consider the case where one input is a @code{select} filter
-which always drops its input frames. The @code{interleave} filter will keep
-reading from that input, but it will never be able to send new frames
-to the output until that input sends an end-of-stream signal.
-
-Also, depending on the input synchronization, the filters will drop
-frames if one input receives more frames than the other ones and
-the queue is already filled.
-
-These filters accept the following options:
-
-@table @option
-@item nb_inputs, n
-Set the number of different inputs; it is 2 by default.
-@end table
-
-@subsection Examples
-
-@itemize
-@item
-Interleave frames belonging to different streams using @command{ffmpeg}:
-@example
-ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
-@end example
-
-@item
-Add a flickering blur effect:
-@example
-select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
-@end example
-@end itemize
-
-@section perms, aperms
-
-Set read/write permissions for the output frames.
-
-These filters are mainly aimed at developers, to test the direct path in the
-following filter in the filtergraph.
-
-The filters accept the following options:
-
-@table @option
-@item mode
-Select the permissions mode.
-
-It accepts the following values:
-@table @samp
-@item none
-Do nothing. This is the default.
-@item ro
-Set all the output frames read-only.
-@item rw
-Set all the output frames directly writable.
-@item toggle
-Make the frame read-only if writable, and writable if read-only.
-@item random
-Set each output frame read-only or writable randomly.
-@end table
-
-@item seed
-Set the seed for the @var{random} mode; it must be an integer between
-@code{0} and @code{UINT32_MAX}. If not specified, or if explicitly set to
-@code{-1}, the filter will try to use a good random seed on a best effort
-basis.
-@end table
-
-Note: if a filter is auto-inserted between the permission filter and the
-following one, the permission might not be received as expected by that
-following filter. Inserting a @ref{format} or @ref{aformat} filter before the
-perms/aperms filter can avoid this problem.
-
-@section select, aselect
-
-Select frames to pass to the output.
-
-This filter accepts the following options:
-
-@table @option
-
-@item expr, e
-Set the expression, which is evaluated for each input frame.
-
-If the expression evaluates to zero, the frame is discarded.
-
-If the evaluation result is negative or NaN, the frame is sent to the
-first output; otherwise it is sent to the output with index
-@code{ceil(val)-1}, assuming that the input index starts from 0.
-
-For example, a value of @code{1.2} corresponds to the output with index
-@code{ceil(1.2)-1 = 2-1 = 1}, that is, the second output.
-
-@item outputs, n
-Set the number of outputs. The output to which to send the selected
-frame is based on the result of the evaluation. Default value is 1.
-@end table - -The expression can contain the following constants: - -@table @option -@item n -the sequential number of the filtered frame, starting from 0 - -@item selected_n -the sequential number of the selected frame, starting from 0 - -@item prev_selected_n -the sequential number of the last selected frame, NAN if undefined - -@item TB -timebase of the input timestamps - -@item pts -the PTS (Presentation TimeStamp) of the filtered video frame, -expressed in @var{TB} units, NAN if undefined - -@item t -the PTS (Presentation TimeStamp) of the filtered video frame, -expressed in seconds, NAN if undefined - -@item prev_pts -the PTS of the previously filtered video frame, NAN if undefined - -@item prev_selected_pts -the PTS of the last previously filtered video frame, NAN if undefined - -@item prev_selected_t -the PTS of the last previously selected video frame, NAN if undefined - -@item start_pts -the PTS of the first video frame in the video, NAN if undefined - -@item start_t -the time of the first video frame in the video, NAN if undefined - -@item pict_type @emph{(video only)} -the type of the filtered frame, can assume one of the following -values: -@table @option -@item I -@item P -@item B -@item S -@item SI -@item SP -@item BI -@end table - -@item interlace_type @emph{(video only)} -the frame interlace type, can assume one of the following values: -@table @option -@item PROGRESSIVE -the frame is progressive (not interlaced) -@item TOPFIRST -the frame is top-field-first -@item BOTTOMFIRST -the frame is bottom-field-first -@end table - -@item consumed_sample_n @emph{(audio only)} -the number of selected samples before the current frame - -@item samples_n @emph{(audio only)} -the number of samples in the current frame - -@item sample_rate @emph{(audio only)} -the input sample rate - -@item key -1 if the filtered frame is a key-frame, 0 otherwise - -@item pos -the position in the file of the filtered frame, -1 if the information -is not available (e.g. for synthetic video) - -@item scene @emph{(video only)} -value between 0 and 1 to indicate a new scene; a low value reflects a low -probability for the current frame to introduce a new scene, while a higher -value means the current frame is more likely to be one (see the example below) - -@end table - -The default value of the select expression is "1". - -@subsection Examples - -@itemize -@item -Select all frames in input: -@example -select -@end example - -The example above is the same as: -@example -select=1 -@end example - -@item -Skip all frames: -@example -select=0 -@end example - -@item -Select only I-frames: -@example -select='eq(pict_type\,I)' -@end example - -@item -Select one frame every 100: -@example -select='not(mod(n\,100))' -@end example - -@item -Select only frames contained in the 10-20 time interval: -@example -select=between(t\,10\,20) -@end example - -@item -Select only I frames contained in the 10-20 time interval: -@example -select=between(t\,10\,20)*eq(pict_type\,I) -@end example - -@item -Select frames with a minimum distance of 10 seconds: -@example -select='isnan(prev_selected_t)+gte(t-prev_selected_t\,10)' -@end example - -@item -Use aselect to select only audio frames with samples number > 100: -@example -aselect='gt(samples_n\,100)' -@end example - -@item -Create a mosaic of the first scenes: -@example -ffmpeg -i video.avi -vf select='gt(scene\,0.4)',scale=160:120,tile -frames:v 1 preview.png -@end example - -Comparing @var{scene} against a value between 0.3 and 0.5 is generally a sane -choice. 
- -@item -Send even and odd frames to separate outputs, and compose them: -@example -select=n=2:e='mod(n, 2)+1' [odd][even]; [odd] pad=h=2*ih [tmp]; [tmp][even] overlay=y=h -@end example -@end itemize - -@section sendcmd, asendcmd - -Send commands to filters in the filtergraph. - -These filters read commands to be sent to other filters in the -filtergraph. - -@code{sendcmd} must be inserted between two video filters, -@code{asendcmd} must be inserted between two audio filters, but apart -from that they act the same way. - -The specification of commands can be provided in the filter arguments -with the @var{commands} option, or in a file specified by the -@var{filename} option. - -These filters accept the following options: -@table @option -@item commands, c -Set the commands to be read and sent to the other filters. -@item filename, f -Set the filename of the commands to be read and sent to the other -filters. -@end table - -@subsection Commands syntax - -A commands description consists of a sequence of interval -specifications, comprising a list of commands to be executed when a -particular event related to that interval occurs. The occurring event -is typically the current frame time entering or leaving a given time -interval. - -An interval is specified by the following syntax: -@example -@var{START}[-@var{END}] @var{COMMANDS}; -@end example - -The time interval is specified by the @var{START} and @var{END} times. -@var{END} is optional and defaults to the maximum time. - -The current frame time is considered within the specified interval if -it is included in the interval [@var{START}, @var{END}), that is when -the time is greater or equal to @var{START} and is lesser than -@var{END}. - -@var{COMMANDS} consists of a sequence of one or more command -specifications, separated by ",", relating to that interval. The -syntax of a command specification is given by: -@example -[@var{FLAGS}] @var{TARGET} @var{COMMAND} @var{ARG} -@end example - -@var{FLAGS} is optional and specifies the type of events relating to -the time interval which enable sending the specified command, and must -be a non-null sequence of identifier flags separated by "+" or "|" and -enclosed between "[" and "]". - -The following flags are recognized: -@table @option -@item enter -The command is sent when the current frame timestamp enters the -specified interval. In other words, the command is sent when the -previous frame timestamp was not in the given interval, and the -current is. - -@item leave -The command is sent when the current frame timestamp leaves the -specified interval. In other words, the command is sent when the -previous frame timestamp was in the given interval, and the -current is not. -@end table - -If @var{FLAGS} is not specified, a default value of @code{[enter]} is -assumed. - -@var{TARGET} specifies the target of the command, usually the name of -the filter class or a specific filter instance name. - -@var{COMMAND} specifies the name of the command for the target filter. - -@var{ARG} is optional and specifies the optional list of argument for -the given @var{COMMAND}. - -Between one interval specification and another, whitespaces, or -sequences of characters starting with @code{#} until the end of line, -are ignored and can be used to annotate comments. 
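-
-As an illustration of this syntax, an interval specification like the
-following (reusing the @code{hue} commands shown in the Examples below)
-would ask the @code{hue} filter to desaturate the picture when the frame
-time enters the interval 10-20, and to restore the saturation when it
-leaves it:
-@example
-10.0-20.0 [enter] hue s 0,
-          [leave] hue s 1;
-@end example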
- -A simplified BNF description of the commands specification syntax -follows: -@example -@var{COMMAND_FLAG} ::= "enter" | "leave" -@var{COMMAND_FLAGS} ::= @var{COMMAND_FLAG} [(+|"|")@var{COMMAND_FLAG}] -@var{COMMAND} ::= ["[" @var{COMMAND_FLAGS} "]"] @var{TARGET} @var{COMMAND} [@var{ARG}] -@var{COMMANDS} ::= @var{COMMAND} [,@var{COMMANDS}] -@var{INTERVAL} ::= @var{START}[-@var{END}] @var{COMMANDS} -@var{INTERVALS} ::= @var{INTERVAL}[;@var{INTERVALS}] -@end example - -@subsection Examples - -@itemize -@item -Specify audio tempo change at second 4: -@example -asendcmd=c='4.0 atempo tempo 1.5',atempo -@end example - -@item -Specify a list of drawtext and hue commands in a file. -@example -# show text in the interval 5-10 -5.0-10.0 [enter] drawtext reinit 'fontfile=FreeSerif.ttf:text=hello world', - [leave] drawtext reinit 'fontfile=FreeSerif.ttf:text='; - -# desaturate the image in the interval 15-20 -15.0-20.0 [enter] hue s 0, - [enter] drawtext reinit 'fontfile=FreeSerif.ttf:text=nocolor', - [leave] hue s 1, - [leave] drawtext reinit 'fontfile=FreeSerif.ttf:text=color'; - -# apply an exponential saturation fade-out effect, starting from time 25 -25 [enter] hue s exp(25-t) -@end example - -A filtergraph allowing to read and process the above command list -stored in a file @file{test.cmd}, can be specified with: -@example -sendcmd=f=test.cmd,drawtext=fontfile=FreeSerif.ttf:text='',hue -@end example -@end itemize - -@anchor{setpts} -@section setpts, asetpts - -Change the PTS (presentation timestamp) of the input frames. - -@code{setpts} works on video frames, @code{asetpts} on audio frames. - -This filter accepts the following options: - -@table @option - -@item expr -The expression which is evaluated for each frame to construct its timestamp. - -@end table - -The expression is evaluated through the eval API and can contain the following -constants: - -@table @option -@item FRAME_RATE -frame rate, only defined for constant frame-rate video - -@item PTS -the presentation timestamp in input - -@item N -the count of the input frame for video or the number of consumed samples, -not including the current frame for audio, starting from 0. - -@item NB_CONSUMED_SAMPLES -the number of consumed samples, not including the current frame (only -audio) - -@item NB_SAMPLES, S -the number of samples in the current frame (only audio) - -@item SAMPLE_RATE, SR -audio sample rate - -@item STARTPTS -the PTS of the first frame - -@item STARTT -the time in seconds of the first frame - -@item INTERLACED -tell if the current frame is interlaced - -@item T -the time in seconds of the current frame - -@item POS -original position in the file of the frame, or undefined if undefined -for the current frame - -@item PREV_INPTS -previous input PTS - -@item PREV_INT -previous input time in seconds - -@item PREV_OUTPTS -previous output PTS - -@item PREV_OUTT -previous output time in seconds - -@item RTCTIME -wallclock (RTC) time in microseconds. This is deprecated, use time(0) -instead. 
- -@item RTCSTART -wallclock (RTC) time at the start of the movie in microseconds - -@item TB -timebase of the input timestamps - -@end table - -@subsection Examples - -@itemize -@item -Start counting PTS from zero -@example -setpts=PTS-STARTPTS -@end example - -@item -Apply fast motion effect: -@example -setpts=0.5*PTS -@end example - -@item -Apply slow motion effect: -@example -setpts=2.0*PTS -@end example - -@item -Set fixed rate of 25 frames per second: -@example -setpts=N/(25*TB) -@end example - -@item -Set fixed rate 25 fps with some jitter: -@example -setpts='1/(25*TB) * (N + 0.05 * sin(N*2*PI/25))' -@end example - -@item -Apply an offset of 10 seconds to the input PTS: -@example -setpts=PTS+10/TB -@end example - -@item -Generate timestamps from a "live source" and rebase onto the current timebase: -@example -setpts='(RTCTIME - RTCSTART) / (TB * 1000000)' -@end example - -@item -Generate timestamps by counting samples: -@example -asetpts=N/SR/TB -@end example - -@end itemize - -@section settb, asettb - -Set the timebase to use for the output frames timestamps. -It is mainly useful for testing timebase configuration. - -This filter accepts the following options: - -@table @option - -@item expr, tb -The expression which is evaluated into the output timebase. - -@end table - -The value for @option{tb} is an arithmetic expression representing a -rational. The expression can contain the constants "AVTB" (the default -timebase), "intb" (the input timebase) and "sr" (the sample rate, -audio only). Default value is "intb". - -@subsection Examples - -@itemize -@item -Set the timebase to 1/25: -@example -settb=expr=1/25 -@end example - -@item -Set the timebase to 1/10: -@example -settb=expr=0.1 -@end example - -@item -Set the timebase to 1001/1000: -@example -settb=1+0.001 -@end example - -@item -Set the timebase to 2*intb: -@example -settb=2*intb -@end example - -@item -Set the default timebase value: -@example -settb=AVTB -@end example -@end itemize - -@section showspectrum - -Convert input audio to a video output, representing the audio frequency -spectrum. - -The filter accepts the following options: - -@table @option -@item size, s -Specify the video size for the output. For the syntax of this option, check -the "Video size" section in the ffmpeg-utils manual. Default value is -@code{640x512}. - -@item slide -Specify if the spectrum should slide along the window. Default value is -@code{0}. - -@item mode -Specify display mode. - -It accepts the following values: -@table @samp -@item combined -all channels are displayed in the same row -@item separate -all channels are displayed in separate rows -@end table - -Default value is @samp{combined}. - -@item color -Specify display color mode. - -It accepts the following values: -@table @samp -@item channel -each channel is displayed in a separate color -@item intensity -each channel is is displayed using the same color scheme -@end table - -Default value is @samp{channel}. - -@item scale -Specify scale used for calculating intensity color values. - -It accepts the following values: -@table @samp -@item lin -linear -@item sqrt -square root, default -@item cbrt -cubic root -@item log -logarithmic -@end table - -Default value is @samp{sqrt}. - -@item saturation -Set saturation modifier for displayed colors. Negative values provide -alternative color scheme. @code{0} is no saturation at all. -Saturation must be in [-10.0, 10.0] range. -Default value is @code{1}. - -@item win_func -Set window function. 
- -It accepts the following values: -@table @samp -@item none -No samples pre-processing (do not expect this to be faster) -@item hann -Hann window -@item hamming -Hamming window -@item blackman -Blackman window -@end table - -Default value is @code{hann}. -@end table - -The usage is very similar to the showwaves filter; see the examples in that -section. - -@subsection Examples - -@itemize -@item -Large window with logarithmic color scaling: -@example -showspectrum=s=1280x480:scale=log -@end example - -@item -Complete example for a colored and sliding spectrum per channel using @command{ffplay}: -@example -ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1]; - [a] showspectrum=mode=separate:color=intensity:slide=1:scale=cbrt [out0]' -@end example -@end itemize - -@section showwaves - -Convert input audio to a video output, representing the samples waves. - -The filter accepts the following options: - -@table @option -@item size, s -Specify the video size for the output. For the syntax of this option, check -the "Video size" section in the ffmpeg-utils manual. Default value -is "600x240". - -@item mode -Set display mode. - -Available values are: -@table @samp -@item point -Draw a point for each sample. - -@item line -Draw a vertical line for each sample. -@end table - -Default value is @code{point}. - -@item n -Set the number of samples which are printed on the same column. A -larger value will decrease the frame rate. Must be a positive -integer. This option can be set only if the value for @var{rate} -is not explicitly specified. - -@item rate, r -Set the (approximate) output frame rate. This is done by setting the -option @var{n}. Default value is "25". - -@end table - -@subsection Examples - -@itemize -@item -Output the input file audio and the corresponding video representation -at the same time: -@example -amovie=a.mp3,asplit[out0],showwaves[out1] -@end example - -@item -Create a synthetic signal and show it with showwaves, forcing a -frame rate of 30 frames per second: -@example -aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r=30[out1] -@end example -@end itemize - -@section split, asplit - -Split input into several identical outputs. - -@code{asplit} works with audio input, @code{split} with video. - -The filter accepts a single parameter which specifies the number of outputs. If -unspecified, it defaults to 2. - -@subsection Examples - -@itemize -@item -Create two separate outputs from the same input: -@example -[in] split [out0][out1] -@end example - -@item -To create 3 or more outputs, you need to specify the number of -outputs, like in: -@example -[in] asplit=3 [out0][out1][out2] -@end example - -@item -Create two separate outputs from the same input, one cropped and -one padded: -@example -[in] split [splitout1][splitout2]; -[splitout1] crop=100:100:0:0 [cropout]; -[splitout2] pad=200:200:100:100 [padout]; -@end example - -@item -Create 5 copies of the input audio with @command{ffmpeg}: -@example -ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT -@end example -@end itemize - -@section zmq, azmq - -Receive commands sent through a libzmq client, and forward them to -filters in the filtergraph. - -@code{zmq} and @code{azmq} work as a pass-through filters. @code{zmq} -must be inserted between two video filters, @code{azmq} between two -audio filters. - -To enable these filters you need to install the libzmq library and -headers and configure FFmpeg with @code{--enable-libzmq}. 
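-
-For example, assuming libzmq and its headers are already installed on the
-system, the configure invocation could look like:
-@example
-./configure --enable-libzmq
-@end example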
- -For more information about libzmq see: -@url{http://www.zeromq.org/} - -The @code{zmq} and @code{azmq} filters work as a libzmq server, which -receives messages sent through a network interface defined by the -@option{bind_address} option. - -The received message must be in the form: -@example -@var{TARGET} @var{COMMAND} [@var{ARG}] -@end example - -@var{TARGET} specifies the target of the command, usually the name of -the filter class or a specific filter instance name. - -@var{COMMAND} specifies the name of the command for the target filter. - -@var{ARG} is optional and specifies the optional argument list for the -given @var{COMMAND}. - -Upon reception, the message is processed and the corresponding command -is injected into the filtergraph. Depending on the result, the filter -will send a reply to the client, adopting the format: -@example -@var{ERROR_CODE} @var{ERROR_REASON} -@var{MESSAGE} -@end example - -@var{MESSAGE} is optional. - -@subsection Examples - -Look at @file{tools/zmqsend} for an example of a zmq client which can -be used to send commands processed by these filters. - -Consider the following filtergraph generated by @command{ffplay} -@example -ffplay -dumpgraph 1 -f lavfi " -color=s=100x100:c=red [l]; -color=s=100x100:c=blue [r]; -nullsrc=s=200x100, zmq [bg]; -[bg][l] overlay [bg+l]; -[bg+l][r] overlay=x=100 " -@end example - -To change the color of the left side of the video, the following -command can be used: -@example -echo Parsed_color_0 c yellow | tools/zmqsend -@end example - -To change the right side: -@example -echo Parsed_color_1 c pink | tools/zmqsend -@end example - -@c man end MULTIMEDIA FILTERS - -@chapter Multimedia Sources -@c man begin MULTIMEDIA SOURCES - -Below is a description of the currently available multimedia sources. - -@section amovie - -This is the same as @ref{movie} source, except it selects an audio -stream by default. - -@anchor{movie} -@section movie - -Read audio and/or video stream(s) from a movie container. - -This filter accepts the following options: - -@table @option -@item filename -The name of the resource to read (not necessarily a file but also a device or a -stream accessed through some protocol). - -@item format_name, f -Specifies the format assumed for the movie to read, and can be either -the name of a container or an input device. If not specified the -format is guessed from @var{movie_name} or by probing. - -@item seek_point, sp -Specifies the seek point in seconds, the frames will be output -starting from this seek point, the parameter is evaluated with -@code{av_strtod} so the numerical value may be suffixed by an IS -postfix. Default value is "0". - -@item streams, s -Specifies the streams to read. Several streams can be specified, -separated by "+". The source will then have as many outputs, in the -same order. The syntax is explained in the ``Stream specifiers'' -section in the ffmpeg manual. Two special names, "dv" and "da" specify -respectively the default (best suited) video and audio stream. Default -is "dv", or "da" if the filter is called as "amovie". - -@item stream_index, si -Specifies the index of the video stream to read. If the value is -1, -the best suited video stream will be automatically selected. Default -value is "-1". Deprecated. If the filter is called "amovie", it will select -audio instead of video. - -@item loop -Specifies how many times to read the stream in sequence. -If the value is less than 1, the stream will be read again and again. -Default value is "1". 
- -Note that when the movie is looped the source timestamps are not -changed, so it will generate non monotonically increasing timestamps. -@end table - -This filter allows to overlay a second video on top of main input of -a filtergraph as shown in this graph: -@example -input -----------> deltapts0 --> overlay --> output - ^ - | -movie --> scale--> deltapts1 -------+ -@end example - -@subsection Examples - -@itemize -@item -Skip 3.2 seconds from the start of the avi file in.avi, and overlay it -on top of the input labelled as "in": -@example -movie=in.avi:seek_point=3.2, scale=180:-1, setpts=PTS-STARTPTS [over]; -[in] setpts=PTS-STARTPTS [main]; -[main][over] overlay=16:16 [out] -@end example - -@item -Read from a video4linux2 device, and overlay it on top of the input -labelled as "in": -@example -movie=/dev/video0:f=video4linux2, scale=180:-1, setpts=PTS-STARTPTS [over]; -[in] setpts=PTS-STARTPTS [main]; -[main][over] overlay=16:16 [out] -@end example - -@item -Read the first video stream and the audio stream with id 0x81 from -dvd.vob; the video is connected to the pad named "video" and the audio is -connected to the pad named "audio": -@example -movie=dvd.vob:s=v:0+#0x81 [video] [audio] -@end example -@end itemize - -@c man end MULTIMEDIA SOURCES diff --git a/ffmpeg/doc/general.texi b/ffmpeg/doc/general.texi deleted file mode 100644 index 0ac6455..0000000 --- a/ffmpeg/doc/general.texi +++ /dev/null @@ -1,1060 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle General Documentation -@titlepage -@center @titlefont{General Documentation} -@end titlepage - -@top - -@contents - -@chapter External libraries - -FFmpeg can be hooked up with a number of external libraries to add support -for more formats. None of them are used by default, their use has to be -explicitly requested by passing the appropriate flags to -@command{./configure}. - -@section OpenJPEG - -FFmpeg can use the OpenJPEG libraries for encoding/decoding J2K videos. Go to -@url{http://www.openjpeg.org/} to get the libraries and follow the installation -instructions. To enable using OpenJPEG in FFmpeg, pass @code{--enable-libopenjpeg} to -@file{./configure}. - - -@section OpenCORE, VisualOn, and Fraunhofer libraries - -Spun off Google Android sources, OpenCore, VisualOn and Fraunhofer -libraries provide encoders for a number of audio codecs. - -@float NOTE -OpenCORE and VisualOn libraries are under the Apache License 2.0 -(see @url{http://www.apache.org/licenses/LICENSE-2.0} for details), which is -incompatible to the LGPL version 2.1 and GPL version 2. You have to -upgrade FFmpeg's license to LGPL version 3 (or if you have enabled -GPL components, GPL version 3) by passing @code{--enable-version3} to configure in -order to use it. - -The Fraunhofer AAC library is licensed under a license incompatible to the GPL -and is not known to be compatible to the LGPL. Therefore, you have to pass -@code{--enable-nonfree} to configure to use it. -@end float - -@subsection OpenCORE AMR - -FFmpeg can make use of the OpenCORE libraries for AMR-NB -decoding/encoding and AMR-WB decoding. - -Go to @url{http://sourceforge.net/projects/opencore-amr/} and follow the -instructions for installing the libraries. -Then pass @code{--enable-libopencore-amrnb} and/or -@code{--enable-libopencore-amrwb} to configure to enable them. - -@subsection VisualOn AAC encoder library - -FFmpeg can make use of the VisualOn AACenc library for AAC encoding. 
- -Go to @url{http://sourceforge.net/projects/opencore-amr/} and follow the -instructions for installing the library. -Then pass @code{--enable-libvo-aacenc} to configure to enable it. - -@subsection VisualOn AMR-WB encoder library - -FFmpeg can make use of the VisualOn AMR-WBenc library for AMR-WB encoding. - -Go to @url{http://sourceforge.net/projects/opencore-amr/} and follow the -instructions for installing the library. -Then pass @code{--enable-libvo-amrwbenc} to configure to enable it. - -@subsection Fraunhofer AAC library - -FFmpeg can make use of the Fraunhofer AAC library for AAC encoding. - -Go to @url{http://sourceforge.net/projects/opencore-amr/} and follow the -instructions for installing the library. -Then pass @code{--enable-libfdk-aac} to configure to enable it. - -@section LAME - -FFmpeg can make use of the LAME library for MP3 encoding. - -Go to @url{http://lame.sourceforge.net/} and follow the -instructions for installing the library. -Then pass @code{--enable-libmp3lame} to configure to enable it. - -@section TwoLAME - -FFmpeg can make use of the TwoLAME library for MP2 encoding. - -Go to @url{http://www.twolame.org/} and follow the -instructions for installing the library. -Then pass @code{--enable-libtwolame} to configure to enable it. - -@section libvpx - -FFmpeg can make use of the libvpx library for VP8/VP9 encoding. - -Go to @url{http://www.webmproject.org/} and follow the instructions for -installing the library. Then pass @code{--enable-libvpx} to configure to -enable it. - -@section libwavpack - -FFmpeg can make use of the libwavpack library for WavPack encoding. - -Go to @url{http://www.wavpack.com/} and follow the instructions for -installing the library. Then pass @code{--enable-libwavpack} to configure to -enable it. - -@section x264 - -FFmpeg can make use of the x264 library for H.264 encoding. - -Go to @url{http://www.videolan.org/developers/x264.html} and follow the -instructions for installing the library. Then pass @code{--enable-libx264} to -configure to enable it. - -@float NOTE -x264 is under the GNU Public License Version 2 or later -(see @url{http://www.gnu.org/licenses/old-licenses/gpl-2.0.html} for -details), you must upgrade FFmpeg's license to GPL in order to use it. -@end float - -@section libilbc - -iLBC is a narrowband speech codec that has been made freely available -by Google as part of the WebRTC project. libilbc is a packaging friendly -copy of the iLBC codec. FFmpeg can make use of the libilbc library for -iLBC encoding and decoding. - -Go to @url{https://github.com/dekkers/libilbc} and follow the instructions for -installing the library. Then pass @code{--enable-libilbc} to configure to -enable it. - -@section libzvbi - -libzvbi is a VBI decoding library which can be used by FFmpeg to decode DVB -teletext pages and DVB teletext subtitles. - -Go to @url{http://sourceforge.net/projects/zapping/} and follow the instructions for -installing the library. Then pass @code{--enable-libzvbi} to configure to -enable it. - -@float NOTE -libzvbi is licensed under the GNU General Public License Version 2 or later -(see @url{http://www.gnu.org/licenses/old-licenses/gpl-2.0.html} for details), -you must upgrade FFmpeg's license to GPL in order to use it. -@end float - - -@chapter Supported File Formats, Codecs or Features - -You can use the @code{-formats} and @code{-codecs} options to have an exhaustive list. 
- -@section File Formats - -FFmpeg supports the following file formats through the @code{libavformat} -library: - -@multitable @columnfractions .4 .1 .1 .4 -@item Name @tab Encoding @tab Decoding @tab Comments -@item 4xm @tab @tab X - @tab 4X Technologies format, used in some games. -@item 8088flex TMV @tab @tab X -@item ACT Voice @tab @tab X - @tab contains G.729 audio -@item Adobe Filmstrip @tab X @tab X -@item Audio IFF (AIFF) @tab X @tab X -@item American Laser Games MM @tab @tab X - @tab Multimedia format used in games like Mad Dog McCree. -@item 3GPP AMR @tab X @tab X -@item Amazing Studio Packed Animation File @tab @tab X - @tab Multimedia format used in game Heart Of Darkness. -@item Apple HTTP Live Streaming @tab @tab X -@item Artworx Data Format @tab @tab X -@item ADP @tab @tab X - @tab Audio format used on the Nintendo Gamecube. -@item AFC @tab @tab X - @tab Audio format used on the Nintendo Gamecube. -@item ASF @tab X @tab X -@item AST @tab X @tab X - @tab Audio format used on the Nintendo Wii. -@item AVI @tab X @tab X -@item AviSynth @tab @tab X -@item AVR @tab @tab X - @tab Audio format used on Mac. -@item AVS @tab @tab X - @tab Multimedia format used by the Creature Shock game. -@item Beam Software SIFF @tab @tab X - @tab Audio and video format used in some games by Beam Software. -@item Bethesda Softworks VID @tab @tab X - @tab Used in some games from Bethesda Softworks. -@item Binary text @tab @tab X -@item Bink @tab @tab X - @tab Multimedia format used by many games. -@item Bitmap Brothers JV @tab @tab X - @tab Used in Z and Z95 games. -@item Brute Force & Ignorance @tab @tab X - @tab Used in the game Flash Traffic: City of Angels. -@item BRSTM @tab @tab X - @tab Audio format used on the Nintendo Wii. -@item BWF @tab X @tab X -@item CRI ADX @tab X @tab X - @tab Audio-only format used in console video games. -@item Discworld II BMV @tab @tab X -@item Interplay C93 @tab @tab X - @tab Used in the game Cyberia from Interplay. -@item Delphine Software International CIN @tab @tab X - @tab Multimedia format used by Delphine Software games. -@item CD+G @tab @tab X - @tab Video format used by CD+G karaoke disks -@item Commodore CDXL @tab @tab X - @tab Amiga CD video format -@item Core Audio Format @tab X @tab X - @tab Apple Core Audio Format -@item CRC testing format @tab X @tab -@item Creative Voice @tab X @tab X - @tab Created for the Sound Blaster Pro. -@item CRYO APC @tab @tab X - @tab Audio format used in some games by CRYO Interactive Entertainment. -@item D-Cinema audio @tab X @tab X -@item Deluxe Paint Animation @tab @tab X -@item DFA @tab @tab X - @tab This format is used in Chronomaster game -@item DV video @tab X @tab X -@item DXA @tab @tab X - @tab This format is used in the non-Windows version of the Feeble Files - game and different game cutscenes repacked for use with ScummVM. -@item Electronic Arts cdata @tab @tab X -@item Electronic Arts Multimedia @tab @tab X - @tab Used in various EA games; files have extensions like WVE and UV2. -@item Ensoniq Paris Audio File @tab @tab X -@item FFM (FFserver live feed) @tab X @tab X -@item Flash (SWF) @tab X @tab X -@item Flash 9 (AVM2) @tab X @tab X - @tab Only embedded audio is decoded. -@item FLI/FLC/FLX animation @tab @tab X - @tab .fli/.flc files -@item Flash Video (FLV) @tab X @tab X - @tab Macromedia Flash video files -@item framecrc testing format @tab X @tab -@item FunCom ISS @tab @tab X - @tab Audio format used in various games from FunCom like The Longest Journey. 
-@item G.723.1 @tab X @tab X -@item G.729 BIT @tab X @tab X -@item G.729 raw @tab @tab X -@item GIF Animation @tab X @tab X -@item GXF @tab X @tab X - @tab General eXchange Format SMPTE 360M, used by Thomson Grass Valley - playout servers. -@item HNM @tab @tab X - @tab Only version 4 supported, used in some games from Cryo Interactive -@item iCEDraw File @tab @tab X -@item ICO @tab X @tab X - @tab Microsoft Windows ICO -@item id Quake II CIN video @tab @tab X -@item id RoQ @tab X @tab X - @tab Used in Quake III, Jedi Knight 2 and other computer games. -@item IEC61937 encapsulation @tab X @tab X -@item IFF @tab @tab X - @tab Interchange File Format -@item iLBC @tab X @tab X -@item Interplay MVE @tab @tab X - @tab Format used in various Interplay computer games. -@item IV8 @tab @tab X - @tab A format generated by IndigoVision 8000 video server. -@item IVF (On2) @tab X @tab X - @tab A format used by libvpx -@item IRCAM @tab X @tab X -@item LATM @tab X @tab X -@item LMLM4 @tab @tab X - @tab Used by Linux Media Labs MPEG-4 PCI boards -@item LOAS @tab @tab X - @tab contains LATM multiplexed AAC audio -@item LVF @tab @tab X -@item LXF @tab @tab X - @tab VR native stream format, used by Leitch/Harris' video servers. -@item Matroska @tab X @tab X -@item Matroska audio @tab X @tab -@item FFmpeg metadata @tab X @tab X - @tab Metadata in text format. -@item MAXIS XA @tab @tab X - @tab Used in Sim City 3000; file extension .xa. -@item MD Studio @tab @tab X -@item Metal Gear Solid: The Twin Snakes @tab @tab X -@item Megalux Frame @tab @tab X - @tab Used by Megalux Ultimate Paint -@item Mobotix .mxg @tab @tab X -@item Monkey's Audio @tab @tab X -@item Motion Pixels MVI @tab @tab X -@item MOV/QuickTime/MP4 @tab X @tab X - @tab 3GP, 3GP2, PSP, iPod variants supported -@item MP2 @tab X @tab X -@item MP3 @tab X @tab X -@item MPEG-1 System @tab X @tab X - @tab muxed audio and video, VCD format supported -@item MPEG-PS (program stream) @tab X @tab X - @tab also known as @code{VOB} file, SVCD and DVD format supported -@item MPEG-TS (transport stream) @tab X @tab X - @tab also known as DVB Transport Stream -@item MPEG-4 @tab X @tab X - @tab MPEG-4 is a variant of QuickTime. -@item MIME multipart JPEG @tab X @tab -@item MSN TCP webcam @tab @tab X - @tab Used by MSN Messenger webcam streams. -@item MTV @tab @tab X -@item Musepack @tab @tab X -@item Musepack SV8 @tab @tab X -@item Material eXchange Format (MXF) @tab X @tab X - @tab SMPTE 377M, used by D-Cinema, broadcast industry. -@item Material eXchange Format (MXF), D-10 Mapping @tab X @tab X - @tab SMPTE 386M, D-10/IMX Mapping. -@item NC camera feed @tab @tab X - @tab NC (AVIP NC4600) camera streams -@item NIST SPeech HEader REsources @tab @tab X -@item NTT TwinVQ (VQF) @tab @tab X - @tab Nippon Telegraph and Telephone Corporation TwinVQ. -@item Nullsoft Streaming Video @tab @tab X -@item NuppelVideo @tab @tab X -@item NUT @tab X @tab X - @tab NUT Open Container Format -@item Ogg @tab X @tab X -@item Playstation Portable PMP @tab @tab X -@item Portable Voice Format @tab @tab X -@item TechnoTrend PVA @tab @tab X - @tab Used by TechnoTrend DVB PCI boards. 
-@item QCP @tab @tab X -@item raw ADTS (AAC) @tab X @tab X -@item raw AC-3 @tab X @tab X -@item raw Chinese AVS video @tab X @tab X -@item raw CRI ADX @tab X @tab X -@item raw Dirac @tab X @tab X -@item raw DNxHD @tab X @tab X -@item raw DTS @tab X @tab X -@item raw DTS-HD @tab @tab X -@item raw E-AC-3 @tab X @tab X -@item raw FLAC @tab X @tab X -@item raw GSM @tab @tab X -@item raw H.261 @tab X @tab X -@item raw H.263 @tab X @tab X -@item raw H.264 @tab X @tab X -@item raw HEVC @tab @tab X -@item raw Ingenient MJPEG @tab @tab X -@item raw MJPEG @tab X @tab X -@item raw MLP @tab @tab X -@item raw MPEG @tab @tab X -@item raw MPEG-1 @tab @tab X -@item raw MPEG-2 @tab @tab X -@item raw MPEG-4 @tab X @tab X -@item raw NULL @tab X @tab -@item raw video @tab X @tab X -@item raw id RoQ @tab X @tab -@item raw Shorten @tab @tab X -@item raw TAK @tab @tab X -@item raw TrueHD @tab X @tab X -@item raw VC-1 @tab X @tab X -@item raw PCM A-law @tab X @tab X -@item raw PCM mu-law @tab X @tab X -@item raw PCM signed 8 bit @tab X @tab X -@item raw PCM signed 16 bit big-endian @tab X @tab X -@item raw PCM signed 16 bit little-endian @tab X @tab X -@item raw PCM signed 24 bit big-endian @tab X @tab X -@item raw PCM signed 24 bit little-endian @tab X @tab X -@item raw PCM signed 32 bit big-endian @tab X @tab X -@item raw PCM signed 32 bit little-endian @tab X @tab X -@item raw PCM unsigned 8 bit @tab X @tab X -@item raw PCM unsigned 16 bit big-endian @tab X @tab X -@item raw PCM unsigned 16 bit little-endian @tab X @tab X -@item raw PCM unsigned 24 bit big-endian @tab X @tab X -@item raw PCM unsigned 24 bit little-endian @tab X @tab X -@item raw PCM unsigned 32 bit big-endian @tab X @tab X -@item raw PCM unsigned 32 bit little-endian @tab X @tab X -@item raw PCM floating-point 32 bit big-endian @tab X @tab X -@item raw PCM floating-point 32 bit little-endian @tab X @tab X -@item raw PCM floating-point 64 bit big-endian @tab X @tab X -@item raw PCM floating-point 64 bit little-endian @tab X @tab X -@item RDT @tab @tab X -@item REDCODE R3D @tab @tab X - @tab File format used by RED Digital cameras, contains JPEG 2000 frames and PCM audio. -@item RealMedia @tab X @tab X -@item Redirector @tab @tab X -@item RedSpark @tab @tab X -@item Renderware TeXture Dictionary @tab @tab X -@item RL2 @tab @tab X - @tab Audio and video format used in some games by Entertainment Software Partners. -@item RPL/ARMovie @tab @tab X -@item Lego Mindstorms RSO @tab X @tab X -@item RSD @tab @tab X -@item RTMP @tab X @tab X - @tab Output is performed by publishing stream to RTMP server -@item RTP @tab X @tab X -@item RTSP @tab X @tab X -@item SAP @tab X @tab X -@item SBG @tab @tab X -@item SDP @tab @tab X -@item Sega FILM/CPK @tab @tab X - @tab Used in many Sega Saturn console games. -@item Silicon Graphics Movie @tab @tab X -@item Sierra SOL @tab @tab X - @tab .sol files used in Sierra Online games. -@item Sierra VMD @tab @tab X - @tab Used in Sierra CD-ROM games. -@item Smacker @tab @tab X - @tab Multimedia format used by many games. -@item SMJPEG @tab X @tab X - @tab Used in certain Loki game ports. -@item Smush @tab @tab X - @tab Multimedia format used in some LucasArts games. -@item Sony OpenMG (OMA) @tab X @tab X - @tab Audio format used in Sony Sonic Stage and Sony Vegas. -@item Sony PlayStation STR @tab @tab X -@item Sony Wave64 (W64) @tab X @tab X -@item SoX native format @tab X @tab X -@item SUN AU format @tab X @tab X -@item Text files @tab @tab X -@item THP @tab @tab X - @tab Used on the Nintendo GameCube. 
-@item Tiertex Limited SEQ @tab @tab X - @tab Tiertex .seq files used in the DOS CD-ROM version of the game Flashback. -@item True Audio @tab @tab X -@item VC-1 test bitstream @tab X @tab X -@item Vivo @tab @tab X -@item WAV @tab X @tab X -@item WavPack @tab X @tab X -@item WebM @tab X @tab X -@item Windows Televison (WTV) @tab X @tab X -@item Wing Commander III movie @tab @tab X - @tab Multimedia format used in Origin's Wing Commander III computer game. -@item Westwood Studios audio @tab @tab X - @tab Multimedia format used in Westwood Studios games. -@item Westwood Studios VQA @tab @tab X - @tab Multimedia format used in Westwood Studios games. -@item XMV @tab @tab X - @tab Microsoft video container used in Xbox games. -@item xWMA @tab @tab X - @tab Microsoft audio container used by XAudio 2. -@item eXtended BINary text (XBIN) @tab @tab X -@item YUV4MPEG pipe @tab X @tab X -@item Psygnosis YOP @tab @tab X -@end multitable - -@code{X} means that encoding (resp. decoding) is supported. - -@section Image Formats - -FFmpeg can read and write images for each frame of a video sequence. The -following image formats are supported: - -@multitable @columnfractions .4 .1 .1 .4 -@item Name @tab Encoding @tab Decoding @tab Comments -@item .Y.U.V @tab X @tab X - @tab one raw file per component -@item animated GIF @tab X @tab X -@item BMP @tab X @tab X - @tab Microsoft BMP image -@item PIX @tab @tab X - @tab PIX is an image format used in the Argonaut BRender engine. -@item DPX @tab X @tab X - @tab Digital Picture Exchange -@item EXR @tab @tab X - @tab OpenEXR -@item JPEG @tab X @tab X - @tab Progressive JPEG is not supported. -@item JPEG 2000 @tab X @tab X -@item JPEG-LS @tab X @tab X -@item LJPEG @tab X @tab - @tab Lossless JPEG -@item PAM @tab X @tab X - @tab PAM is a PNM extension with alpha support. -@item PBM @tab X @tab X - @tab Portable BitMap image -@item PCX @tab X @tab X - @tab PC Paintbrush -@item PGM @tab X @tab X - @tab Portable GrayMap image -@item PGMYUV @tab X @tab X - @tab PGM with U and V components in YUV 4:2:0 -@item PIC @tab @tab X - @tab Pictor/PC Paint -@item PNG @tab X @tab X -@item PPM @tab X @tab X - @tab Portable PixelMap image -@item PTX @tab @tab X - @tab V.Flash PTX format -@item SGI @tab X @tab X - @tab SGI RGB image format -@item Sun Rasterfile @tab X @tab X - @tab Sun RAS image format -@item TIFF @tab X @tab X - @tab YUV, JPEG and some extension is not supported yet. -@item Truevision Targa @tab X @tab X - @tab Targa (.TGA) image format -@item WebP @tab E @tab X - @tab WebP image format, encoding supported through external library libwebp -@item XBM @tab X @tab X - @tab X BitMap image format -@item XFace @tab X @tab X - @tab X-Face image format -@item XWD @tab X @tab X - @tab X Window Dump image format -@end multitable - -@code{X} means that encoding (resp. decoding) is supported. - -@code{E} means that support is provided through an external library. - -@section Video Codecs - -@multitable @columnfractions .4 .1 .1 .4 -@item Name @tab Encoding @tab Decoding @tab Comments -@item 4X Movie @tab @tab X - @tab Used in certain computer games. -@item 8088flex TMV @tab @tab X -@item A64 multicolor @tab X @tab - @tab Creates video suitable to be played on a commodore 64 (multicolor mode). -@item Amazing Studio PAF Video @tab @tab X -@item American Laser Games MM @tab @tab X - @tab Used in games like Mad Dog McCree. -@item AMV Video @tab X @tab X - @tab Used in Chinese MP3 players. 
-@item ANSI/ASCII art @tab @tab X -@item Apple Intermediate Codec @tab @tab X -@item Apple MJPEG-B @tab @tab X -@item Apple ProRes @tab X @tab X -@item Apple QuickDraw @tab @tab X - @tab fourcc: qdrw -@item Asus v1 @tab X @tab X - @tab fourcc: ASV1 -@item Asus v2 @tab X @tab X - @tab fourcc: ASV2 -@item ATI VCR1 @tab @tab X - @tab fourcc: VCR1 -@item ATI VCR2 @tab @tab X - @tab fourcc: VCR2 -@item Auravision Aura @tab @tab X -@item Auravision Aura 2 @tab @tab X -@item Autodesk Animator Flic video @tab @tab X -@item Autodesk RLE @tab @tab X - @tab fourcc: AASC -@item Avid 1:1 10-bit RGB Packer @tab X @tab X - @tab fourcc: AVrp -@item AVS (Audio Video Standard) video @tab @tab X - @tab Video encoding used by the Creature Shock game. -@item AYUV @tab X @tab X - @tab Microsoft uncompressed packed 4:4:4:4 -@item Beam Software VB @tab @tab X -@item Bethesda VID video @tab @tab X - @tab Used in some games from Bethesda Softworks. -@item Bink Video @tab @tab X -@item Bitmap Brothers JV video @tab @tab X -@item y41p Brooktree uncompressed 4:1:1 12-bit @tab X @tab X -@item Brute Force & Ignorance @tab @tab X - @tab Used in the game Flash Traffic: City of Angels. -@item C93 video @tab @tab X - @tab Codec used in Cyberia game. -@item CamStudio @tab @tab X - @tab fourcc: CSCD -@item CD+G @tab @tab X - @tab Video codec for CD+G karaoke disks -@item CDXL @tab @tab X - @tab Amiga CD video codec -@item Chinese AVS video @tab E @tab X - @tab AVS1-P2, JiZhun profile, encoding through external library libxavs -@item Delphine Software International CIN video @tab @tab X - @tab Codec used in Delphine Software International games. -@item Discworld II BMV Video @tab @tab X -@item Canopus Lossless Codec @tab @tab X -@item Cinepak @tab @tab X -@item Cirrus Logic AccuPak @tab X @tab X - @tab fourcc: CLJR -@item CPiA Video Format @tab @tab X -@item Creative YUV (CYUV) @tab @tab X -@item DFA @tab @tab X - @tab Codec used in Chronomaster game. -@item Dirac @tab E @tab X - @tab supported through external library libschroedinger -@item Deluxe Paint Animation @tab @tab X -@item DNxHD @tab X @tab X - @tab aka SMPTE VC3 -@item Duck TrueMotion 1.0 @tab @tab X - @tab fourcc: DUCK -@item Duck TrueMotion 2.0 @tab @tab X - @tab fourcc: TM20 -@item DV (Digital Video) @tab X @tab X -@item Dxtory capture format @tab @tab X -@item Feeble Files/ScummVM DXA @tab @tab X - @tab Codec originally used in Feeble Files game. -@item Electronic Arts CMV video @tab @tab X - @tab Used in NHL 95 game. 
-@item Electronic Arts Madcow video @tab @tab X -@item Electronic Arts TGV video @tab @tab X -@item Electronic Arts TGQ video @tab @tab X -@item Electronic Arts TQI video @tab @tab X -@item Escape 124 @tab @tab X -@item Escape 130 @tab @tab X -@item FFmpeg video codec #1 @tab X @tab X - @tab lossless codec (fourcc: FFV1) -@item Flash Screen Video v1 @tab X @tab X - @tab fourcc: FSV1 -@item Flash Screen Video v2 @tab X @tab X -@item Flash Video (FLV) @tab X @tab X - @tab Sorenson H.263 used in Flash -@item Forward Uncompressed @tab @tab X -@item Fraps @tab @tab X -@item Go2Webinar @tab @tab X - @tab fourcc: G2M4 -@item H.261 @tab X @tab X -@item H.263 / H.263-1996 @tab X @tab X -@item H.263+ / H.263-1998 / H.263 version 2 @tab X @tab X -@item H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 @tab E @tab X - @tab encoding supported through external library libx264 -@item HEVC @tab @tab X -@item HNM version 4 @tab @tab X -@item HuffYUV @tab X @tab X -@item HuffYUV FFmpeg variant @tab X @tab X -@item IBM Ultimotion @tab @tab X - @tab fourcc: ULTI -@item id Cinematic video @tab @tab X - @tab Used in Quake II. -@item id RoQ video @tab X @tab X - @tab Used in Quake III, Jedi Knight 2, other computer games. -@item IFF ILBM @tab @tab X - @tab IFF interleaved bitmap -@item IFF ByteRun1 @tab @tab X - @tab IFF run length encoded bitmap -@item Intel H.263 @tab @tab X -@item Intel Indeo 2 @tab @tab X -@item Intel Indeo 3 @tab @tab X -@item Intel Indeo 4 @tab @tab X -@item Intel Indeo 5 @tab @tab X -@item Interplay C93 @tab @tab X - @tab Used in the game Cyberia from Interplay. -@item Interplay MVE video @tab @tab X - @tab Used in Interplay .MVE files. -@item J2K @tab X @tab X -@item Karl Morton's video codec @tab @tab X - @tab Codec used in Worms games. -@item Kega Game Video (KGV1) @tab @tab X - @tab Kega emulator screen capture codec. -@item Lagarith @tab @tab X -@item LCL (LossLess Codec Library) MSZH @tab @tab X -@item LCL (LossLess Codec Library) ZLIB @tab E @tab E -@item LOCO @tab @tab X -@item LucasArts Smush @tab @tab X - @tab Used in LucasArts games. -@item lossless MJPEG @tab X @tab X -@item Microsoft ATC Screen @tab @tab X - @tab Also known as Microsoft Screen 3. -@item Microsoft Expression Encoder Screen @tab @tab X - @tab Also known as Microsoft Titanium Screen 2. -@item Microsoft RLE @tab @tab X -@item Microsoft Screen 1 @tab @tab X - @tab Also known as Windows Media Video V7 Screen. -@item Microsoft Screen 2 @tab @tab X - @tab Also known as Windows Media Video V9 Screen. -@item Microsoft Video 1 @tab @tab X -@item Mimic @tab @tab X - @tab Used in MSN Messenger Webcam streams. -@item Miro VideoXL @tab @tab X - @tab fourcc: VIXL -@item MJPEG (Motion JPEG) @tab X @tab X -@item Mobotix MxPEG video @tab @tab X -@item Motion Pixels video @tab @tab X -@item MPEG-1 video @tab X @tab X -@item MPEG-2 video @tab X @tab X -@item MPEG-4 part 2 @tab X @tab X - @tab libxvidcore can be used alternatively for encoding. -@item MPEG-4 part 2 Microsoft variant version 1 @tab @tab X -@item MPEG-4 part 2 Microsoft variant version 2 @tab X @tab X -@item MPEG-4 part 2 Microsoft variant version 3 @tab X @tab X -@item Nintendo Gamecube THP video @tab @tab X -@item NuppelVideo/RTjpeg @tab @tab X - @tab Video encoding used in NuppelVideo files. 
-@item On2 VP3 @tab @tab X - @tab still experimental -@item On2 VP5 @tab @tab X - @tab fourcc: VP50 -@item On2 VP6 @tab @tab X - @tab fourcc: VP60,VP61,VP62 -@item VP8 @tab E @tab X - @tab fourcc: VP80, encoding supported through external library libvpx -@item VP9 @tab E @tab X - @tab encoding supported through external library libvpx -@item Pinnacle TARGA CineWave YUV16 @tab @tab X - @tab fourcc: Y216 -@item Prores @tab @tab X - @tab fourcc: apch,apcn,apcs,apco -@item Q-team QPEG @tab @tab X - @tab fourccs: QPEG, Q1.0, Q1.1 -@item QuickTime 8BPS video @tab @tab X -@item QuickTime Animation (RLE) video @tab X @tab X - @tab fourcc: 'rle ' -@item QuickTime Graphics (SMC) @tab @tab X - @tab fourcc: 'smc ' -@item QuickTime video (RPZA) @tab @tab X - @tab fourcc: rpza -@item R10K AJA Kona 10-bit RGB Codec @tab X @tab X -@item R210 Quicktime Uncompressed RGB 10-bit @tab X @tab X -@item Raw Video @tab X @tab X -@item RealVideo 1.0 @tab X @tab X -@item RealVideo 2.0 @tab X @tab X -@item RealVideo 3.0 @tab @tab X - @tab still far from ideal -@item RealVideo 4.0 @tab @tab X -@item Renderware TXD (TeXture Dictionary) @tab @tab X - @tab Texture dictionaries used by the Renderware Engine. -@item RL2 video @tab @tab X - @tab used in some games by Entertainment Software Partners -@item SGI RLE 8-bit @tab @tab X -@item Sierra VMD video @tab @tab X - @tab Used in Sierra VMD files. -@item Silicon Graphics Motion Video Compressor 1 (MVC1) @tab @tab X -@item Silicon Graphics Motion Video Compressor 2 (MVC2) @tab @tab X -@item Smacker video @tab @tab X - @tab Video encoding used in Smacker. -@item SMPTE VC-1 @tab @tab X -@item Snow @tab X @tab X - @tab experimental wavelet codec (fourcc: SNOW) -@item Sony PlayStation MDEC (Motion DECoder) @tab @tab X -@item Sorenson Vector Quantizer 1 @tab X @tab X - @tab fourcc: SVQ1 -@item Sorenson Vector Quantizer 3 @tab @tab X - @tab fourcc: SVQ3 -@item Sunplus JPEG (SP5X) @tab @tab X - @tab fourcc: SP5X -@item TechSmith Screen Capture Codec @tab @tab X - @tab fourcc: TSCC -@item TechSmith Screen Capture Codec 2 @tab @tab X - @tab fourcc: TSC2 -@item Theora @tab E @tab X - @tab encoding supported through external library libtheora -@item Tiertex Limited SEQ video @tab @tab X - @tab Codec used in DOS CD-ROM FlashBack game. -@item Ut Video @tab X @tab X -@item v210 QuickTime uncompressed 4:2:2 10-bit @tab X @tab X -@item v308 QuickTime uncompressed 4:4:4 @tab X @tab X -@item v408 QuickTime uncompressed 4:4:4:4 @tab X @tab X -@item v410 QuickTime uncompressed 4:4:4 10-bit @tab X @tab X -@item VBLE Lossless Codec @tab @tab X -@item VMware Screen Codec / VMware Video @tab @tab X - @tab Codec used in videos captured by VMware. -@item Westwood Studios VQA (Vector Quantized Animation) video @tab @tab X -@item Windows Media Image @tab @tab X -@item Windows Media Video 7 @tab X @tab X -@item Windows Media Video 8 @tab X @tab X -@item Windows Media Video 9 @tab @tab X - @tab not completely working -@item Wing Commander III / Xan @tab @tab X - @tab Used in Wing Commander III .MVE files. -@item Wing Commander IV / Xan @tab @tab X - @tab Used in Wing Commander IV. -@item Winnov WNV1 @tab @tab X -@item WMV7 @tab X @tab X -@item YAMAHA SMAF @tab X @tab X -@item Psygnosis YOP Video @tab @tab X -@item yuv4 @tab X @tab X - @tab libquicktime uncompressed packed 4:2:0 -@item ZeroCodec Lossless Video @tab @tab X -@item ZLIB @tab X @tab X - @tab part of LCL, encoder experimental -@item Zip Motion Blocks Video @tab X @tab X - @tab Encoder works only in PAL8. 
-@end multitable - -@code{X} means that encoding (resp. decoding) is supported. - -@code{E} means that support is provided through an external library. - -@section Audio Codecs - -@multitable @columnfractions .4 .1 .1 .4 -@item Name @tab Encoding @tab Decoding @tab Comments -@item 8SVX exponential @tab @tab X -@item 8SVX fibonacci @tab @tab X -@item AAC+ @tab E @tab X - @tab encoding supported through external library libaacplus -@item AAC @tab E @tab X - @tab encoding supported through external library libfaac and libvo-aacenc -@item AC-3 @tab IX @tab X -@item ADPCM 4X Movie @tab @tab X -@item ADPCM CDROM XA @tab @tab X -@item ADPCM Creative Technology @tab @tab X - @tab 16 -> 4, 8 -> 4, 8 -> 3, 8 -> 2 -@item ADPCM Electronic Arts @tab @tab X - @tab Used in various EA titles. -@item ADPCM Electronic Arts Maxis CDROM XS @tab @tab X - @tab Used in Sim City 3000. -@item ADPCM Electronic Arts R1 @tab @tab X -@item ADPCM Electronic Arts R2 @tab @tab X -@item ADPCM Electronic Arts R3 @tab @tab X -@item ADPCM Electronic Arts XAS @tab @tab X -@item ADPCM G.722 @tab X @tab X -@item ADPCM G.726 @tab X @tab X -@item ADPCM IMA AMV @tab @tab X - @tab Used in AMV files -@item ADPCM IMA Electronic Arts EACS @tab @tab X -@item ADPCM IMA Electronic Arts SEAD @tab @tab X -@item ADPCM IMA Funcom @tab @tab X -@item ADPCM IMA QuickTime @tab X @tab X -@item ADPCM IMA Loki SDL MJPEG @tab @tab X -@item ADPCM IMA WAV @tab X @tab X -@item ADPCM IMA Westwood @tab @tab X -@item ADPCM ISS IMA @tab @tab X - @tab Used in FunCom games. -@item ADPCM IMA Dialogic @tab @tab X -@item ADPCM IMA Duck DK3 @tab @tab X - @tab Used in some Sega Saturn console games. -@item ADPCM IMA Duck DK4 @tab @tab X - @tab Used in some Sega Saturn console games. -@item ADPCM IMA Radical @tab @tab X -@item ADPCM Microsoft @tab X @tab X -@item ADPCM MS IMA @tab X @tab X -@item ADPCM Nintendo Gamecube AFC @tab @tab X -@item ADPCM Nintendo Gamecube DTK @tab @tab X -@item ADPCM Nintendo Gamecube THP @tab @tab X -@item ADPCM QT IMA @tab X @tab X -@item ADPCM SEGA CRI ADX @tab X @tab X - @tab Used in Sega Dreamcast games. -@item ADPCM Shockwave Flash @tab X @tab X -@item ADPCM Sound Blaster Pro 2-bit @tab @tab X -@item ADPCM Sound Blaster Pro 2.6-bit @tab @tab X -@item ADPCM Sound Blaster Pro 4-bit @tab @tab X -@item ADPCM Westwood Studios IMA @tab @tab X - @tab Used in Westwood Studios games like Command and Conquer. -@item ADPCM Yamaha @tab X @tab X -@item AMR-NB @tab E @tab X - @tab encoding supported through external library libopencore-amrnb -@item AMR-WB @tab E @tab X - @tab encoding supported through external library libvo-amrwbenc -@item Amazing Studio PAF Audio @tab @tab X -@item Apple lossless audio @tab X @tab X - @tab QuickTime fourcc 'alac' -@item ATRAC1 @tab @tab X -@item ATRAC3 @tab @tab X -@item Bink Audio @tab @tab X - @tab Used in Bink and Smacker files in many games. -@item CELT @tab @tab E - @tab decoding supported through external library libcelt -@item Delphine Software International CIN audio @tab @tab X - @tab Codec used in Delphine Software International games. -@item Discworld II BMV Audio @tab @tab X -@item COOK @tab @tab X - @tab All versions except 5.1 are supported. -@item DCA (DTS Coherent Acoustics) @tab X @tab X -@item DPCM id RoQ @tab X @tab X - @tab Used in Quake III, Jedi Knight 2 and other computer games. -@item DPCM Interplay @tab @tab X - @tab Used in various Interplay computer games. -@item DPCM Sierra Online @tab @tab X - @tab Used in Sierra Online game audio files. 
-@item DPCM Sol @tab @tab X -@item DPCM Xan @tab @tab X - @tab Used in Origin's Wing Commander IV AVI files. -@item DSP Group TrueSpeech @tab @tab X -@item DV audio @tab @tab X -@item Enhanced AC-3 @tab X @tab X -@item EVRC (Enhanced Variable Rate Codec) @tab @tab X -@item FLAC (Free Lossless Audio Codec) @tab X @tab IX -@item G.723.1 @tab X @tab X -@item G.729 @tab @tab X -@item GSM @tab E @tab X - @tab encoding supported through external library libgsm -@item GSM Microsoft variant @tab E @tab X - @tab encoding supported through external library libgsm -@item IAC (Indeo Audio Coder) @tab @tab X -@item iLBC (Internet Low Bitrate Codec) @tab E @tab E - @tab encoding and decoding supported through external library libilbc -@item IMC (Intel Music Coder) @tab @tab X -@item MACE (Macintosh Audio Compression/Expansion) 3:1 @tab @tab X -@item MACE (Macintosh Audio Compression/Expansion) 6:1 @tab @tab X -@item MLP (Meridian Lossless Packing) @tab @tab X - @tab Used in DVD-Audio discs. -@item Monkey's Audio @tab @tab X -@item MP1 (MPEG audio layer 1) @tab @tab IX -@item MP2 (MPEG audio layer 2) @tab IX @tab IX - @tab libtwolame can be used alternatively for encoding. -@item MP3 (MPEG audio layer 3) @tab E @tab IX - @tab encoding supported through external library LAME, ADU MP3 and MP3onMP4 also supported -@item MPEG-4 Audio Lossless Coding (ALS) @tab @tab X -@item Musepack SV7 @tab @tab X -@item Musepack SV8 @tab @tab X -@item Nellymoser Asao @tab X @tab X -@item Opus @tab E @tab E - @tab supported through external library libopus -@item PCM A-law @tab X @tab X -@item PCM mu-law @tab X @tab X -@item PCM signed 8-bit planar @tab X @tab X -@item PCM signed 16-bit big-endian planar @tab X @tab X -@item PCM signed 16-bit little-endian planar @tab X @tab X -@item PCM signed 24-bit little-endian planar @tab X @tab X -@item PCM signed 32-bit little-endian planar @tab X @tab X -@item PCM 32-bit floating point big-endian @tab X @tab X -@item PCM 32-bit floating point little-endian @tab X @tab X -@item PCM 64-bit floating point big-endian @tab X @tab X -@item PCM 64-bit floating point little-endian @tab X @tab X -@item PCM D-Cinema audio signed 24-bit @tab X @tab X -@item PCM signed 8-bit @tab X @tab X -@item PCM signed 16-bit big-endian @tab X @tab X -@item PCM signed 16-bit little-endian @tab X @tab X -@item PCM signed 24-bit big-endian @tab X @tab X -@item PCM signed 24-bit little-endian @tab X @tab X -@item PCM signed 32-bit big-endian @tab X @tab X -@item PCM signed 32-bit little-endian @tab X @tab X -@item PCM signed 16/20/24-bit big-endian in MPEG-TS @tab @tab X -@item PCM unsigned 8-bit @tab X @tab X -@item PCM unsigned 16-bit big-endian @tab X @tab X -@item PCM unsigned 16-bit little-endian @tab X @tab X -@item PCM unsigned 24-bit big-endian @tab X @tab X -@item PCM unsigned 24-bit little-endian @tab X @tab X -@item PCM unsigned 32-bit big-endian @tab X @tab X -@item PCM unsigned 32-bit little-endian @tab X @tab X -@item PCM Zork @tab @tab X -@item QCELP / PureVoice @tab @tab X -@item QDesign Music Codec 2 @tab @tab X - @tab There are still some distortions. -@item RealAudio 1.0 (14.4K) @tab X @tab X - @tab Real 14400 bit/s codec -@item RealAudio 2.0 (28.8K) @tab @tab X - @tab Real 28800 bit/s codec -@item RealAudio 3.0 (dnet) @tab IX @tab X - @tab Real low bitrate AC-3 codec -@item RealAudio Lossless @tab @tab X -@item RealAudio SIPR / ACELP.NET @tab @tab X -@item Shorten @tab @tab X -@item Sierra VMD audio @tab @tab X - @tab Used in Sierra VMD files. 
-@item Smacker audio @tab @tab X -@item SMPTE 302M AES3 audio @tab X @tab X -@item Sonic @tab X @tab X - @tab experimental codec -@item Sonic lossless @tab X @tab X - @tab experimental codec -@item Speex @tab E @tab E - @tab supported through external library libspeex -@item TAK (Tom's lossless Audio Kompressor) @tab @tab X -@item True Audio (TTA) @tab X @tab X -@item TrueHD @tab @tab X - @tab Used in HD-DVD and Blu-Ray discs. -@item TwinVQ (VQF flavor) @tab @tab X -@item VIMA @tab @tab X - @tab Used in LucasArts SMUSH animations. -@item Vorbis @tab E @tab X - @tab A native but very primitive encoder exists. -@item Voxware MetaSound @tab @tab X -@item WavPack @tab X @tab X -@item Westwood Audio (SND1) @tab @tab X -@item Windows Media Audio 1 @tab X @tab X -@item Windows Media Audio 2 @tab X @tab X -@item Windows Media Audio Lossless @tab @tab X -@item Windows Media Audio Pro @tab @tab X -@item Windows Media Audio Voice @tab @tab X -@end multitable - -@code{X} means that encoding (resp. decoding) is supported. - -@code{E} means that support is provided through an external library. - -@code{I} means that an integer-only version is available, too (ensures high -performance on systems without hardware floating point support). - -@section Subtitle Formats - -@multitable @columnfractions .4 .1 .1 .1 .1 -@item Name @tab Muxing @tab Demuxing @tab Encoding @tab Decoding -@item 3GPP Timed Text @tab @tab @tab X @tab X -@item AQTitle @tab @tab X @tab @tab X -@item DVB @tab X @tab X @tab X @tab X -@item DVB teletext @tab @tab X @tab @tab E -@item DVD @tab X @tab X @tab X @tab X -@item JACOsub @tab X @tab X @tab @tab X -@item MicroDVD @tab X @tab X @tab @tab X -@item MPL2 @tab @tab X @tab @tab X -@item MPsub (MPlayer) @tab @tab X @tab @tab X -@item PGS @tab @tab @tab @tab X -@item PJS (Phoenix) @tab @tab X @tab @tab X -@item RealText @tab @tab X @tab @tab X -@item SAMI @tab @tab X @tab @tab X -@item SSA/ASS @tab X @tab X @tab X @tab X -@item SubRip (SRT) @tab X @tab X @tab X @tab X -@item SubViewer v1 @tab @tab X @tab @tab X -@item SubViewer @tab @tab X @tab @tab X -@item TED Talks captions @tab @tab X @tab @tab X -@item VobSub (IDX+SUB) @tab @tab X @tab @tab X -@item VPlayer @tab @tab X @tab @tab X -@item WebVTT @tab X @tab X @tab @tab X -@item XSUB @tab @tab @tab X @tab X -@end multitable - -@code{X} means that the feature is supported. - -@code{E} means that support is provided through an external library. - -@section Network Protocols - -@multitable @columnfractions .4 .1 -@item Name @tab Support -@item file @tab X -@item Gopher @tab X -@item HLS @tab X -@item HTTP @tab X -@item HTTPS @tab X -@item MMSH @tab X -@item MMST @tab X -@item pipe @tab X -@item RTMP @tab X -@item RTMPE @tab X -@item RTMPS @tab X -@item RTMPT @tab X -@item RTMPTE @tab X -@item RTMPTS @tab X -@item RTP @tab X -@item SCTP @tab X -@item TCP @tab X -@item TLS @tab X -@item UDP @tab X -@end multitable - -@code{X} means that the protocol is supported. - -@code{E} means that support is provided through an external library. 
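As a quick, hedged illustration of the protocol support listed above: any supported protocol can be used directly as an input URL with the ff* tools. The host names and stream paths below are placeholders, not real endpoints.

@example
# List the protocols compiled into this build.
ffmpeg -protocols

# Read a remote file over HTTP and remux it locally without re-encoding.
ffmpeg -i http://example.com/path/input.mp4 -c copy local.mp4

# Play a live RTMP stream.
ffplay rtmp://example.com/live/streamname
@end example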
- - -@section Input/Output Devices - -@multitable @columnfractions .4 .1 .1 -@item Name @tab Input @tab Output -@item ALSA @tab X @tab X -@item BKTR @tab X @tab -@item caca @tab @tab X -@item DV1394 @tab X @tab -@item Lavfi virtual device @tab X @tab -@item Linux framebuffer @tab X @tab -@item JACK @tab X @tab -@item LIBCDIO @tab X -@item LIBDC1394 @tab X @tab -@item OpenAL @tab X -@item OSS @tab X @tab X -@item Pulseaudio @tab X @tab -@item SDL @tab @tab X -@item Video4Linux2 @tab X @tab X -@item VfW capture @tab X @tab -@item X11 grabbing @tab X @tab -@end multitable - -@code{X} means that input/output is supported. - -@section Timecode - -@multitable @columnfractions .4 .1 .1 -@item Codec/format @tab Read @tab Write -@item AVI @tab X @tab X -@item DV @tab X @tab X -@item GXF @tab X @tab X -@item MOV @tab X @tab X -@item MPEG1/2 @tab X @tab X -@item MXF @tab X @tab X -@end multitable - -@bye diff --git a/ffmpeg/doc/git-howto.texi b/ffmpeg/doc/git-howto.texi deleted file mode 100644 index 44e1cc6..0000000 --- a/ffmpeg/doc/git-howto.texi +++ /dev/null @@ -1,415 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle Using git to develop FFmpeg - -@titlepage -@center @titlefont{Using git to develop FFmpeg} -@end titlepage - -@top - -@contents - -@chapter Introduction - -This document aims in giving some quick references on a set of useful git -commands. You should always use the extensive and detailed documentation -provided directly by git: - -@example -git --help -man git -@end example - -shows you the available subcommands, - -@example -git <command> --help -man git-<command> -@end example - -shows information about the subcommand <command>. - -Additional information could be found on the -@url{http://gitref.org, Git Reference} website - -For more information about the Git project, visit the - -@url{http://git-scm.com/, Git website} - -Consult these resources whenever you have problems, they are quite exhaustive. - -What follows now is a basic introduction to Git and some FFmpeg-specific -guidelines to ease the contribution to the project - -@chapter Basics Usage - -@section Get GIT - -You can get git from @url{http://git-scm.com/} -Most distribution and operating system provide a package for it. - - -@section Cloning the source tree - -@example -git clone git://source.ffmpeg.org/ffmpeg <target> -@end example - -This will put the FFmpeg sources into the directory @var{<target>}. - -@example -git clone git@@source.ffmpeg.org:ffmpeg <target> -@end example - -This will put the FFmpeg sources into the directory @var{<target>} and let -you push back your changes to the remote repository. - -Make sure that you do not have Windows line endings in your checkouts, -otherwise you may experience spurious compilation failures. One way to -achieve this is to run - -@example -git config --global core.autocrlf false -@end example - - -@section Updating the source tree to the latest revision - -@example -git pull (--rebase) -@end example - -pulls in the latest changes from the tracked branch. The tracked branch -can be remote. By default the master branch tracks the branch master in -the remote origin. - -@float IMPORTANT -@command{--rebase} (see below) is recommended. -@end float - -@section Rebasing your local branches - -@example -git pull --rebase -@end example - -fetches the changes from the main repository and replays your local commits -over it. This is required to keep all your local changes at the top of -FFmpeg's master tree. The master tree will reject pushes with merge commits. 
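For readers who prefer the explicit form, the following sketch shows roughly what @command{git pull --rebase} does for a branch tracking origin/master; it is an equivalent spelling, not an extra required step.

@example
# Fetch the latest upstream history without touching the working tree.
git fetch origin

# Replay your local commits on top of the updated origin/master.
git rebase origin/master
@end example

Recent git versions can also make a plain @command{git pull} behave this way by default via @command{git config --global pull.rebase true}.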
- - -@section Adding/removing files/directories - -@example -git add [-A] <filename/dirname> -git rm [-r] <filename/dirname> -@end example - -GIT needs to get notified of all changes you make to your working -directory that makes files appear or disappear. -Line moves across files are automatically tracked. - - -@section Showing modifications - -@example -git diff <filename(s)> -@end example - -will show all local modifications in your working directory as unified diff. - - -@section Inspecting the changelog - -@example -git log <filename(s)> -@end example - -You may also use the graphical tools like gitview or gitk or the web -interface available at http://source.ffmpeg.org/ - -@section Checking source tree status - -@example -git status -@end example - -detects all the changes you made and lists what actions will be taken in case -of a commit (additions, modifications, deletions, etc.). - - -@section Committing - -@example -git diff --check -@end example - -to double check your changes before committing them to avoid trouble later -on. All experienced developers do this on each and every commit, no matter -how small. -Every one of them has been saved from looking like a fool by this many times. -It's very easy for stray debug output or cosmetic modifications to slip in, -please avoid problems through this extra level of scrutiny. - -For cosmetics-only commits you should get (almost) empty output from - -@example -git diff -w -b <filename(s)> -@end example - -Also check the output of - -@example -git status -@end example - -to make sure you don't have untracked files or deletions. - -@example -git add [-i|-p|-A] <filenames/dirnames> -@end example - -Make sure you have told git your name and email address - -@example -git config --global user.name "My Name" -git config --global user.email my@@email.invalid -@end example - -Use @var{--global} to set the global configuration for all your git checkouts. - -Git will select the changes to the files for commit. Optionally you can use -the interactive or the patch mode to select hunk by hunk what should be -added to the commit. - - -@example -git commit -@end example - -Git will commit the selected changes to your current local branch. - -You will be prompted for a log message in an editor, which is either -set in your personal configuration file through - -@example -git config --global core.editor -@end example - -or set by one of the following environment variables: -@var{GIT_EDITOR}, @var{VISUAL} or @var{EDITOR}. - -Log messages should be concise but descriptive. Explain why you made a change, -what you did will be obvious from the changes themselves most of the time. -Saying just "bug fix" or "10l" is bad. Remember that people of varying skill -levels look at and educate themselves while reading through your code. Don't -include filenames in log messages, Git provides that information. - -Possibly make the commit message have a terse, descriptive first line, an -empty line and then a full description. The first line will be used to name -the patch by git format-patch. - -@section Preparing a patchset - -@example -git format-patch <commit> [-o directory] -@end example - -will generate a set of patches for each commit between @var{<commit>} and -current @var{HEAD}. E.g. - -@example -git format-patch origin/master -@end example - -will generate patches for all commits on current branch which are not -present in upstream. 
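A concrete run, with a hypothetical topic branch and output directory, might look like this:

@example
# Create a topic branch tracking origin/master (names are examples only).
git checkout -b my-feature origin/master

# ... hack, commit, repeat ...

# Write one patch file per commit not yet in upstream into outgoing/.
git format-patch -o outgoing/ origin/master
@end example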
-A useful shortcut is also - -@example -git format-patch -n -@end example - -which will generate patches from last @var{n} commits. -By default the patches are created in the current directory. - -@section Sending patches for review - -@example -git send-email <commit list|directory> -@end example - -will send the patches created by @command{git format-patch} or directly -generates them. All the email fields can be configured in the global/local -configuration or overridden by command line. -Note that this tool must often be installed separately (e.g. @var{git-email} -package on Debian-based distros). - - -@section Renaming/moving/copying files or contents of files - -Git automatically tracks such changes, making those normal commits. - -@example -mv/cp path/file otherpath/otherfile -git add [-A] . -git commit -@end example - - -@chapter Git configuration - -In order to simplify a few workflows, it is advisable to configure both -your personal Git installation and your local FFmpeg repository. - -@section Personal Git installation - -Add the following to your @file{~/.gitconfig} to help @command{git send-email} -and @command{git format-patch} detect renames: - -@example -[diff] - renames = copy -@end example - -@section Repository configuration - -In order to have @command{git send-email} automatically send patches -to the ffmpeg-devel mailing list, add the following stanza -to @file{/path/to/ffmpeg/repository/.git/config}: - -@example -[sendemail] - to = ffmpeg-devel@@ffmpeg.org -@end example - -@chapter FFmpeg specific - -@section Reverting broken commits - -@example -git reset <commit> -@end example - -@command{git reset} will uncommit the changes till @var{<commit>} rewriting -the current branch history. - -@example -git commit --amend -@end example - -allows to amend the last commit details quickly. - -@example -git rebase -i origin/master -@end example - -will replay local commits over the main repository allowing to edit, merge -or remove some of them in the process. - -@float NOTE -@command{git reset}, @command{git commit --amend} and @command{git rebase} -rewrite history, so you should use them ONLY on your local or topic branches. -The main repository will reject those changes. -@end float - -@example -git revert <commit> -@end example - -@command{git revert} will generate a revert commit. This will not make the -faulty commit disappear from the history. - -@section Pushing changes to remote trees - -@example -git push -@end example - -Will push the changes to the default remote (@var{origin}). -Git will prevent you from pushing changes if the local and remote trees are -out of sync. Refer to and to sync the local tree. - -@example -git remote add <name> <url> -@end example - -Will add additional remote with a name reference, it is useful if you want -to push your local branch for review on a remote host. - -@example -git push <remote> <refspec> -@end example - -Will push the changes to the @var{<remote>} repository. -Omitting @var{<refspec>} makes @command{git push} update all the remote -branches matching the local ones. - -@section Finding a specific svn revision - -Since version 1.7.1 git supports @var{:/foo} syntax for specifying commits -based on a regular expression. see man gitrevisions - -@example -git show :/'as revision 23456' -@end example - -will show the svn changeset @var{r23456}. With older git versions searching in -the @command{git log} output is the easiest option (especially if a pager with -search capabilities is used). 
-This commit can be checked out with
-
-@example
-git checkout -b svn_23456 :/'as revision 23456'
-@end example
-
-or for git < 1.7.1 with
-
-@example
-git checkout -b svn_23456 $SHA1
-@end example
-
-where @var{$SHA1} is the commit hash from the @command{git log} output.
-
-
-@chapter Pre-push checklist
-
-Once you have a set of commits that you feel are ready for pushing,
-work through the following checklist to double-check that everything is
-in proper order. This list tries to be exhaustive. In case you are just
-pushing a typo in a comment, some of the steps may be unnecessary.
-Apply your common sense, but if in doubt, err on the side of caution.
-
-First, make sure that the commits and branches you are going to push
-match what you want pushed and that nothing is missing, extraneous or
-wrong. You can see what will be pushed by running the git push command
-with --dry-run first, and then inspecting the commits listed with
-@command{git log -p 1234567..987654}. The @command{git status} command
-may help you find local changes that you forgot to add.
-
-Next, let the code pass through a full run of our test suite.
-
-@itemize
-@item @command{make distclean}
-@item @command{/path/to/ffmpeg/configure}
-@item @command{make check}
-@item if FATE fails due to missing samples, run @command{make fate-rsync} and retry
-@end itemize
-
-Make sure all your changes have been checked before pushing them; the
-test suite only checks against regressions, and only to some extent. It
-obviously does not verify that newly added features or code work unless
-you have added a test for them (which is recommended).
-
-Also note that every single commit should pass the test suite, not just
-the result of a series of patches.
-
-Once everything has passed, push the changes to your public ffmpeg clone
-and post a merge request to ffmpeg-devel. You can also push them
-directly, but this is not recommended.
-
-@chapter Server Issues
-
-Contact the project admins @email{root@@ffmpeg.org} if you have technical
-problems with the GIT server.
diff --git a/ffmpeg/doc/indevs.texi b/ffmpeg/doc/indevs.texi
deleted file mode 100644
index 72b1493..0000000
--- a/ffmpeg/doc/indevs.texi
+++ /dev/null
@@ -1,763 +0,0 @@
-@chapter Input Devices
-@c man begin INPUT DEVICES
-
-Input devices are configured elements in FFmpeg which allow you to
-access the data coming from a multimedia device attached to your system.
-
-When you configure your FFmpeg build, all the supported input devices
-are enabled by default. You can list all available ones using the
-configure option "--list-indevs".
-
-You can disable all the input devices using the configure option
-"--disable-indevs", and selectively enable an input device using the
-option "--enable-indev=@var{INDEV}", or you can disable a particular
-input device using the option "--disable-indev=@var{INDEV}".
-
-The option "-formats" of the ff* tools will display the list of
-supported input devices (amongst the demuxers).
-
-A description of the currently available input devices follows.
-
-@section alsa
-
-ALSA (Advanced Linux Sound Architecture) input device.
-
-To enable this input device during configuration you need libasound
-installed on your system.
-
-This device allows capturing from an ALSA device. The name of the
-device to capture has to be an ALSA card identifier.
-
-An ALSA identifier has the syntax:
-@example
-hw:@var{CARD}[,@var{DEV}[,@var{SUBDEV}]]
-@end example
-
-where the @var{DEV} and @var{SUBDEV} components are optional.
- -The three arguments (in order: @var{CARD},@var{DEV},@var{SUBDEV}) -specify card number or identifier, device number and subdevice number -(-1 means any). - -To see the list of cards currently recognized by your system check the -files @file{/proc/asound/cards} and @file{/proc/asound/devices}. - -For example to capture with @command{ffmpeg} from an ALSA device with -card id 0, you may run the command: -@example -ffmpeg -f alsa -i hw:0 alsaout.wav -@end example - -For more information see: -@url{http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html} - -@section bktr - -BSD video input device. - -@section dshow - -Windows DirectShow input device. - -DirectShow support is enabled when FFmpeg is built with the mingw-w64 project. -Currently only audio and video devices are supported. - -Multiple devices may be opened as separate inputs, but they may also be -opened on the same input, which should improve synchronism between them. - -The input name should be in the format: - -@example -@var{TYPE}=@var{NAME}[:@var{TYPE}=@var{NAME}] -@end example - -where @var{TYPE} can be either @var{audio} or @var{video}, -and @var{NAME} is the device's name. - -@subsection Options - -If no options are specified, the device's defaults are used. -If the device does not support the requested options, it will -fail to open. - -@table @option - -@item video_size -Set the video size in the captured video. - -@item framerate -Set the frame rate in the captured video. - -@item sample_rate -Set the sample rate (in Hz) of the captured audio. - -@item sample_size -Set the sample size (in bits) of the captured audio. - -@item channels -Set the number of channels in the captured audio. - -@item list_devices -If set to @option{true}, print a list of devices and exit. - -@item list_options -If set to @option{true}, print a list of selected device's options -and exit. - -@item video_device_number -Set video device number for devices with same name (starts at 0, -defaults to 0). - -@item audio_device_number -Set audio device number for devices with same name (starts at 0, -defaults to 0). - -@item pixel_format -Select pixel format to be used by DirectShow. This may only be set when -the video codec is not set or set to rawvideo. - -@item audio_buffer_size -Set audio device buffer size in milliseconds (which can directly -impact latency, depending on the device). -Defaults to using the audio device's -default buffer size (typically some multiple of 500ms). -Setting this value too low can degrade performance. -See also -@url{http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx} - -@end table - -@subsection Examples - -@itemize - -@item -Print the list of DirectShow supported devices and exit: -@example -$ ffmpeg -list_devices true -f dshow -i dummy -@end example - -@item -Open video device @var{Camera}: -@example -$ ffmpeg -f dshow -i video="Camera" -@end example - -@item -Open second video device with name @var{Camera}: -@example -$ ffmpeg -f dshow -video_device_number 1 -i video="Camera" -@end example - -@item -Open video device @var{Camera} and audio device @var{Microphone}: -@example -$ ffmpeg -f dshow -i video="Camera":audio="Microphone" -@end example - -@item -Print the list of supported options in selected device and exit: -@example -$ ffmpeg -list_options true -f dshow -i video="Camera" -@end example - -@end itemize - -@section dv1394 - -Linux DV 1394 input device. - -@section fbdev - -Linux framebuffer input device. 
-
-The Linux framebuffer is a graphic hardware-independent abstraction
-layer to show graphics on a computer monitor, typically on the
-console. It is accessed through a file device node, usually
-@file{/dev/fb0}.
-
-For more detailed information, read the file
-Documentation/fb/framebuffer.txt included in the Linux source tree.
-
-To record from the framebuffer device @file{/dev/fb0} with
-@command{ffmpeg}:
-@example
-ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
-@end example
-
-You can take a single screenshot image with the command:
-@example
-ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
-@end example
-
-See also @url{http://linux-fbdev.sourceforge.net/}, and fbset(1).
-
-@section iec61883
-
-FireWire DV/HDV input device using libiec61883.
-
-To enable this input device, you need libiec61883, libraw1394 and
-libavc1394 installed on your system. Use the configure option
-@code{--enable-libiec61883} to compile with the device enabled.
-
-The iec61883 capture device supports capturing from a video device
-connected via IEEE1394 (FireWire), using libiec61883 and the new Linux
-FireWire stack (juju). This is the default DV/HDV input method in Linux
-Kernel 2.6.37 and later, since the old FireWire stack was removed.
-
-Specify the FireWire port to be used as the input file, or "auto"
-to choose the first port connected.
-
-@subsection Options
-
-@table @option
-
-@item dvtype
-Override autodetection of DV/HDV. This should only be used if
-autodetection does not work, or if usage of a different device type
-should be prohibited. Treating a DV device as HDV (or vice versa) will
-not work and will result in undefined behavior.
-The values @option{auto}, @option{dv} and @option{hdv} are supported.
-
-@item dvbuffer
-Set the maximum size of the buffer for incoming data, in frames. For DV,
-this is an exact value. For HDV, it is not frame exact, since HDV does
-not have a fixed frame size.
-
-@item dvguid
-Select the capture device by specifying its GUID. Capturing will only
-be performed from the specified device and fails if no device with the
-given GUID is found. This is useful for selecting the input if multiple
-devices are connected at the same time.
-Look at /sys/bus/firewire/devices to find out the GUIDs.
-
-@end table
-
-@subsection Examples
-
-@itemize
-
-@item
-Grab and show the input of a FireWire DV/HDV device.
-@example
-ffplay -f iec61883 -i auto
-@end example
-
-@item
-Grab and record the input of a FireWire DV/HDV device,
-using a packet buffer of 100000 packets if the source is HDV.
-@example
-ffmpeg -f iec61883 -i auto -dvbuffer 100000 out.mpg
-@end example
-
-@end itemize
-
-@section jack
-
-JACK input device.
-
-To enable this input device during configuration you need libjack
-installed on your system.
-
-A JACK input device creates one or more JACK writable clients, one for
-each audio channel, with name @var{client_name}:input_@var{N}, where
-@var{client_name} is the name provided by the application, and @var{N}
-is a number which identifies the channel.
-Each writable client will send the acquired data to the FFmpeg input
-device.
-
-Once you have created one or more JACK readable clients, you need to
-connect them to one or more JACK writable clients.
-
-To connect or disconnect JACK clients you can use the @command{jack_connect}
-and @command{jack_disconnect} programs, or do it through a graphical interface,
-for example with @command{qjackctl}.
-
-To list the JACK clients and their properties you can invoke the command
-@command{jack_lsp}.
- -Follows an example which shows how to capture a JACK readable client -with @command{ffmpeg}. -@example -# Create a JACK writable client with name "ffmpeg". -$ ffmpeg -f jack -i ffmpeg -y out.wav - -# Start the sample jack_metro readable client. -$ jack_metro -b 120 -d 0.2 -f 4000 - -# List the current JACK clients. -$ jack_lsp -c -system:capture_1 -system:capture_2 -system:playback_1 -system:playback_2 -ffmpeg:input_1 -metro:120_bpm - -# Connect metro to the ffmpeg writable client. -$ jack_connect metro:120_bpm ffmpeg:input_1 -@end example - -For more information read: -@url{http://jackaudio.org/} - -@section lavfi - -Libavfilter input virtual device. - -This input device reads data from the open output pads of a libavfilter -filtergraph. - -For each filtergraph open output, the input device will create a -corresponding stream which is mapped to the generated output. Currently -only video data is supported. The filtergraph is specified through the -option @option{graph}. - -@subsection Options - -@table @option - -@item graph -Specify the filtergraph to use as input. Each video open output must be -labelled by a unique string of the form "out@var{N}", where @var{N} is a -number starting from 0 corresponding to the mapped input stream -generated by the device. -The first unlabelled output is automatically assigned to the "out0" -label, but all the others need to be specified explicitly. - -If not specified defaults to the filename specified for the input -device. - -@item graph_file -Set the filename of the filtergraph to be read and sent to the other -filters. Syntax of the filtergraph is the same as the one specified by -the option @var{graph}. - -@end table - -@subsection Examples - -@itemize -@item -Create a color video stream and play it back with @command{ffplay}: -@example -ffplay -f lavfi -graph "color=c=pink [out0]" dummy -@end example - -@item -As the previous example, but use filename for specifying the graph -description, and omit the "out0" label: -@example -ffplay -f lavfi color=c=pink -@end example - -@item -Create three different video test filtered sources and play them: -@example -ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3 -@end example - -@item -Read an audio stream from a file using the amovie source and play it -back with @command{ffplay}: -@example -ffplay -f lavfi "amovie=test.wav" -@end example - -@item -Read an audio stream and a video stream and play it back with -@command{ffplay}: -@example -ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]" -@end example - -@end itemize - -@section libdc1394 - -IIDC1394 input device, based on libdc1394 and libraw1394. - -@section openal - -The OpenAL input device provides audio capture on all systems with a -working OpenAL 1.1 implementation. - -To enable this input device during configuration, you need OpenAL -headers and libraries installed on your system, and need to configure -FFmpeg with @code{--enable-openal}. - -OpenAL headers and libraries should be provided as part of your OpenAL -implementation, or as an additional download (an SDK). Depending on your -installation you may need to specify additional flags via the -@code{--extra-cflags} and @code{--extra-ldflags} for allowing the build -system to locate the OpenAL headers and libraries. - -An incomplete list of OpenAL implementations follows: - -@table @strong -@item Creative -The official Windows implementation, providing hardware acceleration -with supported devices and software fallback. 
-See @url{http://openal.org/}. -@item OpenAL Soft -Portable, open source (LGPL) software implementation. Includes -backends for the most common sound APIs on the Windows, Linux, -Solaris, and BSD operating systems. -See @url{http://kcat.strangesoft.net/openal.html}. -@item Apple -OpenAL is part of Core Audio, the official Mac OS X Audio interface. -See @url{http://developer.apple.com/technologies/mac/audio-and-video.html} -@end table - -This device allows to capture from an audio input device handled -through OpenAL. - -You need to specify the name of the device to capture in the provided -filename. If the empty string is provided, the device will -automatically select the default device. You can get the list of the -supported devices by using the option @var{list_devices}. - -@subsection Options - -@table @option - -@item channels -Set the number of channels in the captured audio. Only the values -@option{1} (monaural) and @option{2} (stereo) are currently supported. -Defaults to @option{2}. - -@item sample_size -Set the sample size (in bits) of the captured audio. Only the values -@option{8} and @option{16} are currently supported. Defaults to -@option{16}. - -@item sample_rate -Set the sample rate (in Hz) of the captured audio. -Defaults to @option{44.1k}. - -@item list_devices -If set to @option{true}, print a list of devices and exit. -Defaults to @option{false}. - -@end table - -@subsection Examples - -Print the list of OpenAL supported devices and exit: -@example -$ ffmpeg -list_devices true -f openal -i dummy out.ogg -@end example - -Capture from the OpenAL device @file{DR-BT101 via PulseAudio}: -@example -$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg -@end example - -Capture from the default device (note the empty string '' as filename): -@example -$ ffmpeg -f openal -i '' out.ogg -@end example - -Capture from two devices simultaneously, writing to two different files, -within the same @command{ffmpeg} command: -@example -$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg -@end example -Note: not all OpenAL implementations support multiple simultaneous capture - -try the latest OpenAL Soft if the above does not work. - -@section oss - -Open Sound System input device. - -The filename to provide to the input device is the device node -representing the OSS input device, and is usually set to -@file{/dev/dsp}. - -For example to grab from @file{/dev/dsp} using @command{ffmpeg} use the -command: -@example -ffmpeg -f oss -i /dev/dsp /tmp/oss.wav -@end example - -For more information about OSS see: -@url{http://manuals.opensound.com/usersguide/dsp.html} - -@section pulse - -PulseAudio input device. - -To enable this output device you need to configure FFmpeg with @code{--enable-libpulse}. - -The filename to provide to the input device is a source device or the -string "default" - -To list the PulseAudio source devices and their properties you can invoke -the command @command{pactl list sources}. - -More information about PulseAudio can be found on @url{http://www.pulseaudio.org}. - -@subsection Options -@table @option -@item server -Connect to a specific PulseAudio server, specified by an IP address. -Default server is used when not provided. - -@item name -Specify the application name PulseAudio will use when showing active clients, -by default it is the @code{LIBAVFORMAT_IDENT} string. - -@item stream_name -Specify the stream name PulseAudio will use when showing active streams, -by default it is "record". 
- -@item sample_rate -Specify the samplerate in Hz, by default 48kHz is used. - -@item channels -Specify the channels in use, by default 2 (stereo) is set. - -@item frame_size -Specify the number of bytes per frame, by default it is set to 1024. - -@item fragment_size -Specify the minimal buffering fragment in PulseAudio, it will affect the -audio latency. By default it is unset. -@end table - -@subsection Examples -Record a stream from default device: -@example -ffmpeg -f pulse -i default /tmp/pulse.wav -@end example - -@section sndio - -sndio input device. - -To enable this input device during configuration you need libsndio -installed on your system. - -The filename to provide to the input device is the device node -representing the sndio input device, and is usually set to -@file{/dev/audio0}. - -For example to grab from @file{/dev/audio0} using @command{ffmpeg} use the -command: -@example -ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav -@end example - -@section video4linux2, v4l2 - -Video4Linux2 input video device. - -"v4l2" can be used as alias for "video4linux2". - -If FFmpeg is built with v4l-utils support (by using the -@code{--enable-libv4l2} configure option), it is possible to use it with the -@code{-use_libv4l2} input device option. - -The name of the device to grab is a file device node, usually Linux -systems tend to automatically create such nodes when the device -(e.g. an USB webcam) is plugged into the system, and has a name of the -kind @file{/dev/video@var{N}}, where @var{N} is a number associated to -the device. - -Video4Linux2 devices usually support a limited set of -@var{width}x@var{height} sizes and frame rates. You can check which are -supported using @command{-list_formats all} for Video4Linux2 devices. -Some devices, like TV cards, support one or more standards. It is possible -to list all the supported standards using @command{-list_standards all}. - -The time base for the timestamps is 1 microsecond. Depending on the kernel -version and configuration, the timestamps may be derived from the real time -clock (origin at the Unix Epoch) or the monotonic clock (origin usually at -boot time, unaffected by NTP or manual changes to the clock). The -@option{-timestamps abs} or @option{-ts abs} option can be used to force -conversion into the real time clock. - -Some usage examples of the video4linux2 device with @command{ffmpeg} -and @command{ffplay}: -@itemize -@item -Grab and show the input of a video4linux2 device: -@example -ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0 -@end example - -@item -Grab and record the input of a video4linux2 device, leave the -frame rate and size as previously set: -@example -ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg -@end example -@end itemize - -For more information about Video4Linux, check @url{http://linuxtv.org/}. - -@subsection Options - -@table @option -@item standard -Set the standard. Must be the name of a supported standard. To get a -list of the supported standards, use the @option{list_standards} -option. - -@item channel -Set the input channel number. Default to -1, which means using the -previously selected channel. - -@item video_size -Set the video frame size. The argument must be a string in the form -@var{WIDTH}x@var{HEIGHT} or a valid size abbreviation. - -@item pixel_format -Select the pixel format (only valid for raw video input). - -@item input_format -Set the preferred pixel format (for raw video) or a codec name. 
-This option allows to select the input format, when several are -available. - -@item framerate -Set the preferred video frame rate. - -@item list_formats -List available formats (supported pixel formats, codecs, and frame -sizes) and exit. - -Available values are: -@table @samp -@item all -Show all available (compressed and non-compressed) formats. - -@item raw -Show only raw video (non-compressed) formats. - -@item compressed -Show only compressed formats. -@end table - -@item list_standards -List supported standards and exit. - -Available values are: -@table @samp -@item all -Show all supported standards. -@end table - -@item timestamps, ts -Set type of timestamps for grabbed frames. - -Available values are: -@table @samp -@item default -Use timestamps from the kernel. - -@item abs -Use absolute timestamps (wall clock). - -@item mono2abs -Force conversion from monotonic to absolute timestamps. -@end table - -Default value is @code{default}. -@end table - -@section vfwcap - -VfW (Video for Windows) capture input device. - -The filename passed as input is the capture driver number, ranging from -0 to 9. You may use "list" as filename to print a list of drivers. Any -other filename will be interpreted as device number 0. - -@section x11grab - -X11 video input device. - -This device allows to capture a region of an X11 display. - -The filename passed as input has the syntax: -@example -[@var{hostname}]:@var{display_number}.@var{screen_number}[+@var{x_offset},@var{y_offset}] -@end example - -@var{hostname}:@var{display_number}.@var{screen_number} specifies the -X11 display name of the screen to grab from. @var{hostname} can be -omitted, and defaults to "localhost". The environment variable -@env{DISPLAY} contains the default display name. - -@var{x_offset} and @var{y_offset} specify the offsets of the grabbed -area with respect to the top-left border of the X11 screen. They -default to 0. - -Check the X11 documentation (e.g. man X) for more detailed information. - -Use the @command{dpyinfo} program for getting basic information about the -properties of your X11 display (e.g. grep for "name" or "dimensions"). - -For example to grab from @file{:0.0} using @command{ffmpeg}: -@example -ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg -@end example - -Grab at position @code{10,20}: -@example -ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg -@end example - -@subsection Options - -@table @option -@item draw_mouse -Specify whether to draw the mouse pointer. A value of @code{0} specify -not to draw the pointer. Default value is @code{1}. - -@item follow_mouse -Make the grabbed area follow the mouse. The argument can be -@code{centered} or a number of pixels @var{PIXELS}. - -When it is specified with "centered", the grabbing region follows the mouse -pointer and keeps the pointer at the center of region; otherwise, the region -follows only when the mouse pointer reaches within @var{PIXELS} (greater than -zero) to the edge of region. - -For example: -@example -ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg -@end example - -To follow only when the mouse pointer reaches within 100 pixels to edge: -@example -ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg -@end example - -@item framerate -Set the grabbing frame rate. Default value is @code{ntsc}, -corresponding to a frame rate of @code{30000/1001}. - -@item show_region -Show grabbed region on screen. 
- -If @var{show_region} is specified with @code{1}, then the grabbing -region will be indicated on screen. With this option, it is easy to -know what is being grabbed if only a portion of the screen is grabbed. - -For example: -@example -ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg -@end example - -With @var{follow_mouse}: -@example -ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg -@end example - -@item video_size -Set the video frame size. Default value is @code{vga}. -@end table - -@c man end INPUT DEVICES diff --git a/ffmpeg/doc/issue_tracker.txt b/ffmpeg/doc/issue_tracker.txt deleted file mode 100644 index 33b3535..0000000 --- a/ffmpeg/doc/issue_tracker.txt +++ /dev/null @@ -1,194 +0,0 @@ -FFmpeg's bug/feature request tracker manual -================================================= - -NOTE: This is a draft. - -Overview: ---------- - -FFmpeg uses Trac for tracking issues, new issues and changes to -existing issues can be done through a web interface. - -Issues can be different kinds of things we want to keep track of -but that do not belong into the source tree itself. This includes -bug reports, feature requests and license violations. We -might add more items to this list in the future, so feel free to -propose a new `type of issue' on the ffmpeg-devel mailing list if -you feel it is worth tracking. - -It is possible to subscribe to individual issues by adding yourself to the -Cc list or to subscribe to the ffmpeg-trac mailing list which receives -a mail for every change to every issue. -(the above does all work already after light testing) - -The subscription URL for the ffmpeg-trac list is: -http(s)://ffmpeg.org/mailman/listinfo/ffmpeg-trac -The URL of the webinterface of the tracker is: -http(s)://trac.ffmpeg.org - -Type: ------ -art - Artwork such as photos, music, banners, and logos. - -bug / defect - An error, flaw, mistake, failure, or fault in FFmpeg or libav* that - prevents it from behaving as intended. - -feature request / enhancement - Request of support for encoding or decoding of a new codec, container - or variant. - Request of support for more, less or plain different output or behavior - where the current implementation cannot be considered wrong. - -license violation - ticket to keep track of (L)GPL violations of ffmpeg by others - -sponsoring request - Developer requests for hardware, software, specifications, money, - refunds, etc. - -Priority: ---------- -critical - Bugs about data loss and security issues. - No feature request can be critical. - -important - Bugs which make FFmpeg unusable for a significant number of users. - Examples here might be completely broken MPEG-4 decoding or a build issue - on Linux. - While broken 4xm decoding or a broken OS/2 build would not be important, - the separation to normal is somewhat fuzzy. - For feature requests this priority would be used for things many people - want. - Regressions also should be marked as important, regressions are bugs that - don't exist in a past revision or another branch. - -normal - - -minor - Bugs about things like spelling errors, "mp2" instead of - "mp3" being shown and such. - Feature requests about things few people want or which do not make a big - difference. - -wish - Something that is desirable to have but that there is no urgency at - all to implement, e.g. something completely cosmetic like a website - restyle or a personalized doxy template or the FFmpeg logo. 
- This priority is not valid for bugs. - - -Status: -------- -new - initial state - -open - intermediate states - -closed - final state - - -Analyzed flag: --------------- -Bugs which have been analyzed and where it is understood what causes them -and which exact chain of events triggers them. This analysis should be -available as a message in the bug report. -Note, do not change the status to analyzed without also providing a clear -and understandable analysis. -This state implicates that the bug either has been reproduced or that -reproduction is not needed as the bug is already understood. - - -Type/Status: ----------- -*/new - Initial state of new bugs and feature requests submitted by - users. - -*/open - Issues which have been briefly looked at and which did not look outright - invalid. - This implicates that no real more detailed state applies yet. Conversely, - the more detailed states below implicate that the issue has been briefly - looked at. - -*/closed/duplicate - Bugs or feature requests which are duplicates. - Note, if you mark something as duplicate, do not forget setting the - superseder so bug reports are properly linked. - -*/closed/invalid - Bugs caused by user errors, random ineligible or otherwise nonsense stuff. - -*/closed/needs_more_info - Issues for which some information has been requested by the developers, - but which has not been provided by anyone within reasonable time. - - -bug/closed/fixed - Bugs which have to the best of our knowledge been fixed. - -bug/closed/wontfix - Bugs which we will not fix. Possible reasons include legality, high - complexity for the sake of supporting obscure corner cases, speed loss - for similarly esoteric purposes, et cetera. - This also means that we would reject a patch. - If we are just too lazy to fix a bug then the correct state is open - and unassigned. Closed means that the case is closed which is not - the case if we are just waiting for a patch. - -bug/closed/works_for_me - Bugs for which sufficient information was provided to reproduce but - reproduction failed - that is the code seems to work correctly to the - best of our knowledge. - -feature_request/closed/fixed - Feature requests which have been implemented. - -feature_request/closed/wontfix - Feature requests which will not be implemented. The reasons here could - be legal, philosophical or others. - -Note2, if you provide the requested info do not forget to remove the -needs_more_info resolution. 
- -Component: ----------- - -avcodec - issues in libavcodec/* - -avformat - issues in libavformat/* - -avutil - issues in libavutil/* - -regression test - issues in tests/* - -ffmpeg - issues in or related to ffmpeg.c - -ffplay - issues in or related to ffplay.c - -ffprobe - issues in or related to ffprobe.c - -ffserver - issues in or related to ffserver.c - -build system - issues in or related to configure/Makefile - -regression - bugs which were not present in a past revision - -trac - issues related to our issue tracker diff --git a/ffmpeg/doc/libavcodec.texi b/ffmpeg/doc/libavcodec.texi deleted file mode 100644 index 618f9f6..0000000 --- a/ffmpeg/doc/libavcodec.texi +++ /dev/null @@ -1,48 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle Libavcodec Documentation -@titlepage -@center @titlefont{Libavcodec Documentation} -@end titlepage - -@top - -@contents - -@chapter Description -@c man begin DESCRIPTION - -The libavcodec library provides a generic encoding/decoding framework -and contains multiple decoders and encoders for audio, video and -subtitle streams, and several bitstream filters. - -The shared architecture provides various services ranging from bit -stream I/O to DSP optimizations, and makes it suitable for -implementing robust and fast codecs as well as for experimentation. - -@c man end DESCRIPTION - -@chapter See Also - -@ifhtml -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{ffmpeg-codecs.html,ffmpeg-codecs}, @url{ffmpeg-bitstream-filters.html,bitstream-filters}, -@url{libavutil.html,libavutil} -@end ifhtml - -@ifnothtml -ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), -ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), -libavutil(3) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename libavcodec -@settitle media streams decoding and encoding library - -@end ignore - -@bye diff --git a/ffmpeg/doc/libavdevice.texi b/ffmpeg/doc/libavdevice.texi deleted file mode 100644 index d5f790b..0000000 --- a/ffmpeg/doc/libavdevice.texi +++ /dev/null @@ -1,45 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle Libavdevice Documentation -@titlepage -@center @titlefont{Libavdevice Documentation} -@end titlepage - -@top - -@contents - -@chapter Description -@c man begin DESCRIPTION - -The libavdevice library provides a generic framework for grabbing from -and rendering to many common multimedia input/output devices, and -supports several input and output devices, including Video4Linux2, -VfW, DShow, and ALSA. 
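From the command line these devices are reached through the @option{-f} flag of the ff* tools; a minimal capture sketch, assuming an ALSA card @samp{hw:0} and a V4L2 camera at @file{/dev/video0}, could look like this:

@example
# Record audio from ALSA card hw:0 and video from a V4L2 webcam
# into a single output file.
ffmpeg -f alsa -i hw:0 -f video4linux2 -i /dev/video0 capture.mkv
@end example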
- -@c man end DESCRIPTION - -@chapter See Also - -@ifhtml -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{ffmpeg-devices.html,ffmpeg-devices}, -@url{libavutil.html,libavutil}, @url{libavcodec.html,libavcodec}, @url{libavformat.html,libavformat} -@end ifhtml - -@ifnothtml -ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), -ffmpeg-devices(1), -libavutil(3), libavcodec(3), libavformat(3) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename libavdevice -@settitle multimedia device handling library - -@end ignore - -@bye diff --git a/ffmpeg/doc/libavfilter.texi b/ffmpeg/doc/libavfilter.texi deleted file mode 100644 index 4f82944..0000000 --- a/ffmpeg/doc/libavfilter.texi +++ /dev/null @@ -1,44 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle Libavfilter Documentation -@titlepage -@center @titlefont{Libavfilter Documentation} -@end titlepage - -@top - -@contents - -@chapter Description -@c man begin DESCRIPTION - -The libavfilter library provides a generic audio/video filtering -framework containing several filters, sources and sinks. - -@c man end DESCRIPTION - -@chapter See Also - -@ifhtml -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{ffmpeg-filters.html,ffmpeg-filters}, -@url{libavutil.html,libavutil}, @url{libswscale.html,libswscale}, @url{libswresample.html,libswresample}, -@url{libavcodec.html,libavcodec}, @url{libavformat.html,libavformat}, @url{libavdevice.html,libavdevice} -@end ifhtml - -@ifnothtml -ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), -ffmpeg-filters(1), -libavutil(3), libswscale(3), libswresample(3), libavcodec(3), libavformat(3), libavdevice(3) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename libavfilter -@settitle multimedia filtering library - -@end ignore - -@bye diff --git a/ffmpeg/doc/libavformat.texi b/ffmpeg/doc/libavformat.texi deleted file mode 100644 index 85e49cb..0000000 --- a/ffmpeg/doc/libavformat.texi +++ /dev/null @@ -1,48 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle Libavformat Documentation -@titlepage -@center @titlefont{Libavformat Documentation} -@end titlepage - -@top - -@contents - -@chapter Description -@c man begin DESCRIPTION - -The libavformat library provides a generic framework for multiplexing -and demultiplexing (muxing and demuxing) audio, video and subtitle -streams. It encompasses multiple muxers and demuxers for multimedia -container formats. - -It also supports several input and output protocols to access a media -resource. 
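As a hedged, minimal sketch of the demuxing side, the following program opens whatever file is named on the command line, lets libavformat probe it, and dumps the detected container and stream information; error handling is reduced to the bare minimum.

@example
#include <libavformat/avformat.h>

int main(int argc, char **argv)
{
    AVFormatContext *fmt = NULL;

    if (argc < 2)
        return 1;

    av_register_all();                        /* register demuxers, muxers and protocols */

    if (avformat_open_input(&fmt, argv[1], NULL, NULL) < 0)
        return 1;                             /* could not open or recognize the input */

    if (avformat_find_stream_info(fmt, NULL) < 0) {
        avformat_close_input(&fmt);
        return 1;
    }

    av_dump_format(fmt, 0, argv[1], 0);       /* container and per-stream information */
    avformat_close_input(&fmt);
    return 0;
}
@end example

A real application would continue by iterating over @code{fmt->streams} and reading packets with @code{av_read_frame()}.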
- -@c man end DESCRIPTION - -@chapter See Also - -@ifhtml -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{ffmpeg-formats.html,ffmpeg-formats}, @url{ffmpeg-protocols.html,ffmpeg-protocols}, -@url{libavutil.html,libavutil}, @url{libavcodec.html,libavcodec} -@end ifhtml - -@ifnothtml -ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), -ffmpeg-formats(1), ffmpeg-protocols(1), -libavutil(3), libavcodec(3) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename libavformat -@settitle multimedia muxing and demuxing library - -@end ignore - -@bye diff --git a/ffmpeg/doc/libavutil.texi b/ffmpeg/doc/libavutil.texi deleted file mode 100644 index 5ec7e84..0000000 --- a/ffmpeg/doc/libavutil.texi +++ /dev/null @@ -1,62 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle Libavutil Documentation -@titlepage -@center @titlefont{Libavutil Documentation} -@end titlepage - -@top - -@contents - -@chapter Description -@c man begin DESCRIPTION - -The libavutil library is a utility library to aid portable -multimedia programming. It contains safe portable string functions, -random number generators, data structures, additional mathematics -functions, cryptography and multimedia related functionality (like -enumerations for pixel and sample formats). It is not a library for -code needed by both libavcodec and libavformat. - -The goals for this library is to be: - -@table @strong -@item Modular -It should have few interdependencies and the possibility of disabling individual -parts during @command{./configure}. - -@item Small -Both sources and objects should be small. - -@item Efficient -It should have low CPU and memory usage. - -@item Useful -It should avoid useless features that almost no one needs. -@end table - -@c man end DESCRIPTION - -@chapter See Also - -@ifhtml -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{ffmpeg-utils.html,ffmpeg-utils} -@end ifhtml - -@ifnothtml -ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), -ffmpeg-utils(1) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename libavutil -@settitle multimedia-biased utility library - -@end ignore - -@bye diff --git a/ffmpeg/doc/libswresample.texi b/ffmpeg/doc/libswresample.texi deleted file mode 100644 index 383e537..0000000 --- a/ffmpeg/doc/libswresample.texi +++ /dev/null @@ -1,70 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle Libswresample Documentation -@titlepage -@center @titlefont{Libswresample Documentation} -@end titlepage - -@top - -@contents - -@chapter Description -@c man begin DESCRIPTION - -The libswresample library performs highly optimized audio resampling, -rematrixing and sample format conversion operations. - -Specifically, this library performs the following conversions: - -@itemize -@item -@emph{Resampling}: is the process of changing the audio rate, for -example from a high sample rate of 44100Hz to 8000Hz. Audio -conversion from high to low sample rate is a lossy process. Several -resampling options and algorithms are available. - -@item -@emph{Format conversion}: is the process of converting the type of -samples, for example from 16-bit signed samples to unsigned 8-bit or -float samples. It also handles packing conversion, when passing from -packed layout (all samples belonging to distinct channels interleaved -in the same buffer), to planar layout (all samples belonging to the -same channel stored in a dedicated buffer or "plane"). 
- -@item -@emph{Rematrixing}: is the process of changing the channel layout, for -example from stereo to mono. When the input channels cannot be mapped -to the output streams, the process is lossy, since it involves -different gain factors and mixing. -@end itemize - -Various other audio conversions (e.g. stretching and padding) are -enabled through dedicated options. - -@c man end DESCRIPTION - -@chapter See Also - -@ifhtml -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{ffmpeg-resampler.html,ffmpeg-resampler}, -@url{libavutil.html,libavutil} -@end ifhtml - -@ifnothtml -ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), -ffmpeg-resampler(1), -libavutil(3) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename libswresample -@settitle audio resampling library - -@end ignore - -@bye diff --git a/ffmpeg/doc/libswscale.texi b/ffmpeg/doc/libswscale.texi deleted file mode 100644 index 818e988..0000000 --- a/ffmpeg/doc/libswscale.texi +++ /dev/null @@ -1,63 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle Libswscale Documentation -@titlepage -@center @titlefont{Libswscale Documentation} -@end titlepage - -@top - -@contents - -@chapter Description -@c man begin DESCRIPTION - -The libswscale library performs highly optimized image scaling and -colorspace and pixel format conversion operations. - -Specifically, this library performs the following conversions: - -@itemize -@item -@emph{Rescaling}: is the process of changing the video size. Several -rescaling options and algorithms are available. This is usually a -lossy process. - -@item -@emph{Pixel format conversion}: is the process of converting the image -format and colorspace of the image, for example from planar YUV420P to -RGB24 packed. It also handles packing conversion, that is converts -from packed layout (all pixels belonging to distinct planes -interleaved in the same buffer), to planar layout (all samples -belonging to the same plane stored in a dedicated buffer or "plane"). - -This is usually a lossy process in case the source and destination -colorspaces differ. -@end itemize - -@c man end DESCRIPTION - -@chapter See Also - -@ifhtml -@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver}, -@url{ffmpeg-scaler.html,ffmpeg-scaler}, -@url{libavutil.html,libavutil} -@end ifhtml - -@ifnothtml -ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), -ffmpeg-scaler(1), -libavutil(3) -@end ifnothtml - -@include authors.texi - -@ignore - -@setfilename libswscale -@settitle video scaling and pixel format conversion library - -@end ignore - -@bye diff --git a/ffmpeg/doc/metadata.texi b/ffmpeg/doc/metadata.texi deleted file mode 100644 index b7fc789..0000000 --- a/ffmpeg/doc/metadata.texi +++ /dev/null @@ -1,84 +0,0 @@ -@chapter Metadata -@c man begin METADATA - -FFmpeg is able to dump metadata from media files into a simple UTF-8-encoded -INI-like text file and then load it back using the metadata muxer/demuxer. - -The file format is as follows: -@enumerate - -@item -A file consists of a header and a number of metadata tags divided into sections, -each on its own line. - -@item -The header is a ';FFMETADATA' string, followed by a version number (now 1). - -@item -Metadata tags are of the form 'key=value' - -@item -Immediately after header follows global metadata - -@item -After global metadata there may be sections with per-stream/per-chapter -metadata. 
- -@item -A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in -brackets ('[', ']') and ends with next section or end of file. - -@item -At the beginning of a chapter section there may be an optional timebase to be -used for start/end values. It must be in form 'TIMEBASE=num/den', where num and -den are integers. If the timebase is missing then start/end times are assumed to -be in milliseconds. -Next a chapter section must contain chapter start and end times in form -'START=num', 'END=num', where num is a positive integer. - -@item -Empty lines and lines starting with ';' or '#' are ignored. - -@item -Metadata keys or values containing special characters ('=', ';', '#', '\' and a -newline) must be escaped with a backslash '\'. - -@item -Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of -the tag (in the example above key is 'foo ', value is ' bar'). -@end enumerate - -A ffmetadata file might look like this: -@example -;FFMETADATA1 -title=bike\\shed -;this is a comment -artist=FFmpeg troll team - -[CHAPTER] -TIMEBASE=1/1000 -START=0 -#chapter ends at 0:01:00 -END=60000 -title=chapter \#1 -[STREAM] -title=multi\ -line -@end example - -By using the ffmetadata muxer and demuxer it is possible to extract -metadata from an input file to an ffmetadata file, and then transcode -the file into an output file with the edited ffmetadata file. - -Extracting an ffmetadata file with @file{ffmpeg} goes as follows: -@example -ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE -@end example - -Reinserting edited metadata information from the FFMETADATAFILE file can -be done as: -@example -ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT -@end example - -@c man end METADATA diff --git a/ffmpeg/doc/mips.txt b/ffmpeg/doc/mips.txt deleted file mode 100644 index 8c6779f..0000000 --- a/ffmpeg/doc/mips.txt +++ /dev/null @@ -1,75 +0,0 @@ -MIPS optimizations info -=============================================== - -MIPS optimizations of codecs are targeting MIPS 74k family of -CPUs. Some of these optimizations are relying more on properties of -this architecture and some are relying less (and can be used on most -MIPS architectures without degradation in performance). - -Along with FFMPEG copyright notice, there is MIPS copyright notice in -all the files that are created by people from MIPS Technologies. - -Example of copyright notice: -=============================================== -/* - * Copyright (c) 2012 - * MIPS Technologies, Inc., California. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * Author: Author Name (author_name@@mips.com) - */ - -Files that have MIPS copyright notice in them: -=============================================== -* libavutil/mips/ - float_dsp_mips.c - libm_mips.h -* libavcodec/ - fft_fixed_32.c - fft_init_table.c - fft_table.h - mdct_fixed_32.c -* libavcodec/mips/ - aaccoder_mips.c - aacpsy_mips.h - ac3dsp_mips.c - acelp_filters_mips.c - acelp_vectors_mips.c - amrwbdec_mips.c - amrwbdec_mips.h - celp_filters_mips.c - celp_math_mips.c - compute_antialias_fixed.h - compute_antialias_float.h - lsp_mips.h - dsputil_mips.c - fft_mips.c - fft_table.h - fft_init_table.c - fmtconvert_mips.c - iirfilter_mips.c - mpegaudiodsp_mips_fixed.c - mpegaudiodsp_mips_float.c diff --git a/ffmpeg/doc/multithreading.txt b/ffmpeg/doc/multithreading.txt deleted file mode 100644 index 2b992fc..0000000 --- a/ffmpeg/doc/multithreading.txt +++ /dev/null @@ -1,70 +0,0 @@ -FFmpeg multithreading methods -============================================== - -FFmpeg provides two methods for multithreading codecs. - -Slice threading decodes multiple parts of a frame at the same time, using -AVCodecContext execute() and execute2(). - -Frame threading decodes multiple frames at the same time. -It accepts N future frames and delays decoded pictures by N-1 frames. -The later frames are decoded in separate threads while the user is -displaying the current one. - -Restrictions on clients -============================================== - -Slice threading - -* The client's draw_horiz_band() must be thread-safe according to the comment - in avcodec.h. - -Frame threading - -* Restrictions with slice threading also apply. -* For best performance, the client should set thread_safe_callbacks if it - provides a thread-safe get_buffer() callback. -* There is one frame of delay added for every thread beyond the first one. - Clients must be able to handle this; the pkt_dts and pkt_pts fields in - AVFrame will work as usual. - -Restrictions on codec implementations -============================================== - -Slice threading - - None except that there must be something worth executing in parallel. - -Frame threading - -* Codecs can only accept entire pictures per packet. -* Codecs similar to ffv1, whose streams don't reset across frames, - will not work because their bitstreams cannot be decoded in parallel. - -* The contents of buffers must not be read before ff_thread_await_progress() - has been called on them. reget_buffer() and buffer age optimizations no longer work. -* The contents of buffers must not be written to after ff_thread_report_progress() - has been called on them. This includes draw_edges(). - -Porting codecs to frame threading -============================================== - -Find all context variables that are needed by the next frame. Move all -code changing them, as well as code calling get_buffer(), up to before -the decode process starts. Call ff_thread_finish_setup() afterwards. 
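As a rough sketch of that ordering (not taken from any real decoder; MyContext, parse_header() and decode_slices() are hypothetical placeholders), a frame-threaded decode callback could be laid out like this:

/* Everything the next frame's thread reads from the context is updated
 * first, then ff_thread_finish_setup() lets that thread start while the
 * slow per-pixel work continues here. */
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    MyContext *s   = avctx->priv_data;   /* hypothetical codec context */
    AVFrame *frame = data;
    int ret;

    if ((ret = parse_header(s, avpkt)) < 0)     /* updates inter-frame state */
        return ret;
    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    ff_thread_finish_setup(avctx);              /* next frame may start decoding */

    if ((ret = decode_slices(s, frame, avpkt)) < 0)  /* must not change setup state */
        return ret;

    *got_frame = 1;
    return avpkt->size;
}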
If -some code can't be moved, have update_thread_context() run it in the next -thread. - -If the codec allocates writable tables in its init(), add an init_thread_copy() -which re-allocates them for other threads. - -Add CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little -speed gain at this point but it should work. - -If there are inter-frame dependencies, so the codec calls -ff_thread_report/await_progress(), set AVCodecInternal.allocate_progress. The -frames must then be freed with ff_thread_release_buffer(). -Otherwise leave it at zero and decode directly into the user-supplied frames. - -Call ff_thread_report_progress() after some part of the current picture has decoded. -A good place to put this is where draw_horiz_band() is called - add this if it isn't -called anywhere, as it's useful too and the implementation is trivial when you're -doing this. Note that draw_edges() needs to be called before reporting progress. - -Before accessing a reference frame or its MVs, call ff_thread_await_progress(). diff --git a/ffmpeg/doc/muxers.texi b/ffmpeg/doc/muxers.texi deleted file mode 100644 index 776ba2b..0000000 --- a/ffmpeg/doc/muxers.texi +++ /dev/null @@ -1,960 +0,0 @@ -@chapter Muxers -@c man begin MUXERS - -Muxers are configured elements in FFmpeg which allow writing -multimedia streams to a particular type of file. - -When you configure your FFmpeg build, all the supported muxers -are enabled by default. You can list all available muxers using the -configure option @code{--list-muxers}. - -You can disable all the muxers with the configure option -@code{--disable-muxers} and selectively enable / disable single muxers -with the options @code{--enable-muxer=@var{MUXER}} / -@code{--disable-muxer=@var{MUXER}}. - -The option @code{-formats} of the ff* tools will display the list of -enabled muxers. - -A description of some of the currently available muxers follows. - -@anchor{aiff} -@section aiff - -Audio Interchange File Format muxer. - -It accepts the following options: - -@table @option -@item write_id3v2 -Enable ID3v2 tags writing when set to 1. Default is 0 (disabled). - -@item id3v2_version -Select ID3v2 version to write. Currently only version 3 and 4 (aka. -ID3v2.3 and ID3v2.4) are supported. The default is version 4. - -@end table - -@anchor{crc} -@section crc - -CRC (Cyclic Redundancy Check) testing format. - -This muxer computes and prints the Adler-32 CRC of all the input audio -and video frames. By default audio frames are converted to signed -16-bit raw audio and video frames to raw video before computing the -CRC. - -The output of the muxer consists of a single line of the form: -CRC=0x@var{CRC}, where @var{CRC} is a hexadecimal number 0-padded to -8 digits containing the CRC for all the decoded input frames. - -For example to compute the CRC of the input, and store it in the file -@file{out.crc}: -@example -ffmpeg -i INPUT -f crc out.crc -@end example - -You can print the CRC to stdout with the command: -@example -ffmpeg -i INPUT -f crc - -@end example - -You can select the output format of each frame with @command{ffmpeg} by -specifying the audio and video codec and format. For example to -compute the CRC of the input audio converted to PCM unsigned 8-bit -and the input video converted to MPEG-2 video, use the command: -@example -ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f crc - -@end example - -See also the @ref{framecrc} muxer. - -@anchor{framecrc} -@section framecrc - -Per-packet CRC (Cyclic Redundancy Check) testing format. 
- -This muxer computes and prints the Adler-32 CRC for each audio -and video packet. By default audio frames are converted to signed -16-bit raw audio and video frames to raw video before computing the -CRC. - -The output of the muxer consists of a line for each audio and video -packet of the form: -@example -@var{stream_index}, @var{packet_dts}, @var{packet_pts}, @var{packet_duration}, @var{packet_size}, 0x@var{CRC} -@end example - -@var{CRC} is a hexadecimal number 0-padded to 8 digits containing the -CRC of the packet. - -For example to compute the CRC of the audio and video frames in -@file{INPUT}, converted to raw audio and video packets, and store it -in the file @file{out.crc}: -@example -ffmpeg -i INPUT -f framecrc out.crc -@end example - -To print the information to stdout, use the command: -@example -ffmpeg -i INPUT -f framecrc - -@end example - -With @command{ffmpeg}, you can select the output format to which the -audio and video frames are encoded before computing the CRC for each -packet by specifying the audio and video codec. For example, to -compute the CRC of each decoded input audio frame converted to PCM -unsigned 8-bit and of each decoded input video frame converted to -MPEG-2 video, use the command: -@example -ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f framecrc - -@end example - -See also the @ref{crc} muxer. - -@anchor{framemd5} -@section framemd5 - -Per-packet MD5 testing format. - -This muxer computes and prints the MD5 hash for each audio -and video packet. By default audio frames are converted to signed -16-bit raw audio and video frames to raw video before computing the -hash. - -The output of the muxer consists of a line for each audio and video -packet of the form: -@example -@var{stream_index}, @var{packet_dts}, @var{packet_pts}, @var{packet_duration}, @var{packet_size}, @var{MD5} -@end example - -@var{MD5} is a hexadecimal number representing the computed MD5 hash -for the packet. - -For example to compute the MD5 of the audio and video frames in -@file{INPUT}, converted to raw audio and video packets, and store it -in the file @file{out.md5}: -@example -ffmpeg -i INPUT -f framemd5 out.md5 -@end example - -To print the information to stdout, use the command: -@example -ffmpeg -i INPUT -f framemd5 - -@end example - -See also the @ref{md5} muxer. - -@anchor{gif} -@section gif - -Animated GIF muxer. - -It accepts the following options: - -@table @option -@item loop -Set the number of times to loop the output. Use @code{-1} for no loop, @code{0} -for looping indefinitely (default). - -@item final_delay -Force the delay (expressed in centiseconds) after the last frame. Each frame -ends with a delay until the next frame. The default is @code{-1}, which is a -special value to tell the muxer to re-use the previous delay. In case of a -loop, you might want to customize this value to mark a pause for instance. -@end table - -For example, to encode a gif looping 10 times, with a 5 seconds delay between -the loops: -@example -ffmpeg -i INPUT -loop 10 -final_delay 500 out.gif -@end example - -Note 1: if you wish to extract the frames in separate GIF files, you need to -force the @ref{image2} muxer: -@example -ffmpeg -i INPUT -c:v gif -f image2 "out%d.gif" -@end example - -Note 2: the GIF format has a very small time base: the delay between two frames -can not be smaller than one centi second. - -@anchor{hls} -@section hls - -Apple HTTP Live Streaming muxer that segments MPEG-TS according to -the HTTP Live Streaming specification. 
- -It creates a playlist file and numbered segment files. The output -filename specifies the playlist filename; the segment filenames -receive the same basename as the playlist, a sequential number and -a .ts extension. - -@example -ffmpeg -i in.nut out.m3u8 -@end example - -@table @option -@item -hls_time @var{seconds} -Set the segment length in seconds. -@item -hls_list_size @var{size} -Set the maximum number of playlist entries. -@item -hls_wrap @var{wrap} -Set the number after which index wraps. -@item -start_number @var{number} -Start the sequence from @var{number}. -@end table - -@anchor{ico} -@section ico - -ICO file muxer. - -Microsoft's icon file format (ICO) has some strict limitations that should be noted: - -@itemize -@item -Size cannot exceed 256 pixels in any dimension - -@item -Only BMP and PNG images can be stored - -@item -If a BMP image is used, it must be one of the following pixel formats: -@example -BMP Bit Depth FFmpeg Pixel Format -1bit pal8 -4bit pal8 -8bit pal8 -16bit rgb555le -24bit bgr24 -32bit bgra -@end example - -@item -If a BMP image is used, it must use the BITMAPINFOHEADER DIB header - -@item -If a PNG image is used, it must use the rgba pixel format -@end itemize - -@anchor{image2} -@section image2 - -Image file muxer. - -The image file muxer writes video frames to image files. - -The output filenames are specified by a pattern, which can be used to -produce sequentially numbered series of files. -The pattern may contain the string "%d" or "%0@var{N}d", this string -specifies the position of the characters representing a numbering in -the filenames. If the form "%0@var{N}d" is used, the string -representing the number in each filename is 0-padded to @var{N} -digits. The literal character '%' can be specified in the pattern with -the string "%%". - -If the pattern contains "%d" or "%0@var{N}d", the first filename of -the file list specified will contain the number 1, all the following -numbers will be sequential. - -The pattern may contain a suffix which is used to automatically -determine the format of the image files to write. - -For example the pattern "img-%03d.bmp" will specify a sequence of -filenames of the form @file{img-001.bmp}, @file{img-002.bmp}, ..., -@file{img-010.bmp}, etc. -The pattern "img%%-%d.jpg" will specify a sequence of filenames of the -form @file{img%-1.jpg}, @file{img%-2.jpg}, ..., @file{img%-10.jpg}, -etc. - -The following example shows how to use @command{ffmpeg} for creating a -sequence of files @file{img-001.jpeg}, @file{img-002.jpeg}, ..., -taking one image every second from the input video: -@example -ffmpeg -i in.avi -vsync 1 -r 1 -f image2 'img-%03d.jpeg' -@end example - -Note that with @command{ffmpeg}, if the format is not specified with the -@code{-f} option and the output filename specifies an image file -format, the image2 muxer is automatically selected, so the previous -command can be written as: -@example -ffmpeg -i in.avi -vsync 1 -r 1 'img-%03d.jpeg' -@end example - -Note also that the pattern must not necessarily contain "%d" or -"%0@var{N}d", for example to create a single image file -@file{img.jpeg} from the input video you can employ the command: -@example -ffmpeg -i in.avi -f image2 -frames:v 1 img.jpeg -@end example - -@table @option -@item start_number @var{number} -Start the sequence from @var{number}. Default value is 1. Must be a -non-negative number. 
- -@item -update @var{number} -If @var{number} is nonzero, the filename will always be interpreted as just a -filename, not a pattern, and this file will be continuously overwritten with new -images. - -@end table - -The image muxer supports the .Y.U.V image file format. This format is -special in that that each image frame consists of three files, for -each of the YUV420P components. To read or write this image file format, -specify the name of the '.Y' file. The muxer will automatically open the -'.U' and '.V' files as required. - -@section matroska - -Matroska container muxer. - -This muxer implements the matroska and webm container specs. - -The recognized metadata settings in this muxer are: - -@table @option - -@item title=@var{title name} -Name provided to a single track -@end table - -@table @option - -@item language=@var{language name} -Specifies the language of the track in the Matroska languages form -@end table - -@table @option - -@item stereo_mode=@var{mode} -Stereo 3D video layout of two views in a single video track -@table @option -@item mono -video is not stereo -@item left_right -Both views are arranged side by side, Left-eye view is on the left -@item bottom_top -Both views are arranged in top-bottom orientation, Left-eye view is at bottom -@item top_bottom -Both views are arranged in top-bottom orientation, Left-eye view is on top -@item checkerboard_rl -Each view is arranged in a checkerboard interleaved pattern, Left-eye view being first -@item checkerboard_lr -Each view is arranged in a checkerboard interleaved pattern, Right-eye view being first -@item row_interleaved_rl -Each view is constituted by a row based interleaving, Right-eye view is first row -@item row_interleaved_lr -Each view is constituted by a row based interleaving, Left-eye view is first row -@item col_interleaved_rl -Both views are arranged in a column based interleaving manner, Right-eye view is first column -@item col_interleaved_lr -Both views are arranged in a column based interleaving manner, Left-eye view is first column -@item anaglyph_cyan_red -All frames are in anaglyph format viewable through red-cyan filters -@item right_left -Both views are arranged side by side, Right-eye view is on the left -@item anaglyph_green_magenta -All frames are in anaglyph format viewable through green-magenta filters -@item block_lr -Both eyes laced in one Block, Left-eye view is first -@item block_rl -Both eyes laced in one Block, Right-eye view is first -@end table -@end table - -For example a 3D WebM clip can be created using the following command line: -@example -ffmpeg -i sample_left_right_clip.mpg -an -c:v libvpx -metadata stereo_mode=left_right -y stereo_clip.webm -@end example - -This muxer supports the following options: - -@table @option - -@item reserve_index_space -By default, this muxer writes the index for seeking (called cues in Matroska -terms) at the end of the file, because it cannot know in advance how much space -to leave for the index at the beginning of the file. However for some use cases --- e.g. streaming where seeking is possible but slow -- it is useful to put the -index at the beginning of the file. - -If this option is set to a non-zero value, the muxer will reserve a given amount -of space in the file header and then try to write the cues there when the muxing -finishes. If the available space does not suffice, muxing will fail. A safe size -for most use cases should be about 50kB per hour of video. 
- -Note that cues are only written if the output is seekable and this option will -have no effect if it is not. - -@end table - -@anchor{md5} -@section md5 - -MD5 testing format. - -This muxer computes and prints the MD5 hash of all the input audio -and video frames. By default audio frames are converted to signed -16-bit raw audio and video frames to raw video before computing the -hash. - -The output of the muxer consists of a single line of the form: -MD5=@var{MD5}, where @var{MD5} is a hexadecimal number representing -the computed MD5 hash. - -For example to compute the MD5 hash of the input converted to raw -audio and video, and store it in the file @file{out.md5}: -@example -ffmpeg -i INPUT -f md5 out.md5 -@end example - -You can print the MD5 to stdout with the command: -@example -ffmpeg -i INPUT -f md5 - -@end example - -See also the @ref{framemd5} muxer. - -@section mov/mp4/ismv - -MOV/MP4/ISMV (Smooth Streaming) muxer. - -The mov/mp4/ismv muxer supports fragmentation. Normally, a MOV/MP4 -file has all the metadata about all packets stored in one location -(written at the end of the file, it can be moved to the start for -better playback by adding @var{faststart} to the @var{movflags}, or -using the @command{qt-faststart} tool). A fragmented -file consists of a number of fragments, where packets and metadata -about these packets are stored together. Writing a fragmented -file has the advantage that the file is decodable even if the -writing is interrupted (while a normal MOV/MP4 is undecodable if -it is not properly finished), and it requires less memory when writing -very long files (since writing normal MOV/MP4 files stores info about -every single packet in memory until the file is closed). The downside -is that it is less compatible with other applications. - -Fragmentation is enabled by setting one of the AVOptions that define -how to cut the file into fragments: - -@table @option -@item -moov_size @var{bytes} -Reserves space for the moov atom at the beginning of the file instead of placing the -moov atom at the end. If the space reserved is insufficient, muxing will fail. -@item -movflags frag_keyframe -Start a new fragment at each video keyframe. -@item -frag_duration @var{duration} -Create fragments that are @var{duration} microseconds long. -@item -frag_size @var{size} -Create fragments that contain up to @var{size} bytes of payload data. -@item -movflags frag_custom -Allow the caller to manually choose when to cut fragments, by -calling @code{av_write_frame(ctx, NULL)} to write a fragment with -the packets written so far. (This is only useful with other -applications integrating libavformat, not from @command{ffmpeg}.) -@item -min_frag_duration @var{duration} -Don't create fragments that are shorter than @var{duration} microseconds long. -@end table - -If more than one condition is specified, fragments are cut when -one of the specified conditions is fulfilled. The exception to this is -@code{-min_frag_duration}, which has to be fulfilled for any of the other -conditions to apply. - -Additionally, the way the output file is written can be adjusted -through a few other options: - -@table @option -@item -movflags empty_moov -Write an initial moov atom directly at the start of the file, without -describing any samples in it. Generally, an mdat/moov pair is written -at the start of the file, as a normal MOV/MP4 file, containing only -a short portion of the file. 
With this option set, there is no initial -mdat atom, and the moov atom only describes the tracks but has -a zero duration. - -Files written with this option set do not work in QuickTime. -This option is implicitly set when writing ismv (Smooth Streaming) files. -@item -movflags separate_moof -Write a separate moof (movie fragment) atom for each track. Normally, -packets for all tracks are written in a moof atom (which is slightly -more efficient), but with this option set, the muxer writes one moof/mdat -pair for each track, making it easier to separate tracks. - -This option is implicitly set when writing ismv (Smooth Streaming) files. -@item -movflags faststart -Run a second pass moving the index (moov atom) to the beginning of the file. -This operation can take a while, and will not work in various situations such -as fragmented output, thus it is not enabled by default. -@item -movflags rtphint -Add RTP hinting tracks to the output file. -@end table - -Smooth Streaming content can be pushed in real time to a publishing -point on IIS with this muxer. Example: -@example -ffmpeg -re @var{<normal input/transcoding options>} -movflags isml+frag_keyframe -f ismv http://server/publishingpoint.isml/Streams(Encoder1) -@end example - -@section mp3 - -The MP3 muxer writes a raw MP3 stream with an ID3v2 header at the beginning and -optionally an ID3v1 tag at the end. ID3v2.3 and ID3v2.4 are supported, the -@code{id3v2_version} option controls which one is used. The legacy ID3v1 tag is -not written by default, but may be enabled with the @code{write_id3v1} option. - -For seekable output the muxer also writes a Xing frame at the beginning, which -contains the number of frames in the file. It is useful for computing duration -of VBR files. - -The muxer supports writing ID3v2 attached pictures (APIC frames). The pictures -are supplied to the muxer in form of a video stream with a single packet. There -can be any number of those streams, each will correspond to a single APIC frame. -The stream metadata tags @var{title} and @var{comment} map to APIC -@var{description} and @var{picture type} respectively. See -@url{http://id3.org/id3v2.4.0-frames} for allowed picture types. - -Note that the APIC frames must be written at the beginning, so the muxer will -buffer the audio frames until it gets all the pictures. It is therefore advised -to provide the pictures as soon as possible to avoid excessive buffering. - -Examples: - -Write an mp3 with an ID3v2.3 header and an ID3v1 footer: -@example -ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3 -@end example - -To attach a picture to an mp3 file select both the audio and the picture stream -with @code{map}: -@example -ffmpeg -i input.mp3 -i cover.png -c copy -map 0 -map 1 --metadata:s:v title="Album cover" -metadata:s:v comment="Cover (Front)" out.mp3 -@end example - -@section mpegts - -MPEG transport stream muxer. - -This muxer implements ISO 13818-1 and part of ETSI EN 300 468. - -The muxer options are: - -@table @option -@item -mpegts_original_network_id @var{number} -Set the original_network_id (default 0x0001). This is unique identifier -of a network in DVB. Its main use is in the unique identification of a -service through the path Original_Network_ID, Transport_Stream_ID. -@item -mpegts_transport_stream_id @var{number} -Set the transport_stream_id (default 0x0001). This identifies a -transponder in DVB. -@item -mpegts_service_id @var{number} -Set the service_id (default 0x0001) also known as program in DVB. 
-@item -mpegts_pmt_start_pid @var{number} -Set the first PID for PMT (default 0x1000, max 0x1f00). -@item -mpegts_start_pid @var{number} -Set the first PID for data packets (default 0x0100, max 0x0f00). -@item -mpegts_m2ts_mode @var{number} -Enable m2ts mode if set to 1. Default value is -1 which disables m2ts mode. -@item -muxrate @var{number} -Set muxrate. -@item -pes_payload_size @var{number} -Set minimum PES packet payload in bytes. -@item -mpegts_flags @var{flags} -Set flags (see below). -@item -mpegts_copyts @var{number} -Preserve original timestamps, if value is set to 1. Default value is -1, which -results in shifting timestamps so that they start from 0. -@item -tables_version @var{number} -Set PAT, PMT and SDT version (default 0, valid values are from 0 to 31, inclusively). -This option allows updating stream structure so that standard consumer may -detect the change. To do so, reopen output AVFormatContext (in case of API -usage) or restart ffmpeg instance, cyclically changing tables_version value: -@example -ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111 -ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111 -... -ffmpeg -i source3.ts -codec copy -f mpegts -tables_version 31 udp://1.1.1.1:1111 -ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111 -ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111 -... -@end example -@end table - -Option mpegts_flags may take a set of such flags: - -@table @option -@item resend_headers -Reemit PAT/PMT before writing the next packet. -@item latm -Use LATM packetization for AAC. -@end table - -The recognized metadata settings in mpegts muxer are @code{service_provider} -and @code{service_name}. If they are not set the default for -@code{service_provider} is "FFmpeg" and the default for -@code{service_name} is "Service01". - -@example -ffmpeg -i file.mpg -c copy \ - -mpegts_original_network_id 0x1122 \ - -mpegts_transport_stream_id 0x3344 \ - -mpegts_service_id 0x5566 \ - -mpegts_pmt_start_pid 0x1500 \ - -mpegts_start_pid 0x150 \ - -metadata service_provider="Some provider" \ - -metadata service_name="Some Channel" \ - -y out.ts -@end example - -@section null - -Null muxer. - -This muxer does not generate any output file, it is mainly useful for -testing or benchmarking purposes. - -For example to benchmark decoding with @command{ffmpeg} you can use the -command: -@example -ffmpeg -benchmark -i INPUT -f null out.null -@end example - -Note that the above command does not read or write the @file{out.null} -file, but specifying the output file is required by the @command{ffmpeg} -syntax. - -Alternatively you can write the command as: -@example -ffmpeg -benchmark -i INPUT -f null - -@end example - -@section ogg - -Ogg container muxer. - -@table @option -@item -page_duration @var{duration} -Preferred page duration, in microseconds. The muxer will attempt to create -pages that are approximately @var{duration} microseconds long. This allows the -user to compromise between seek granularity and container overhead. The default -is 1 second. A value of 0 will fill all segments, making pages as large as -possible. A value of 1 will effectively use 1 packet-per-page in most -situations, giving a small seek granularity at the cost of additional container -overhead. -@end table - -@section segment, stream_segment, ssegment - -Basic stream segmenter. - -The segmenter muxer outputs streams to a number of separate files of nearly -fixed duration. 
Output filename pattern can be set in a fashion similar to -@ref{image2}. - -@code{stream_segment} is a variant of the muxer used to write to -streaming output formats, i.e. which do not require global headers, -and is recommended for outputting e.g. to MPEG transport stream segments. -@code{ssegment} is a shorter alias for @code{stream_segment}. - -Every segment starts with a keyframe of the selected reference stream, -which is set through the @option{reference_stream} option. - -Note that if you want accurate splitting for a video file, you need to -make the input key frames correspond to the exact splitting times -expected by the segmenter, or the segment muxer will start the new -segment with the key frame found next after the specified start -time. - -The segment muxer works best with a single constant frame rate video. - -Optionally it can generate a list of the created segments, by setting -the option @var{segment_list}. The list type is specified by the -@var{segment_list_type} option. The entry filenames in the segment -list are set by default to the basename of the corresponding segment -files. - -The segment muxer supports the following options: - -@table @option -@item reference_stream @var{specifier} -Set the reference stream, as specified by the string @var{specifier}. -If @var{specifier} is set to @code{auto}, the reference is choosen -automatically. Otherwise it must be a stream specifier (see the ``Stream -specifiers'' chapter in the ffmpeg manual) which specifies the -reference stream. The default value is @code{auto}. - -@item segment_format @var{format} -Override the inner container format, by default it is guessed by the filename -extension. - -@item segment_list @var{name} -Generate also a listfile named @var{name}. If not specified no -listfile is generated. - -@item segment_list_flags @var{flags} -Set flags affecting the segment list generation. - -It currently supports the following flags: -@table @samp -@item cache -Allow caching (only affects M3U8 list files). - -@item live -Allow live-friendly file generation. -@end table - -@item segment_list_size @var{size} -Update the list file so that it contains at most the last @var{size} -segments. If 0 the list file will contain all the segments. Default -value is 0. - -@item segment_list_entry_prefix @var{prefix} -Set @var{prefix} to prepend to the name of each entry filename. By -default no prefix is applied. - -@item segment_list_type @var{type} -Specify the format for the segment list file. - -The following values are recognized: -@table @samp -@item flat -Generate a flat list for the created segments, one segment per line. - -@item csv, ext -Generate a list for the created segments, one segment per line, -each line matching the format (comma-separated values): -@example -@var{segment_filename},@var{segment_start_time},@var{segment_end_time} -@end example - -@var{segment_filename} is the name of the output file generated by the -muxer according to the provided pattern. CSV escaping (according to -RFC4180) is applied if required. - -@var{segment_start_time} and @var{segment_end_time} specify -the segment start and end time expressed in seconds. - -A list file with the suffix @code{".csv"} or @code{".ext"} will -auto-select this format. - -@samp{ext} is deprecated in favor or @samp{csv}. - -@item ffconcat -Generate an ffconcat file for the created segments. The resulting file -can be read using the FFmpeg @ref{concat} demuxer. 
- -A list file with the suffix @code{".ffcat"} or @code{".ffconcat"} will -auto-select this format. - -@item m3u8 -Generate an extended M3U8 file, version 3, compliant with -@url{http://tools.ietf.org/id/draft-pantos-http-live-streaming}. - -A list file with the suffix @code{".m3u8"} will auto-select this format. -@end table - -If not specified the type is guessed from the list file name suffix. - -@item segment_time @var{time} -Set segment duration to @var{time}, the value must be a duration -specification. Default value is "2". See also the -@option{segment_times} option. - -Note that splitting may not be accurate, unless you force the -reference stream key-frames at the given time. See the introductory -notice and the examples below. - -@item segment_time_delta @var{delta} -Specify the accuracy time when selecting the start time for a -segment, expressed as a duration specification. Default value is "0". - -When delta is specified a key-frame will start a new segment if its -PTS satisfies the relation: -@example -PTS >= start_time - time_delta -@end example - -This option is useful when splitting video content, which is always -split at GOP boundaries, in case a key frame is found just before the -specified split time. - -In particular may be used in combination with the @file{ffmpeg} option -@var{force_key_frames}. The key frame times specified by -@var{force_key_frames} may not be set accurately because of rounding -issues, with the consequence that a key frame time may result set just -before the specified time. For constant frame rate videos a value of -1/2*@var{frame_rate} should address the worst case mismatch between -the specified time and the time set by @var{force_key_frames}. - -@item segment_times @var{times} -Specify a list of split points. @var{times} contains a list of comma -separated duration specifications, in increasing order. See also -the @option{segment_time} option. - -@item segment_frames @var{frames} -Specify a list of split video frame numbers. @var{frames} contains a -list of comma separated integer numbers, in increasing order. - -This option specifies to start a new segment whenever a reference -stream key frame is found and the sequential number (starting from 0) -of the frame is greater or equal to the next value in the list. - -@item segment_wrap @var{limit} -Wrap around segment index once it reaches @var{limit}. - -@item segment_start_number @var{number} -Set the sequence number of the first segment. Defaults to @code{0}. - -@item reset_timestamps @var{1|0} -Reset timestamps at the begin of each segment, so that each segment -will start with near-zero timestamps. It is meant to ease the playback -of the generated segments. May not work with some combinations of -muxers/codecs. It is set to @code{0} by default. - -@item initial_offset @var{offset} -Specify timestamp offset to apply to the output packet timestamps. The -argument must be a time duration specification, and defaults to 0. 
-@end table - -@subsection Examples - -@itemize -@item -To remux the content of file @file{in.mkv} to a list of segments -@file{out-000.nut}, @file{out-001.nut}, etc., and write the list of -generated segments to @file{out.list}: -@example -ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.list out%03d.nut -@end example - -@item -As the example above, but segment the input file according to the split -points specified by the @var{segment_times} option: -@example -ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_times 1,2,3,5,8,13,21 out%03d.nut -@end example - -@item -As the example above, but use the @command{ffmpeg} @option{force_key_frames} -option to force key frames in the input at the specified location, together -with the segment option @option{segment_time_delta} to account for -possible roundings operated when setting key frame times. -@example -ffmpeg -i in.mkv -force_key_frames 1,2,3,5,8,13,21 -codec:v mpeg4 -codec:a pcm_s16le -map 0 \ --f segment -segment_list out.csv -segment_times 1,2,3,5,8,13,21 -segment_time_delta 0.05 out%03d.nut -@end example -In order to force key frames on the input file, transcoding is -required. - -@item -Segment the input file by splitting the input file according to the -frame numbers sequence specified with the @option{segment_frames} option: -@example -ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_frames 100,200,300,500,800 out%03d.nut -@end example - -@item -To convert the @file{in.mkv} to TS segments using the @code{libx264} -and @code{libfaac} encoders: -@example -ffmpeg -i in.mkv -map 0 -codec:v libx264 -codec:a libfaac -f ssegment -segment_list out.list out%03d.ts -@end example - -@item -Segment the input file, and create an M3U8 live playlist (can be used -as live HLS source): -@example -ffmpeg -re -i in.mkv -codec copy -map 0 -f segment -segment_list playlist.m3u8 \ --segment_list_flags +live -segment_time 10 out%03d.mkv -@end example -@end itemize - -@section tee - -The tee muxer can be used to write the same data to several files or any -other kind of muxer. It can be used, for example, to both stream a video to -the network and save it to disk at the same time. - -It is different from specifying several outputs to the @command{ffmpeg} -command-line tool because the audio and video data will be encoded only once -with the tee muxer; encoding can be a very expensive process. It is not -useful when using the libavformat API directly because it is then possible -to feed the same packets to several muxers directly. - -The slave outputs are specified in the file name given to the muxer, -separated by '|'. If any of the slave name contains the '|' separator, -leading or trailing spaces or any special character, it must be -escaped (see @ref{quoting_and_escaping,,the "Quoting and escaping" -section in the ffmpeg-utils(1) manual,ffmpeg-utils}). - -Muxer options can be specified for each slave by prepending them as a list of -@var{key}=@var{value} pairs separated by ':', between square brackets. If -the options values contain a special character or the ':' separator, they -must be escaped; note that this is a second level escaping. - -The following special options are also recognized: -@table @option -@item f -Specify the format name. Useful if it cannot be guessed from the -output name suffix. - -@item bsfs[/@var{spec}] -Specify a list of bitstream filters to apply to the specified -output. 
- -It is possible to specify to which streams a given bitstream filter -applies, by appending a stream specifier to the option separated by -@code{/}. @var{spec} must be a stream specifier (see @ref{Format -stream specifiers}). If the stream specifier is not specified, the -bistream filters will be applied to all streams in the output. - -Several bitstream filters can be specified, separated by ",". - -@item select -Select the streams that should be mapped to the slave output, -specified by a stream specifier. If not specified, this defaults to -all the input streams. -@end table - -Some examples follow. -@itemize -@item -Encode something and both archive it in a WebM file and stream it -as MPEG-TS over UDP (the streams need to be explicitly mapped): -@example -ffmpeg -i ... -c:v libx264 -c:a mp2 -f tee -map 0:v -map 0:a - "archive-20121107.mkv|[f=mpegts]udp://10.0.1.255:1234/" -@end example - -@item -Use @command{ffmpeg} to encode the input, and send the output -to three different destinations. The @code{dump_extra} bitstream -filter is used to add extradata information to all the output video -keyframes packets, as requested by the MPEG-TS format. The select -option is applied to @file{out.aac} in order to make it contain only -audio packets. -@example -ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental - -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=a]out.aac" -@end example - -@item -As below, but select only stream @code{a:1} for the audio output. Note -that a second level escaping must be performed, as ":" is a special -character used to separate options. -@example -ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental - -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=\'a:1\']out.aac" -@end example -@end itemize - -Note: some codecs may need different options depending on the output format; -the auto-detection of this can not work with the tee muxer. The main example -is the @option{global_header} flag. - -@c man end MUXERS diff --git a/ffmpeg/doc/nut.texi b/ffmpeg/doc/nut.texi deleted file mode 100644 index 0026a12..0000000 --- a/ffmpeg/doc/nut.texi +++ /dev/null @@ -1,138 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle NUT - -@titlepage -@center @titlefont{NUT} -@end titlepage - -@top - -@contents - -@chapter Description -NUT is a low overhead generic container format. It stores audio, video, -subtitle and user-defined streams in a simple, yet efficient, way. - -It was created by a group of FFmpeg and MPlayer developers in 2003 -and was finalized in 2008. - -The official nut specification is at svn://svn.mplayerhq.hu/nut -In case of any differences between this text and the official specification, -the official specification shall prevail. - -@chapter Container-specific codec tags - -@section Generic raw YUVA formats - -Since many exotic planar YUVA pixel formats are not considered by -the AVI/QuickTime FourCC lists, the following scheme is adopted for -representing them. - -The first two bytes can contain the values: -Y1 = only Y -Y2 = Y+A -Y3 = YUV -Y4 = YUVA - -The third byte represents the width and height chroma subsampling -values for the UV planes, that is the amount to shift the luma -width/height right to find the chroma width/height. - -The fourth byte is the number of bits used (8, 16, ...). - -If the order of bytes is inverted, that means that each component has -to be read big-endian. 
- -@section Raw Audio - -@multitable @columnfractions .4 .4 -@item ALAW @tab A-LAW -@item ULAW @tab MU-LAW -@item P<type><interleaving><bits> @tab little-endian PCM -@item <bits><interleaving><type>P @tab big-endian PCM -@end multitable - -<type> is S for signed integer, U for unsigned integer, F for IEEE float -<interleaving> is D for default, P is for planar. -<bits> is 8/16/24/32 - -@example -PFD[32] would for example be signed 32 bit little-endian IEEE float -@end example - -@section Subtitles - -@multitable @columnfractions .4 .4 -@item UTF8 @tab Raw UTF-8 -@item SSA[0] @tab SubStation Alpha -@item DVDS @tab DVD subtitles -@item DVBS @tab DVB subtitles -@end multitable - -@section Raw Data - -@multitable @columnfractions .4 .4 -@item UTF8 @tab Raw UTF-8 -@end multitable - -@section Codecs - -@multitable @columnfractions .4 .4 -@item 3IV1 @tab non-compliant MPEG-4 generated by old 3ivx -@item ASV1 @tab Asus Video -@item ASV2 @tab Asus Video 2 -@item CVID @tab Cinepak -@item CYUV @tab Creative YUV -@item DIVX @tab non-compliant MPEG-4 generated by old DivX -@item DUCK @tab Truemotion 1 -@item FFV1 @tab FFmpeg video 1 -@item FFVH @tab FFmpeg Huffyuv -@item H261 @tab ITU H.261 -@item H262 @tab ITU H.262 -@item H263 @tab ITU H.263 -@item H264 @tab ITU H.264 -@item HFYU @tab Huffyuv -@item I263 @tab Intel H.263 -@item IV31 @tab Indeo 3.1 -@item IV32 @tab Indeo 3.2 -@item IV50 @tab Indeo 5.0 -@item LJPG @tab ITU JPEG (lossless) -@item MJLS @tab ITU JPEG-LS -@item MJPG @tab ITU JPEG -@item MPG4 @tab MS MPEG-4v1 (not ISO MPEG-4) -@item MP42 @tab MS MPEG-4v2 -@item MP43 @tab MS MPEG-4v3 -@item MP4V @tab ISO MPEG-4 Part 2 Video (from old encoders) -@item mpg1 @tab ISO MPEG-1 Video -@item mpg2 @tab ISO MPEG-2 Video -@item MRLE @tab MS RLE -@item MSVC @tab MS Video 1 -@item RT21 @tab Indeo 2.1 -@item RV10 @tab RealVideo 1.0 -@item RV20 @tab RealVideo 2.0 -@item RV30 @tab RealVideo 3.0 -@item RV40 @tab RealVideo 4.0 -@item SNOW @tab FFmpeg Snow -@item SVQ1 @tab Sorenson Video 1 -@item SVQ3 @tab Sorenson Video 3 -@item theo @tab Xiph Theora -@item TM20 @tab Truemotion 2.0 -@item UMP4 @tab non-compliant MPEG-4 generated by UB Video MPEG-4 -@item VCR1 @tab ATI VCR1 -@item VP30 @tab VP 3.0 -@item VP31 @tab VP 3.1 -@item VP50 @tab VP 5.0 -@item VP60 @tab VP 6.0 -@item VP61 @tab VP 6.1 -@item VP62 @tab VP 6.2 -@item VP70 @tab VP 7.0 -@item WMV1 @tab MS WMV7 -@item WMV2 @tab MS WMV8 -@item WMV3 @tab MS WMV9 -@item WV1F @tab non-compliant MPEG-4 generated by ? -@item WVC1 @tab VC-1 -@item XVID @tab non-compliant MPEG-4 generated by old Xvid -@item XVIX @tab non-compliant MPEG-4 generated by old Xvid with interlacing bug -@end multitable - diff --git a/ffmpeg/doc/optimization.txt b/ffmpeg/doc/optimization.txt deleted file mode 100644 index 5a66d6b..0000000 --- a/ffmpeg/doc/optimization.txt +++ /dev/null @@ -1,288 +0,0 @@ -optimization Tips (for libavcodec): -=================================== - -What to optimize: ------------------ -If you plan to do non-x86 architecture specific optimizations (SIMD normally), -then take a look in the x86/ directory, as most important functions are -already optimized for MMX. - -If you want to do x86 optimizations then you can either try to finetune the -stuff in the x86 directory or find some other functions in the C source to -optimize, but there aren't many left. 
- - -Understanding these overoptimized functions: --------------------------------------------- -As many functions tend to be a bit difficult to understand because -of optimizations, it can be hard to optimize them further, or write -architecture-specific versions. It is recommended to look at older -revisions of the interesting files (web frontends for the various FFmpeg -branches are listed at http://ffmpeg.org/download.html). -Alternatively, look into the other architecture-specific versions in -the x86/, ppc/, alpha/ subdirectories. Even if you don't exactly -comprehend the instructions, it could help understanding the functions -and how they can be optimized. - -NOTE: If you still don't understand some function, ask at our mailing list!!! -(http://lists.ffmpeg.org/mailman/listinfo/ffmpeg-devel) - - -When is an optimization justified? ----------------------------------- -Normally, clean and simple optimizations for widely used codecs are -justified even if they only achieve an overall speedup of 0.1%. These -speedups accumulate and can make a big difference after awhile. Also, if -none of the following factors get worse due to an optimization -- speed, -binary code size, source size, source readability -- and at least one -factor improves, then an optimization is always a good idea even if the -overall gain is less than 0.1%. For obscure codecs that are not often -used, the goal is more toward keeping the code clean, small, and -readable instead of making it 1% faster. - - -WTF is that function good for ....: ------------------------------------ -The primary purpose of this list is to avoid wasting time optimizing functions -which are rarely used. - -put(_no_rnd)_pixels{,_x2,_y2,_xy2} - Used in motion compensation (en/decoding). - -avg_pixels{,_x2,_y2,_xy2} - Used in motion compensation of B-frames. - These are less important than the put*pixels functions. - -avg_no_rnd_pixels* - unused - -pix_abs16x16{,_x2,_y2,_xy2} - Used in motion estimation (encoding) with SAD. - -pix_abs8x8{,_x2,_y2,_xy2} - Used in motion estimation (encoding) with SAD of MPEG-4 4MV only. - These are less important than the pix_abs16x16* functions. - -put_mspel8_mc* / wmv2_mspel8* - Used only in WMV2. - it is not recommended that you waste your time with these, as WMV2 - is an ugly and relatively useless codec. - -mpeg4_qpel* / *qpel_mc* - Used in MPEG-4 qpel motion compensation (encoding & decoding). - The qpel8 functions are used only for 4mv, - the avg_* functions are used only for B-frames. - Optimizing them should have a significant impact on qpel - encoding & decoding. - -qpel{8,16}_mc??_old_c / *pixels{8,16}_l4 - Just used to work around a bug in an old libavcodec encoder version. - Don't optimize them. - -tpel_mc_func {put,avg}_tpel_pixels_tab - Used only for SVQ3, so only optimize them if you need fast SVQ3 decoding. - -add_bytes/diff_bytes - For huffyuv only, optimize if you want a faster ffhuffyuv codec. - -get_pixels / diff_pixels - Used for encoding, easy. - -clear_blocks - easiest to optimize - -gmc - Used for MPEG-4 gmc. - Optimizing this should have a significant effect on the gmc decoding - speed. - -gmc1 - Used for chroma blocks in MPEG-4 gmc with 1 warp point - (there are 4 luma & 2 chroma blocks per macroblock, so - only 1/3 of the gmc blocks use this, the other 2/3 - use the normal put_pixel* code, but only if there is - just 1 warp point). - Note: DivX5 gmc always uses just 1 warp point. - -pix_sum - Used for encoding. 
- -hadamard8_diff / sse / sad == pix_norm1 / dct_sad / quant_psnr / rd / bit - Specific compare functions used in encoding, it depends upon the - command line switches which of these are used. - Don't waste your time with dct_sad & quant_psnr, they aren't - really useful. - -put_pixels_clamped / add_pixels_clamped - Used for en/decoding in the IDCT, easy. - Note, some optimized IDCTs have the add/put clamped code included and - then put_pixels_clamped / add_pixels_clamped will be unused. - -idct/fdct - idct (encoding & decoding) - fdct (encoding) - difficult to optimize - -dct_quantize_trellis - Used for encoding with trellis quantization. - difficult to optimize - -dct_quantize - Used for encoding. - -dct_unquantize_mpeg1 - Used in MPEG-1 en/decoding. - -dct_unquantize_mpeg2 - Used in MPEG-2 en/decoding. - -dct_unquantize_h263 - Used in MPEG-4/H.263 en/decoding. - -FIXME remaining functions? -BTW, most of these functions are in dsputil.c/.h, some are in mpegvideo.c/.h. - - - -Alignment: -Some instructions on some architectures have strict alignment restrictions, -for example most SSE/SSE2 instructions on x86. -The minimum guaranteed alignment is written in the .h files, for example: - void (*put_pixels_clamped)(const int16_t *block/*align 16*/, UINT8 *pixels/*align 8*/, int line_size); - - -General Tips: -------------- -Use asm loops like: -__asm__( - "1: .... - ... - "jump_instruction .... -Do not use C loops: -do{ - __asm__( - ... -}while() - -For x86, mark registers that are clobbered in your asm. This means both -general x86 registers (e.g. eax) as well as XMM registers. This last one is -particularly important on Win64, where xmm6-15 are callee-save, and not -restoring their contents leads to undefined results. In external asm (e.g. -yasm), you do this by using: -cglobal functon_name, num_args, num_regs, num_xmm_regs -In inline asm, you specify clobbered registers at the end of your asm: -__asm__(".." ::: "%eax"). -If gcc is not set to support sse (-msse) it will not accept xmm registers -in the clobber list. For that we use two macros to declare the clobbers. -XMM_CLOBBERS should be used when there are other clobbers, for example: -__asm__(".." ::: XMM_CLOBBERS("xmm0",) "eax"); -and XMM_CLOBBERS_ONLY should be used when the only clobbers are xmm registers: -__asm__(".." :: XMM_CLOBBERS_ONLY("xmm0")); - -Do not expect a compiler to maintain values in your registers between separate -(inline) asm code blocks. It is not required to. For example, this is bad: -__asm__("movdqa %0, %%xmm7" : src); -/* do something */ -__asm__("movdqa %%xmm7, %1" : dst); -- first of all, you're assuming that the compiler will not use xmm7 in - between the two asm blocks. It probably won't when you test it, but it's - a poor assumption that will break at some point for some --cpu compiler flag -- secondly, you didn't mark xmm7 as clobbered. If you did, the compiler would - have restored the original value of xmm7 after the first asm block, thus - rendering the combination of the two blocks of code invalid -Code that depends on data in registries being untouched, should be written as -a single __asm__() statement. Ideally, a single function contains only one -__asm__() block. - -Use external asm (nasm/yasm) or inline asm (__asm__()), do not use intrinsics. -The latter requires a good optimizing compiler which gcc is not. - -Inline asm vs. 
external asm ---------------------------- -Both inline asm (__asm__("..") in a .c file, handled by a compiler such as gcc) -and external asm (.s or .asm files, handled by an assembler such as yasm/nasm) -are accepted in FFmpeg. Which one to use differs per specific case. - -- if your code is intended to be inlined in a C function, inline asm is always - better, because external asm cannot be inlined -- if your code calls external functions, yasm is always better -- if your code takes huge and complex structs as function arguments (e.g. - MpegEncContext; note that this is not ideal and is discouraged if there - are alternatives), then inline asm is always better, because predicting - member offsets in complex structs is almost impossible. It's safest to let - the compiler take care of that -- in many cases, both can be used and it just depends on the preference of the - person writing the asm. For new asm, the choice is up to you. For existing - asm, you'll likely want to maintain whatever form it is currently in unless - there is a good reason to change it. -- if, for some reason, you believe that a particular chunk of existing external - asm could be improved upon further if written in inline asm (or the other - way around), then please make the move from external asm <-> inline asm a - separate patch before your patches that actually improve the asm. - - -Links: -====== -http://www.aggregate.org/MAGIC/ - -x86-specific: -------------- -http://developer.intel.com/design/pentium4/manuals/248966.htm - -The IA-32 Intel Architecture Software Developer's Manual, Volume 2: -Instruction Set Reference -http://developer.intel.com/design/pentium4/manuals/245471.htm - -http://www.agner.org/assem/ - -AMD Athlon Processor x86 Code Optimization Guide: -http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/22007.pdf - - -ARM-specific: -------------- -ARM Architecture Reference Manual (up to ARMv5TE): -http://www.arm.com/community/university/eulaarmarm.html - -Procedure Call Standard for the ARM Architecture: -http://www.arm.com/pdfs/aapcs.pdf - -Optimization guide for ARM9E (used in Nokia 770 Internet Tablet): -http://infocenter.arm.com/help/topic/com.arm.doc.ddi0240b/DDI0240A.pdf -Optimization guide for ARM11 (used in Nokia N800 Internet Tablet): -http://infocenter.arm.com/help/topic/com.arm.doc.ddi0211j/DDI0211J_arm1136_r1p5_trm.pdf -Optimization guide for Intel XScale (used in Sharp Zaurus PDA): -http://download.intel.com/design/intelxscale/27347302.pdf -Intel Wireless MMX 2 Coprocessor: Programmers Reference Manual -http://download.intel.com/design/intelxscale/31451001.pdf - -PowerPC-specific: ------------------ -PowerPC32/AltiVec PIM: -www.freescale.com/files/32bit/doc/ref_manual/ALTIVECPEM.pdf - -PowerPC32/AltiVec PEM: -www.freescale.com/files/32bit/doc/ref_manual/ALTIVECPIM.pdf - -CELL/SPU: -http://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/30B3520C93F437AB87257060006FFE5E/$file/Language_Extensions_for_CBEA_2.4.pdf -http://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/9F820A5FFA3ECE8C8725716A0062585F/$file/CBE_Handbook_v1.1_24APR2007_pub.pdf - -SPARC-specific: ---------------- -SPARC Joint Programming Specification (JPS1): Commonality -http://www.fujitsu.com/downloads/PRMPWR/JPS1-R1.0.4-Common-pub.pdf - -UltraSPARC III Processor User's Manual (contains instruction timings) -http://www.sun.com/processors/manuals/USIIIv2.pdf - -VIS Whitepaper (contains optimization guidelines) -http://www.sun.com/processors/vis/download/vis/vis_whitepaper.pdf - -GCC asm links: 
--------------- -official doc but quite ugly -http://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html - -a bit old (note "+" is valid for input-output, even though the next disagrees) -http://www.cs.virginia.edu/~clc5q/gcc-inline-asm.pdf diff --git a/ffmpeg/doc/outdevs.texi b/ffmpeg/doc/outdevs.texi deleted file mode 100644 index a204f32..0000000 --- a/ffmpeg/doc/outdevs.texi +++ /dev/null @@ -1,328 +0,0 @@ -@chapter Output Devices -@c man begin OUTPUT DEVICES - -Output devices are configured elements in FFmpeg that can write -multimedia data to an output device attached to your system. - -When you configure your FFmpeg build, all the supported output devices -are enabled by default. You can list all available ones using the -configure option "--list-outdevs". - -You can disable all the output devices using the configure option -"--disable-outdevs", and selectively enable an output device using the -option "--enable-outdev=@var{OUTDEV}", or you can disable a particular -input device using the option "--disable-outdev=@var{OUTDEV}". - -The option "-formats" of the ff* tools will display the list of -enabled output devices (amongst the muxers). - -A description of the currently available output devices follows. - -@section alsa - -ALSA (Advanced Linux Sound Architecture) output device. - -@subsection Examples - -@itemize -@item -Play a file on default ALSA device: -@example -ffmpeg -i INPUT -f alsa default -@end example - -@item -Play a file on soundcard 1, audio device 7: -@example -ffmpeg -i INPUT -f alsa hw:1,7 -@end example -@end itemize - -@section caca - -CACA output device. - -This output device allows to show a video stream in CACA window. -Only one CACA window is allowed per application, so you can -have only one instance of this output device in an application. - -To enable this output device you need to configure FFmpeg with -@code{--enable-libcaca}. -libcaca is a graphics library that outputs text instead of pixels. - -For more information about libcaca, check: -@url{http://caca.zoy.org/wiki/libcaca} - -@subsection Options - -@table @option - -@item window_title -Set the CACA window title, if not specified default to the filename -specified for the output device. - -@item window_size -Set the CACA window size, can be a string of the form -@var{width}x@var{height} or a video size abbreviation. -If not specified it defaults to the size of the input video. - -@item driver -Set display driver. - -@item algorithm -Set dithering algorithm. Dithering is necessary -because the picture being rendered has usually far more colours than -the available palette. -The accepted values are listed with @code{-list_dither algorithms}. - -@item antialias -Set antialias method. Antialiasing smoothens the rendered -image and avoids the commonly seen staircase effect. -The accepted values are listed with @code{-list_dither antialiases}. - -@item charset -Set which characters are going to be used when rendering text. -The accepted values are listed with @code{-list_dither charsets}. - -@item color -Set color to be used when rendering text. -The accepted values are listed with @code{-list_dither colors}. - -@item list_drivers -If set to @option{true}, print a list of available drivers and exit. - -@item list_dither -List available dither options related to the argument. -The argument must be one of @code{algorithms}, @code{antialiases}, -@code{charsets}, @code{colors}. 
-@end table
-
-@subsection Examples
-
-@itemize
-@item
-The following command shows the @command{ffmpeg} output in a
-CACA window, forcing its size to 80x25:
-@example
-ffmpeg -i INPUT -vcodec rawvideo -pix_fmt rgb24 -window_size 80x25 -f caca -
-@end example
-
-@item
-Show the list of available drivers and exit:
-@example
-ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_drivers true -
-@end example
-
-@item
-Show the list of available dither colors and exit:
-@example
-ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_dither colors -
-@end example
-@end itemize
-
-@section fbdev
-
-Linux framebuffer output device.
-
-The Linux framebuffer is a graphic hardware-independent abstraction
-layer to show graphics on a computer monitor, typically on the
-console. It is accessed through a file device node, usually
-@file{/dev/fb0}.
-
-For more detailed information read the file
-@file{Documentation/fb/framebuffer.txt} included in the Linux source tree.
-
-@subsection Options
-@table @option
-
-@item xoffset
-@item yoffset
-Set the x/y coordinate of the top left corner. Default is 0.
-@end table
-
-@subsection Examples
-Play a file on the framebuffer device @file{/dev/fb0}.
-The required pixel format depends on the current framebuffer settings.
-@example
-ffmpeg -re -i INPUT -vcodec rawvideo -pix_fmt bgra -f fbdev /dev/fb0
-@end example
-
-See also @url{http://linux-fbdev.sourceforge.net/}, and fbset(1).
-
-@section oss
-
-OSS (Open Sound System) output device.
-
-@section pulse
-
-PulseAudio output device.
-
-To enable this output device you need to configure FFmpeg with @code{--enable-libpulse}.
-
-More information about PulseAudio can be found at @url{http://www.pulseaudio.org}.
-
-@subsection Options
-@table @option
-
-@item server
-Connect to a specific PulseAudio server, specified by an IP address.
-The default server is used when not provided.
-
-@item name
-Specify the application name PulseAudio will use when showing active clients,
-by default it is the @code{LIBAVFORMAT_IDENT} string.
-
-@item stream_name
-Specify the stream name PulseAudio will use when showing active streams,
-by default it is set to the specified output name.
-
-@item device
-Specify the device to use. The default device is used when not provided.
-The list of output devices can be obtained with the command @command{pactl list sinks}.
-
-@item buffer_size
-@item buffer_duration
-Control the size and duration of the PulseAudio buffer. A small buffer
-gives more control, but requires more frequent updates.
-
-@option{buffer_size} specifies size in bytes while
-@option{buffer_duration} specifies duration in milliseconds.
-
-When both options are provided, the higher value is used
-(the duration is recalculated to bytes using the stream parameters). If they
-are set to 0 (which is the default), the device will use the default
-PulseAudio duration value. By default PulseAudio sets the buffer duration
-to around 2 seconds.
-@end table
-
-@subsection Examples
-Play a file on the default device of the default server:
-@example
-ffmpeg -i INPUT -f pulse "stream name"
-@end example
-
-@section sdl
-
-SDL (Simple DirectMedia Layer) output device.
-
-This output device allows showing a video stream in an SDL
-window. Only one SDL window is allowed per application, so you can
-have only one instance of this output device in an application.
-
-To enable this output device you need libsdl installed on your system
-when configuring your build.
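-
-A quick way to check that the SDL development files are visible before
-configuring (a sketch assuming @command{pkg-config} and the SDL 1.2
-development package, which ships @file{sdl.pc}, are installed):
-@example
-pkg-config --modversion sdl
-@end example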
-
-For more information about SDL, check:
-@url{http://www.libsdl.org/}
-
-@subsection Options
-
-@table @option
-
-@item window_title
-Set the SDL window title. If not specified it defaults to the filename
-specified for the output device.
-
-@item icon_title
-Set the name of the iconified SDL window. If not specified it is set
-to the same value as @var{window_title}.
-
-@item window_size
-Set the SDL window size. It can be a string of the form
-@var{width}x@var{height} or a video size abbreviation.
-If not specified it defaults to the size of the input video,
-downscaled according to the aspect ratio.
-
-@item window_fullscreen
-Set fullscreen mode when a non-zero value is provided.
-Default value is zero.
-@end table
-
-@subsection Interactive commands
-
-The window created by the device can be controlled through the
-following interactive commands.
-
-@table @key
-@item q, ESC
-Quit the device immediately.
-@end table
-
-@subsection Examples
-
-The following command shows the @command{ffmpeg} output in an
-SDL window, forcing its size to the qcif format:
-@example
-ffmpeg -i INPUT -vcodec rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL output"
-@end example
-
-@section sndio
-
-sndio audio output device.
-
-@section xv
-
-XV (XVideo) output device.
-
-This output device allows showing a video stream in an X Window System
-window.
-
-@subsection Options
-
-@table @option
-@item display_name
-Specify the hardware display name, which determines the display and
-communications domain to be used.
-
-The display name or DISPLAY environment variable can be a string in
-the format @var{hostname}[:@var{number}[.@var{screen_number}]].
-
-@var{hostname} specifies the name of the host machine to which the
-display is physically attached. @var{number} specifies the number of
-the display server on that host machine. @var{screen_number} specifies
-the screen to be used on that server.
-
-If unspecified, it defaults to the value of the DISPLAY environment
-variable.
-
-For example, @code{dual-headed:0.1} would specify screen 1 of display
-0 on the machine named ``dual-headed''.
-
-Check the X11 specification for more detailed information about the
-display name format.
-
-@item window_size
-Set the created window size. It can be a string of the form
-@var{width}x@var{height} or a video size abbreviation. If not
-specified it defaults to the size of the input video.
-
-@item window_x
-@item window_y
-Set the X and Y window offsets for the created window. They are both
-set to 0 by default. The values may be ignored by the window manager.
-
-@item window_title
-Set the window title. If not specified it defaults to the filename
-specified for the output device.
-@end table
-
-For more information about XVideo see @url{http://www.x.org/}.
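-
-For instance, relying only on the standard X11 DISPLAY environment
-variable (a sketch; the display and screen numbers are placeholders),
-the output can be sent to screen 1 of the local display with:
-@example
-DISPLAY=:0.1 ffmpeg -i INPUT -f xv display
-@end example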
- -@subsection Examples - -@itemize -@item -Decode, display and encode video input with @command{ffmpeg} at the -same time: -@example -ffmpeg -i INPUT OUTPUT -f xv display -@end example - -@item -Decode and display the input video to multiple X11 windows: -@example -ffmpeg -i INPUT -f xv normal -vf negate -f xv negated -@end example -@end itemize - -@c man end OUTPUT DEVICES diff --git a/ffmpeg/doc/platform.texi b/ffmpeg/doc/platform.texi deleted file mode 100644 index 934a3ae..0000000 --- a/ffmpeg/doc/platform.texi +++ /dev/null @@ -1,376 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle Platform Specific Information -@titlepage -@center @titlefont{Platform Specific Information} -@end titlepage - -@top - -@contents - -@chapter Unix-like - -Some parts of FFmpeg cannot be built with version 2.15 of the GNU -assembler which is still provided by a few AMD64 distributions. To -make sure your compiler really uses the required version of gas -after a binutils upgrade, run: - -@example -$(gcc -print-prog-name=as) --version -@end example - -If not, then you should install a different compiler that has no -hard-coded path to gas. In the worst case pass @code{--disable-asm} -to configure. - -@section BSD - -BSD make will not build FFmpeg, you need to install and use GNU Make -(@command{gmake}). - -@section (Open)Solaris - -GNU Make is required to build FFmpeg, so you have to invoke (@command{gmake}), -standard Solaris Make will not work. When building with a non-c99 front-end -(gcc, generic suncc) add either @code{--extra-libs=/usr/lib/values-xpg6.o} -or @code{--extra-libs=/usr/lib/64/values-xpg6.o} to the configure options -since the libc is not c99-compliant by default. The probes performed by -configure may raise an exception leading to the death of configure itself -due to a bug in the system shell. Simply invoke a different shell such as -bash directly to work around this: - -@example -bash ./configure -@end example - -@anchor{Darwin} -@section Darwin (Mac OS X, iPhone) - -The toolchain provided with Xcode is sufficient to build the basic -unacelerated code. - -Mac OS X on PowerPC or ARM (iPhone) requires a preprocessor from -@url{http://github.com/yuvi/gas-preprocessor} to build the optimized -assembler functions. Just download the Perl script and put it somewhere -in your PATH, FFmpeg's configure will pick it up automatically. - -Mac OS X on amd64 and x86 requires @command{yasm} to build most of the -optimized assembler functions. @uref{http://www.finkproject.org/, Fink}, -@uref{http://www.gentoo.org/proj/en/gentoo-alt/prefix/bootstrap-macos.xml, Gentoo Prefix}, -@uref{http://mxcl.github.com/homebrew/, Homebrew} -or @uref{http://www.macports.org, MacPorts} can easily provide it. - - -@chapter DOS - -Using a cross-compiler is preferred for various reasons. -@url{http://www.delorie.com/howto/djgpp/linux-x-djgpp.html} - - -@chapter OS/2 - -For information about compiling FFmpeg on OS/2 see -@url{http://www.edm2.com/index.php/FFmpeg}. - - -@chapter Windows - -To get help and instructions for building FFmpeg under Windows, check out -the FFmpeg Windows Help Forum at @url{http://ffmpeg.zeranoe.com/forum/}. - -@section Native Windows compilation using MinGW or MinGW-w64 - -FFmpeg can be built to run natively on Windows using the MinGW or MinGW-w64 -toolchains. Install the latest versions of MSYS and MinGW or MinGW-w64 from -@url{http://www.mingw.org/} or @url{http://mingw-w64.sourceforge.net/}. -You can find detailed installation instructions in the download section and -the FAQ. 
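-
-Once MSYS and the toolchain are set up, a plain native build is the usual
-configure/make sequence run from the MSYS shell (a minimal sketch; add
-@code{--enable-}/@code{--disable-} options as needed):
-@example
-./configure
-make
-make install
-@end example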
- -Notes: - -@itemize - -@item Building natively using MSYS can be sped up by disabling implicit rules -in the Makefile by calling @code{make -r} instead of plain @code{make}. This -speed up is close to non-existent for normal one-off builds and is only -noticeable when running make for a second time (for example during -@code{make install}). - -@item In order to compile FFplay, you must have the MinGW development library -of @uref{http://www.libsdl.org/, SDL} and @code{pkg-config} installed. - -@item By using @code{./configure --enable-shared} when configuring FFmpeg, -you can build the FFmpeg libraries (e.g. libavutil, libavcodec, -libavformat) as DLLs. - -@end itemize - -@section Microsoft Visual C++ or Intel C++ Compiler for Windows - -FFmpeg can be built with MSVC 2012 or earlier using a C99-to-C89 conversion utility -and wrapper, or with MSVC 2013 and ICL natively. - -You will need the following prerequisites: - -@itemize -@item @uref{https://github.com/libav/c99-to-c89/, C99-to-C89 Converter & Wrapper} -(if using MSVC 2012 or earlier) -@item @uref{http://code.google.com/p/msinttypes/, msinttypes} -(if using MSVC 2012 or earlier) -@item @uref{http://www.mingw.org/, MSYS} -@item @uref{http://yasm.tortall.net/, YASM} -@item @uref{http://gnuwin32.sourceforge.net/packages/bc.htm, bc for Windows} if -you want to run @uref{fate.html, FATE}. -@end itemize - -To set up a proper environment in MSYS, you need to run @code{msys.bat} from -the Visual Studio or Intel Compiler command prompt. - -Place @code{yasm.exe} somewhere in your @code{PATH}. If using MSVC 2012 or -earlier, place @code{c99wrap.exe} and @code{c99conv.exe} somewhere in your -@code{PATH} as well. - -Next, make sure any other headers and libs you want to use, such as zlib, are -located in a spot that the compiler can see. Do so by modifying the @code{LIB} -and @code{INCLUDE} environment variables to include the @strong{Windows-style} -paths to these directories. Alternatively, you can try and use the -@code{--extra-cflags}/@code{--extra-ldflags} configure options. If using MSVC -2012 or earlier, place @code{inttypes.h} somewhere the compiler can see too. - -Finally, run: - -@example -For MSVC: -./configure --toolchain=msvc - -For ICL: -./configure --toolchain=icl - -make -make install -@end example - -If you wish to compile shared libraries, add @code{--enable-shared} to your -configure options. Note that due to the way MSVC and ICL handle DLL imports and -exports, you cannot compile static and shared libraries at the same time, and -enabling shared libraries will automatically disable the static ones. - -Notes: - -@itemize - -@item It is possible that coreutils' @code{link.exe} conflicts with MSVC's linker. -You can find out by running @code{which link} to see which @code{link.exe} you -are using. If it is located at @code{/bin/link.exe}, then you have the wrong one -in your @code{PATH}. Either move or remove that copy, or make sure MSVC's -@code{link.exe} takes precedence in your @code{PATH} over coreutils'. - -@item If you wish to build with zlib support, you will have to grab a compatible -zlib binary from somewhere, with an MSVC import lib, or if you wish to link -statically, you can follow the instructions below to build a compatible -@code{zlib.lib} with MSVC. Regardless of which method you use, you must still -follow step 3, or compilation will fail. -@enumerate -@item Grab the @uref{http://zlib.net/, zlib sources}. 
-@item Edit @code{win32/Makefile.msc} so that it uses -MT instead of -MD, since -this is how FFmpeg is built as well. -@item Edit @code{zconf.h} and remove its inclusion of @code{unistd.h}. This gets -erroneously included when building FFmpeg. -@item Run @code{nmake -f win32/Makefile.msc}. -@item Move @code{zlib.lib}, @code{zconf.h}, and @code{zlib.h} to somewhere MSVC -can see. -@end enumerate - -@item FFmpeg has been tested with the following on i686 and x86_64: -@itemize -@item Visual Studio 2010 Pro and Express -@item Visual Studio 2012 Pro and Express -@item Visual Studio 2013 Pro and Express -@item Intel Composer XE 2013 -@item Intel Composer XE 2013 SP1 -@end itemize -Anything else is not officially supported. - -@end itemize - -@subsection Linking to FFmpeg with Microsoft Visual C++ - -If you plan to link with MSVC-built static libraries, you will need -to make sure you have @code{Runtime Library} set to -@code{Multi-threaded (/MT)} in your project's settings. - -You will need to define @code{inline} to something MSVC understands: -@example -#define inline __inline -@end example - -Also note, that as stated in @strong{Microsoft Visual C++}, you will need -an MSVC-compatible @uref{http://code.google.com/p/msinttypes/, inttypes.h}. - -If you plan on using import libraries created by dlltool, you must -set @code{References} to @code{No (/OPT:NOREF)} under the linker optimization -settings, otherwise the resulting binaries will fail during runtime. -This is not required when using import libraries generated by @code{lib.exe}. -This issue is reported upstream at -@url{http://sourceware.org/bugzilla/show_bug.cgi?id=12633}. - -To create import libraries that work with the @code{/OPT:REF} option -(which is enabled by default in Release mode), follow these steps: - -@enumerate - -@item Open the @emph{Visual Studio Command Prompt}. - -Alternatively, in a normal command line prompt, call @file{vcvars32.bat} -which sets up the environment variables for the Visual C++ tools -(the standard location for this file is something like -@file{C:\Program Files (x86_\Microsoft Visual Studio 10.0\VC\bin\vcvars32.bat}). - -@item Enter the @file{bin} directory where the created LIB and DLL files -are stored. - -@item Generate new import libraries with @command{lib.exe}: - -@example -lib /machine:i386 /def:..\lib\foo-version.def /out:foo.lib -@end example - -Replace @code{foo-version} and @code{foo} with the respective library names. - -@end enumerate - -@anchor{Cross compilation for Windows with Linux} -@section Cross compilation for Windows with Linux - -You must use the MinGW cross compilation tools available at -@url{http://www.mingw.org/}. - -Then configure FFmpeg with the following options: -@example -./configure --target-os=mingw32 --cross-prefix=i386-mingw32msvc- -@end example -(you can change the cross-prefix according to the prefix chosen for the -MinGW tools). - -Then you can easily test FFmpeg with @uref{http://www.winehq.com/, Wine}. - -@section Compilation under Cygwin - -Please use Cygwin 1.7.x as the obsolete 1.5.x Cygwin versions lack -llrint() in its C library. 
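-
-You can verify which Cygwin DLL version is installed from a Cygwin shell
-before starting (a quick sanity check, not an FFmpeg-specific step):
-@example
-uname -r
-@end example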
- -Install your Cygwin with all the "Base" packages, plus the -following "Devel" ones: -@example -binutils, gcc4-core, make, git, mingw-runtime, texi2html -@end example - -In order to run FATE you will also need the following "Utils" packages: -@example -bc, diffutils -@end example - -If you want to build FFmpeg with additional libraries, download Cygwin -"Devel" packages for Ogg and Vorbis from any Cygwin packages repository: -@example -libogg-devel, libvorbis-devel -@end example - -These library packages are only available from -@uref{http://sourceware.org/cygwinports/, Cygwin Ports}: - -@example -yasm, libSDL-devel, libfaac-devel, libaacplus-devel, libgsm-devel, libmp3lame-devel, -libschroedinger1.0-devel, speex-devel, libtheora-devel, libxvidcore-devel -@end example - -The recommendation for x264 is to build it from source, as it evolves too -quickly for Cygwin Ports to be up to date. - -@section Crosscompilation for Windows under Cygwin - -With Cygwin you can create Windows binaries that do not need the cygwin1.dll. - -Just install your Cygwin as explained before, plus these additional -"Devel" packages: -@example -gcc-mingw-core, mingw-runtime, mingw-zlib -@end example - -and add some special flags to your configure invocation. - -For a static build run -@example -./configure --target-os=mingw32 --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin -@end example - -and for a build with shared libraries -@example -./configure --target-os=mingw32 --enable-shared --disable-static --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin -@end example - -@chapter Plan 9 - -The native @uref{http://plan9.bell-labs.com/plan9/, Plan 9} compiler -does not implement all the C99 features needed by FFmpeg so the gcc -port must be used. Furthermore, a few items missing from the C -library and shell environment need to be fixed. - -@itemize - -@item GNU awk, grep, make, and sed - -Working packages of these tools can be found at -@uref{http://code.google.com/p/ports2plan9/downloads/list, ports2plan9}. -They can be installed with @uref{http://9front.org/, 9front's} @code{pkg} -utility by setting @code{pkgpath} to -@code{http://ports2plan9.googlecode.com/files/}. - -@item Missing/broken @code{head} and @code{printf} commands - -Replacements adequate for building FFmpeg can be found in the -@code{compat/plan9} directory. Place these somewhere they will be -found by the shell. These are not full implementations of the -commands and are @emph{not} suitable for general use. - -@item Missing C99 @code{stdint.h} and @code{inttypes.h} - -Replacement headers are available from -@url{http://code.google.com/p/plan9front/issues/detail?id=152}. - -@item Missing or non-standard library functions - -Some functions in the C library are missing or incomplete. The -@code{@uref{http://ports2plan9.googlecode.com/files/gcc-apelibs-1207.tbz, -gcc-apelibs-1207}} package from -@uref{http://code.google.com/p/ports2plan9/downloads/list, ports2plan9} -includes an updated C library, but installing the full package gives -unusable executables. Instead, keep the files from @code{gccbin.tgz} -under @code{/386/lib/gnu}. 
From the @code{libc.a} archive in the -@code{gcc-apelibs-1207} package, extract the following object files and -turn them into a library: - -@itemize -@item @code{strerror.o} -@item @code{strtoll.o} -@item @code{snprintf.o} -@item @code{vsnprintf.o} -@item @code{vfprintf.o} -@item @code{_IO_getc.o} -@item @code{_IO_putc.o} -@end itemize - -Use the @code{--extra-libs} option of @code{configure} to inform the -build system of this library. - -@item FPU exceptions enabled by default - -Unlike most other systems, Plan 9 enables FPU exceptions by default. -These must be disabled before calling any FFmpeg functions. While the -included tools will do this automatically, other users of the -libraries must do it themselves. - -@end itemize - -@bye diff --git a/ffmpeg/doc/print_options.c b/ffmpeg/doc/print_options.c deleted file mode 100644 index ec8d839..0000000 --- a/ffmpeg/doc/print_options.c +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright (c) 2012 Anton Khirnov - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/* - * generate texinfo manpages for avoptions - */ - -#include <stddef.h> -#include <string.h> -#include <float.h> - -#include "libavformat/avformat.h" -#include "libavformat/options_table.h" -#include "libavcodec/avcodec.h" -#include "libavcodec/options_table.h" -#include "libavutil/opt.h" - -static void print_usage(void) -{ - fprintf(stderr, "Usage: enum_options type\n" - "type: format codec\n"); - exit(1); -} - -static void print_option(const AVOption *opts, const AVOption *o, int per_stream) -{ - if (!(o->flags & (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM))) - return; - - printf("@item -%s%s @var{", o->name, per_stream ? 
"[:stream_specifier]" : ""); - switch (o->type) { - case AV_OPT_TYPE_BINARY: printf("hexadecimal string"); break; - case AV_OPT_TYPE_STRING: printf("string"); break; - case AV_OPT_TYPE_INT: - case AV_OPT_TYPE_INT64: printf("integer"); break; - case AV_OPT_TYPE_FLOAT: - case AV_OPT_TYPE_DOUBLE: printf("float"); break; - case AV_OPT_TYPE_RATIONAL: printf("rational number"); break; - case AV_OPT_TYPE_FLAGS: printf("flags"); break; - default: printf("value"); break; - } - printf("} (@emph{"); - - if (o->flags & AV_OPT_FLAG_DECODING_PARAM) { - printf("input"); - if (o->flags & AV_OPT_FLAG_ENCODING_PARAM) - printf("/"); - } - if (o->flags & AV_OPT_FLAG_ENCODING_PARAM) printf("output"); - if (o->flags & AV_OPT_FLAG_AUDIO_PARAM) printf(",audio"); - if (o->flags & AV_OPT_FLAG_VIDEO_PARAM) printf(",video"); - if (o->flags & AV_OPT_FLAG_SUBTITLE_PARAM) printf(",subtitles"); - - printf("})\n"); - if (o->help) - printf("%s\n", o->help); - - if (o->unit) { - const AVOption *u; - printf("\nPossible values:\n@table @samp\n"); - - for (u = opts; u->name; u++) { - if (u->type == AV_OPT_TYPE_CONST && u->unit && !strcmp(u->unit, o->unit)) - printf("@item %s\n%s\n", u->name, u->help ? u->help : ""); - } - printf("@end table\n"); - } -} - -static void show_opts(const AVOption *opts, int per_stream) -{ - const AVOption *o; - - printf("@table @option\n"); - for (o = opts; o->name; o++) { - if (o->type != AV_OPT_TYPE_CONST) - print_option(opts, o, per_stream); - } - printf("@end table\n"); -} - -static void show_format_opts(void) -{ - printf("@section Format AVOptions\n"); - show_opts(avformat_options, 0); -} - -static void show_codec_opts(void) -{ - printf("@section Codec AVOptions\n"); - show_opts(avcodec_options, 1); -} - -int main(int argc, char **argv) -{ - if (argc < 2) - print_usage(); - - printf("@c DO NOT EDIT THIS FILE!\n" - "@c It was generated by print_options.\n\n"); - if (!strcmp(argv[1], "format")) - show_format_opts(); - else if (!strcmp(argv[1], "codec")) - show_codec_opts(); - else - print_usage(); - - return 0; -} diff --git a/ffmpeg/doc/protocols.texi b/ffmpeg/doc/protocols.texi deleted file mode 100644 index 57f9266..0000000 --- a/ffmpeg/doc/protocols.texi +++ /dev/null @@ -1,1104 +0,0 @@ -@chapter Protocols -@c man begin PROTOCOLS - -Protocols are configured elements in FFmpeg that enable access to -resources that require specific protocols. - -When you configure your FFmpeg build, all the supported protocols are -enabled by default. You can list all available ones using the -configure option "--list-protocols". - -You can disable all the protocols using the configure option -"--disable-protocols", and selectively enable a protocol using the -option "--enable-protocol=@var{PROTOCOL}", or you can disable a -particular protocol using the option -"--disable-protocol=@var{PROTOCOL}". - -The option "-protocols" of the ff* tools will display the list of -supported protocols. - -A description of the currently available protocols follows. - -@section bluray - -Read BluRay playlist. - -The accepted options are: -@table @option - -@item angle -BluRay angle - -@item chapter -Start chapter (1...N) - -@item playlist -Playlist to read (BDMV/PLAYLIST/?????.mpls) - -@end table - -Examples: - -Read longest playlist from BluRay mounted to /mnt/bluray: -@example -bluray:/mnt/bluray -@end example - -Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2: -@example --playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray -@end example - -@section cache - -Caching wrapper for input stream. 
-
-Cache the input stream to a temporary file. It brings seeking capability to live streams.
-
-@example
-cache:@var{URL}
-@end example
-
-@section concat
-
-Physical concatenation protocol.
-
-Allow reading and seeking from many resources in sequence as if they were
-a unique resource.
-
-A URL accepted by this protocol has the syntax:
-@example
-concat:@var{URL1}|@var{URL2}|...|@var{URLN}
-@end example
-
-where @var{URL1}, @var{URL2}, ..., @var{URLN} are the URLs of the
-resources to be concatenated, each one possibly specifying a distinct
-protocol.
-
-For example, to read a sequence of files @file{split1.mpeg},
-@file{split2.mpeg}, @file{split3.mpeg} with @command{ffplay}, use the
-command:
-@example
-ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
-@end example
-
-Note that you may need to escape the character "|" which is special for
-many shells.
-
-@section crypto
-
-AES-encrypted stream reading protocol.
-
-The accepted options are:
-@table @option
-@item key
-Set the AES decryption key binary block from the given hexadecimal representation.
-
-@item iv
-Set the AES decryption initialization vector binary block from the given hexadecimal representation.
-@end table
-
-Accepted URL formats:
-@example
-crypto:@var{URL}
-crypto+@var{URL}
-@end example
-
-@section data
-
-Data inline in the URI. See @url{http://en.wikipedia.org/wiki/Data_URI_scheme}.
-
-For example, to convert a GIF file given inline with @command{ffmpeg}:
-@example
-ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
-@end example
-
-@section file
-
-File access protocol.
-
-Allow reading from or writing to a file.
-
-For example, to read from a file @file{input.mpeg} with @command{ffmpeg}
-use the command:
-@example
-ffmpeg -i file:input.mpeg output.mpeg
-@end example
-
-The ff* tools default to the file protocol, that is, a resource
-specified with the name "FILE.mpeg" is interpreted as the URL
-"file:FILE.mpeg".
-
-This protocol accepts the following options:
-
-@table @option
-@item truncate
-Truncate existing files on write, if set to 1. A value of 0 prevents
-truncating. Default value is 1.
-
-@item blocksize
-Set I/O operation maximum block size, in bytes. Default value is
-@code{INT_MAX}, which results in not limiting the requested block size.
-Setting this value reasonably low improves user termination request reaction
-time, which is valuable for files on slow media.
-@end table
-
-@section ftp
-
-FTP (File Transfer Protocol).
-
-Allow reading from or writing to remote resources using the FTP protocol.
-
-The following syntax is required:
-@example
-ftp://[user[:password]@@]server[:port]/path/to/remote/resource.mpeg
-@end example
-
-This protocol accepts the following options.
-
-@table @option
-@item timeout
-Set timeout of socket I/O operations used by the underlying low level
-operation. By default it is set to -1, which means that the timeout is
-not specified.
-
-@item ftp-anonymous-password
-Password used when logging in as an anonymous user. Typically an e-mail
-address should be used.
-
-@item ftp-write-seekable
-Control seekability of the connection during encoding. If set to 1 the
-resource is supposed to be seekable, if set to 0 it is assumed not
-to be seekable. Default value is 0.
-@end table
-
-NOTE: The protocol can be used as output, but it is recommended not to do
-so unless special care is taken (tests, customized server configuration,
-etc.). Different FTP servers behave in different ways during seek
-operations.
ff* tools may produce incomplete content due to server limitations. - -@section gopher - -Gopher protocol. - -@section hls - -Read Apple HTTP Live Streaming compliant segmented stream as -a uniform one. The M3U8 playlists describing the segments can be -remote HTTP resources or local files, accessed using the standard -file protocol. -The nested protocol is declared by specifying -"+@var{proto}" after the hls URI scheme name, where @var{proto} -is either "file" or "http". - -@example -hls+http://host/path/to/remote/resource.m3u8 -hls+file://path/to/local/resource.m3u8 -@end example - -Using this protocol is discouraged - the hls demuxer should work -just as well (if not, please report the issues) and is more complete. -To use the hls demuxer instead, simply use the direct URLs to the -m3u8 files. - -@section http - -HTTP (Hyper Text Transfer Protocol). - -This protocol accepts the following options. - -@table @option -@item seekable -Control seekability of connection. If set to 1 the resource is -supposed to be seekable, if set to 0 it is assumed not to be seekable, -if set to -1 it will try to autodetect if it is seekable. Default -value is -1. - -@item chunked_post -If set to 1 use chunked transfer-encoding for posts, default is 1. - -@item headers -Set custom HTTP headers, can override built in default headers. The -value must be a string encoding the headers. - -@item content_type -Force a content type. - -@item user-agent -Override User-Agent header. If not specified the protocol will use a -string describing the libavformat build. - -@item multiple_requests -Use persistent connections if set to 1. By default it is 0. - -@item post_data -Set custom HTTP post data. - -@item timeout -Set timeout of socket I/O operations used by the underlying low level -operation. By default it is set to -1, which means that the timeout is -not specified. - -@item mime_type -Set MIME type. - -@item icy -If set to 1 request ICY (SHOUTcast) metadata from the server. If the server -supports this, the metadata has to be retrieved by the application by reading -the @option{icy_metadata_headers} and @option{icy_metadata_packet} options. -The default is 0. - -@item icy_metadata_headers -If the server supports ICY metadata, this contains the ICY specific HTTP reply -headers, separated with newline characters. - -@item icy_metadata_packet -If the server supports ICY metadata, and @option{icy} was set to 1, this -contains the last non-empty metadata packet sent by the server. - -@item cookies -Set the cookies to be sent in future requests. The format of each cookie is the -same as the value of a Set-Cookie HTTP response field. Multiple cookies can be -delimited by a newline character. -@end table - -@subsection HTTP Cookies - -Some HTTP requests will be denied unless cookie values are passed in with the -request. The @option{cookies} option allows these cookies to be specified. At -the very least, each cookie must specify a value along with a path and domain. -HTTP requests that match both the domain and path will automatically include the -cookie value in the HTTP Cookie header field. Multiple cookies can be delimited -by a newline. - -The required syntax to play a stream specifying a cookie is: -@example -ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8 -@end example - -@section mmst - -MMS (Microsoft Media Server) protocol over TCP. - -@section mmsh - -MMS (Microsoft Media Server) protocol over HTTP. 
- -The required syntax is: -@example -mmsh://@var{server}[:@var{port}][/@var{app}][/@var{playpath}] -@end example - -@section md5 - -MD5 output protocol. - -Computes the MD5 hash of the data to be written, and on close writes -this to the designated output or stdout if none is specified. It can -be used to test muxers without writing an actual file. - -Some examples follow. -@example -# Write the MD5 hash of the encoded AVI file to the file output.avi.md5. -ffmpeg -i input.flv -f avi -y md5:output.avi.md5 - -# Write the MD5 hash of the encoded AVI file to stdout. -ffmpeg -i input.flv -f avi -y md5: -@end example - -Note that some formats (typically MOV) require the output protocol to -be seekable, so they will fail with the MD5 output protocol. - -@section pipe - -UNIX pipe access protocol. - -Allow to read and write from UNIX pipes. - -The accepted syntax is: -@example -pipe:[@var{number}] -@end example - -@var{number} is the number corresponding to the file descriptor of the -pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If @var{number} -is not specified, by default the stdout file descriptor will be used -for writing, stdin for reading. - -For example to read from stdin with @command{ffmpeg}: -@example -cat test.wav | ffmpeg -i pipe:0 -# ...this is the same as... -cat test.wav | ffmpeg -i pipe: -@end example - -For writing to stdout with @command{ffmpeg}: -@example -ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi -# ...this is the same as... -ffmpeg -i test.wav -f avi pipe: | cat > test.avi -@end example - -This protocol accepts the following options: - -@table @option -@item blocksize -Set I/O operation maximum block size, in bytes. Default value is -@code{INT_MAX}, which results in not limiting the requested block size. -Setting this value reasonably low improves user termination request reaction -time, which is valuable if data transmission is slow. -@end table - -Note that some formats (typically MOV), require the output protocol to -be seekable, so they will fail with the pipe output protocol. - -@section rtmp - -Real-Time Messaging Protocol. - -The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia -content across a TCP/IP network. - -The required syntax is: -@example -rtmp://[@var{username}:@var{password}@@]@var{server}[:@var{port}][/@var{app}][/@var{instance}][/@var{playpath}] -@end example - -The accepted parameters are: -@table @option - -@item username -An optional username (mostly for publishing). - -@item password -An optional password (mostly for publishing). - -@item server -The address of the RTMP server. - -@item port -The number of the TCP port to use (by default is 1935). - -@item app -It is the name of the application to access. It usually corresponds to -the path where the application is installed on the RTMP server -(e.g. @file{/ondemand/}, @file{/flash/live/}, etc.). You can override -the value parsed from the URI through the @code{rtmp_app} option, too. - -@item playpath -It is the path or name of the resource to play with reference to the -application specified in @var{app}, may be prefixed by "mp4:". You -can override the value parsed from the URI through the @code{rtmp_playpath} -option, too. - -@item listen -Act as a server, listening for an incoming connection. - -@item timeout -Maximum time to wait for the incoming connection. Implies listen. 
-@end table - -Additionally, the following parameters can be set via command line options -(or in code via @code{AVOption}s): -@table @option - -@item rtmp_app -Name of application to connect on the RTMP server. This option -overrides the parameter specified in the URI. - -@item rtmp_buffer -Set the client buffer time in milliseconds. The default is 3000. - -@item rtmp_conn -Extra arbitrary AMF connection parameters, parsed from a string, -e.g. like @code{B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0}. -Each value is prefixed by a single character denoting the type, -B for Boolean, N for number, S for string, O for object, or Z for null, -followed by a colon. For Booleans the data must be either 0 or 1 for -FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or -1 to end or begin an object, respectively. Data items in subobjects may -be named, by prefixing the type with 'N' and specifying the name before -the value (i.e. @code{NB:myFlag:1}). This option may be used multiple -times to construct arbitrary AMF sequences. - -@item rtmp_flashver -Version of the Flash plugin used to run the SWF player. The default -is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible; -<libavformat version>).) - -@item rtmp_flush_interval -Number of packets flushed in the same request (RTMPT only). The default -is 10. - -@item rtmp_live -Specify that the media is a live stream. No resuming or seeking in -live streams is possible. The default value is @code{any}, which means the -subscriber first tries to play the live stream specified in the -playpath. If a live stream of that name is not found, it plays the -recorded stream. The other possible values are @code{live} and -@code{recorded}. - -@item rtmp_pageurl -URL of the web page in which the media was embedded. By default no -value will be sent. - -@item rtmp_playpath -Stream identifier to play or to publish. This option overrides the -parameter specified in the URI. - -@item rtmp_subscribe -Name of live stream to subscribe to. By default no value will be sent. -It is only sent if the option is specified or if rtmp_live -is set to live. - -@item rtmp_swfhash -SHA256 hash of the decompressed SWF file (32 bytes). - -@item rtmp_swfsize -Size of the decompressed SWF file, required for SWFVerification. - -@item rtmp_swfurl -URL of the SWF player for the media. By default no value will be sent. - -@item rtmp_swfverify -URL to player swf file, compute hash/size automatically. - -@item rtmp_tcurl -URL of the target stream. Defaults to proto://host[:port]/app. - -@end table - -For example to read with @command{ffplay} a multimedia resource named -"sample" from the application "vod" from an RTMP server "myserver": -@example -ffplay rtmp://myserver/vod/sample -@end example - -To publish to a password protected server, passing the playpath and -app names separately: -@example -ffmpeg -re -i <input> -f flv -rtmp_playpath some/long/path -rtmp_app long/app/name rtmp://username:password@@myserver/ -@end example - -@section rtmpe - -Encrypted Real-Time Messaging Protocol. - -The Encrypted Real-Time Messaging Protocol (RTMPE) is used for -streaming multimedia content within standard cryptographic primitives, -consisting of Diffie-Hellman key exchange and HMACSHA256, generating -a pair of RC4 keys. - -@section rtmps - -Real-Time Messaging Protocol over a secure SSL connection. - -The Real-Time Messaging Protocol (RTMPS) is used for streaming -multimedia content across an encrypted connection. 
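-
-For example, to play a stream over RTMPS with @command{ffplay} (a sketch
-assuming the build includes TLS support; the server and stream names are
-placeholders):
-@example
-ffplay rtmps://myserver/vod/sample
-@end example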
- -@section rtmpt - -Real-Time Messaging Protocol tunneled through HTTP. - -The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used -for streaming multimedia content within HTTP requests to traverse -firewalls. - -@section rtmpte - -Encrypted Real-Time Messaging Protocol tunneled through HTTP. - -The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE) -is used for streaming multimedia content within HTTP requests to traverse -firewalls. - -@section rtmpts - -Real-Time Messaging Protocol tunneled through HTTPS. - -The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used -for streaming multimedia content within HTTPS requests to traverse -firewalls. - -@section libssh - -Secure File Transfer Protocol via libssh - -Allow to read from or write to remote resources using SFTP protocol. - -Following syntax is required. - -@example -sftp://[user[:password]@@]server[:port]/path/to/remote/resource.mpeg -@end example - -This protocol accepts the following options. - -@table @option -@item timeout -Set timeout of socket I/O operations used by the underlying low level -operation. By default it is set to -1, which means that the timeout -is not specified. - -@item truncate -Truncate existing files on write, if set to 1. A value of 0 prevents -truncating. Default value is 1. - -@end table - -Example: Play a file stored on remote server. - -@example -ffplay sftp://user:password@@server_address:22/home/user/resource.mpeg -@end example - -@section librtmp rtmp, rtmpe, rtmps, rtmpt, rtmpte - -Real-Time Messaging Protocol and its variants supported through -librtmp. - -Requires the presence of the librtmp headers and library during -configuration. You need to explicitly configure the build with -"--enable-librtmp". If enabled this will replace the native RTMP -protocol. - -This protocol provides most client functions and a few server -functions needed to support RTMP, RTMP tunneled in HTTP (RTMPT), -encrypted RTMP (RTMPE), RTMP over SSL/TLS (RTMPS) and tunneled -variants of these encrypted types (RTMPTE, RTMPTS). - -The required syntax is: -@example -@var{rtmp_proto}://@var{server}[:@var{port}][/@var{app}][/@var{playpath}] @var{options} -@end example - -where @var{rtmp_proto} is one of the strings "rtmp", "rtmpt", "rtmpe", -"rtmps", "rtmpte", "rtmpts" corresponding to each RTMP variant, and -@var{server}, @var{port}, @var{app} and @var{playpath} have the same -meaning as specified for the RTMP native protocol. -@var{options} contains a list of space-separated options of the form -@var{key}=@var{val}. - -See the librtmp manual page (man 3 librtmp) for more information. - -For example, to stream a file in real-time to an RTMP server using -@command{ffmpeg}: -@example -ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream -@end example - -To play the same stream using @command{ffplay}: -@example -ffplay "rtmp://myserver/live/mystream live=1" -@end example - -@section rtp - -Real-time Transport Protocol. - -The required syntax for an RTP URL is: -rtp://@var{hostname}[:@var{port}][?@var{option}=@var{val}...] - -@var{port} specifies the RTP port to use. - -The following URL options are supported: - -@table @option - -@item ttl=@var{n} -Set the TTL (Time-To-Live) value (for multicast only). - -@item rtcpport=@var{n} -Set the remote RTCP port to @var{n}. - -@item localrtpport=@var{n} -Set the local RTP port to @var{n}. - -@item localrtcpport=@var{n}' -Set the local RTCP port to @var{n}. - -@item pkt_size=@var{n} -Set max packet size (in bytes) to @var{n}. 
- -@item connect=0|1 -Do a @code{connect()} on the UDP socket (if set to 1) or not (if set -to 0). - -@item sources=@var{ip}[,@var{ip}] -List allowed source IP addresses. - -@item block=@var{ip}[,@var{ip}] -List disallowed (blocked) source IP addresses. - -@item write_to_source=0|1 -Send packets to the source address of the latest received packet (if -set to 1) or to a default remote address (if set to 0). - -@item localport=@var{n} -Set the local RTP port to @var{n}. - -This is a deprecated option. Instead, @option{localrtpport} should be -used. - -@end table - -Important notes: - -@enumerate - -@item -If @option{rtcpport} is not set the RTCP port will be set to the RTP -port value plus 1. - -@item -If @option{localrtpport} (the local RTP port) is not set any available -port will be used for the local RTP and RTCP ports. - -@item -If @option{localrtcpport} (the local RTCP port) is not set it will be -set to the the local RTP port value plus 1. -@end enumerate - -@section rtsp - -RTSP is not technically a protocol handler in libavformat, it is a demuxer -and muxer. The demuxer supports both normal RTSP (with data transferred -over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with -data transferred over RDT). - -The muxer can be used to send a stream using RTSP ANNOUNCE to a server -supporting it (currently Darwin Streaming Server and Mischa Spiegelmock's -@uref{http://github.com/revmischa/rtsp-server, RTSP server}). - -The required syntax for a RTSP url is: -@example -rtsp://@var{hostname}[:@var{port}]/@var{path} -@end example - -The following options (set on the @command{ffmpeg}/@command{ffplay} command -line, or set in code via @code{AVOption}s or in @code{avformat_open_input}), -are supported: - -Flags for @code{rtsp_transport}: - -@table @option - -@item udp -Use UDP as lower transport protocol. - -@item tcp -Use TCP (interleaving within the RTSP control channel) as lower -transport protocol. - -@item udp_multicast -Use UDP multicast as lower transport protocol. - -@item http -Use HTTP tunneling as lower transport protocol, which is useful for -passing proxies. -@end table - -Multiple lower transport protocols may be specified, in that case they are -tried one at a time (if the setup of one fails, the next one is tried). -For the muxer, only the @code{tcp} and @code{udp} options are supported. - -Flags for @code{rtsp_flags}: - -@table @option -@item filter_src -Accept packets only from negotiated peer address and port. -@item listen -Act as a server, listening for an incoming connection. -@end table - -When receiving data over UDP, the demuxer tries to reorder received packets -(since they may arrive out of order, or packets may get lost totally). This -can be disabled by setting the maximum demuxing delay to zero (via -the @code{max_delay} field of AVFormatContext). - -When watching multi-bitrate Real-RTSP streams with @command{ffplay}, the -streams to display can be chosen with @code{-vst} @var{n} and -@code{-ast} @var{n} for video and audio respectively, and can be switched -on the fly by pressing @code{v} and @code{a}. 
-
-Example command lines:
-
-To watch a stream over UDP, with a max reordering delay of 0.5 seconds:
-
-@example
-ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
-@end example
-
-To watch a stream tunneled over HTTP:
-
-@example
-ffplay -rtsp_transport http rtsp://server/video.mp4
-@end example
-
-To send a stream in real time to an RTSP server, for others to watch:
-
-@example
-ffmpeg -re -i @var{input} -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
-@end example
-
-To receive a stream in real time:
-
-@example
-ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp @var{output}
-@end example
-
-@table @option
-@item stimeout
-Socket I/O timeout in microseconds.
-@end table
-
-@section sap
-
-Session Announcement Protocol (RFC 2974). This is not technically a
-protocol handler in libavformat; it is a muxer and demuxer.
-It is used for signalling RTP streams, by regularly announcing the SDP
-for the streams on a separate port.
-
-@subsection Muxer
-
-The syntax for a SAP URL given to the muxer is:
-@example
-sap://@var{destination}[:@var{port}][?@var{options}]
-@end example
-
-The RTP packets are sent to @var{destination} on port @var{port},
-or to port 5004 if no port is specified.
-@var{options} is a @code{&}-separated list. The following options
-are supported:
-
-@table @option
-
-@item announce_addr=@var{address}
-Specify the destination IP address for sending the announcements to.
-If omitted, the announcements are sent to the commonly used SAP
-announcement multicast address 224.2.127.254 (sap.mcast.net), or
-ff0e::2:7ffe if @var{destination} is an IPv6 address.
-
-@item announce_port=@var{port}
-Specify the port to send the announcements on, defaults to
-9875 if not specified.
-
-@item ttl=@var{ttl}
-Specify the time to live value for the announcements and RTP packets,
-defaults to 255.
-
-@item same_port=@var{0|1}
-If set to 1, send all RTP streams on the same port pair. If zero (the
-default), all streams are sent on unique ports, with each stream on a
-port 2 numbers higher than the previous one.
-VLC/Live555 requires this to be set to 1 in order to receive the stream.
-The RTP stack in libavformat for receiving requires all streams to be sent
-on unique ports.
-@end table
-
-Example command lines follow.
-
-To broadcast a stream on the local subnet, for watching in VLC:
-
-@example
-ffmpeg -re -i @var{input} -f sap sap://224.0.0.255?same_port=1
-@end example
-
-Similarly, for watching in @command{ffplay}:
-
-@example
-ffmpeg -re -i @var{input} -f sap sap://224.0.0.255
-@end example
-
-And for watching in @command{ffplay}, over IPv6:
-
-@example
-ffmpeg -re -i @var{input} -f sap sap://[ff0e::1:2:3:4]
-@end example
-
-@subsection Demuxer
-
-The syntax for a SAP URL given to the demuxer is:
-@example
-sap://[@var{address}][:@var{port}]
-@end example
-
-@var{address} is the multicast address to listen for announcements on;
-if omitted, the default 224.2.127.254 (sap.mcast.net) is used. @var{port}
-is the port that is listened on, 9875 if omitted.
-
-The demuxer listens for announcements on the given address and port.
-Once an announcement is received, it tries to receive that particular stream.
-
-Example command lines follow.
-
-To play back the first stream announced on the normal SAP multicast address:
-
-@example
-ffplay sap://
-@end example
-
-To play back the first stream announced on the default IPv6 SAP multicast address:
-
-@example
-ffplay sap://[ff0e::2:7ffe]
-@end example
-
-@section sctp
-
-Stream Control Transmission Protocol.
-
-The accepted URL syntax is:
-@example
-sctp://@var{host}:@var{port}[?@var{options}]
-@end example
-
-The protocol accepts the following options:
-@table @option
-@item listen
-If set to any value, listen for an incoming connection. An outgoing
-connection is made by default.
-
-@item max_streams
-Set the maximum number of streams. By default no limit is set.
-@end table
-
-@section srtp
-
-Secure Real-time Transport Protocol.
-
-The accepted options are:
-@table @option
-@item srtp_in_suite
-@item srtp_out_suite
-Select input and output encoding suites.
-
-Supported values:
-@table @samp
-@item AES_CM_128_HMAC_SHA1_80
-@item SRTP_AES128_CM_HMAC_SHA1_80
-@item AES_CM_128_HMAC_SHA1_32
-@item SRTP_AES128_CM_HMAC_SHA1_32
-@end table
-
-@item srtp_in_params
-@item srtp_out_params
-Set input and output encoding parameters, which are expressed by a
-base64-encoded representation of a binary block. The first 16 bytes of
-this binary block are used as master key, the following 14 bytes are
-used as master salt.
-@end table
-
-@section tcp
-
-Transmission Control Protocol.
-
-The required syntax for a TCP URL is:
-@example
-tcp://@var{hostname}:@var{port}[?@var{options}]
-@end example
-
-@var{options} contains a list of &-separated options of the form
-@var{key}=@var{val}.
-
-The list of supported options follows.
-
-@table @option
-@item listen=@var{1|0}
-Listen for an incoming connection. Default value is 0.
-
-@item timeout=@var{microseconds}
-Set the error-raising timeout, expressed in microseconds.
-
-This option is only relevant in read mode: if no data arrives within
-this time interval, an error is raised.
-
-@item listen_timeout=@var{microseconds}
-Set listen timeout, expressed in microseconds.
-@end table
-
-The following example shows how to set up a listening TCP connection
-with @command{ffmpeg}, which is then accessed with @command{ffplay}:
-@example
-ffmpeg -i @var{input} -f @var{format} tcp://@var{hostname}:@var{port}?listen
-ffplay tcp://@var{hostname}:@var{port}
-@end example
-
-@section tls
-
-Transport Layer Security (TLS) / Secure Sockets Layer (SSL)
-
-The required syntax for a TLS/SSL URL is:
-@example
-tls://@var{hostname}:@var{port}[?@var{options}]
-@end example
-
-The following parameters can be set via command line options
-(or in code via @code{AVOption}s):
-
-@table @option
-
-@item ca_file, cafile=@var{filename}
-A file containing certificate authority (CA) root certificates to treat
-as trusted. If the linked TLS library contains a default this might not
-need to be specified for verification to work, but not all libraries and
-setups have defaults built in.
-The file must be in OpenSSL PEM format.
-
-@item tls_verify=@var{1|0}
-If enabled, try to verify the peer that we are communicating with.
-Note that if using OpenSSL, this currently only makes sure that the
-peer certificate is signed by one of the root certificates in the CA
-database, but it does not validate that the certificate actually
-matches the host name we are trying to connect to. (With GnuTLS,
-the host name is validated as well.)
-
-This is disabled by default since it requires a CA database to be
-provided by the caller in many cases.
-
-@item cert_file, cert=@var{filename}
-A file containing a certificate to use in the handshake with the peer.
-(When operating as server, in listen mode, this is more often required
-by the peer, while client certificates are only mandated in certain
-setups.)
-
-@item key_file, key=@var{filename}
-A file containing the private key for the certificate.
- -@item listen=@var{1|0} -If enabled, listen for connections on the provided port, and assume -the server role in the handshake instead of the client role. - -@end table - -Example command lines: - -To create a TLS/SSL server that serves an input stream. - -@example -ffmpeg -i @var{input} -f @var{format} tls://@var{hostname}:@var{port}?listen&cert=@var{server.crt}&key=@var{server.key} -@end example - -To play back a stream from the TLS/SSL server using @command{ffplay}: - -@example -ffplay tls://@var{hostname}:@var{port} -@end example - -@section udp - -User Datagram Protocol. - -The required syntax for an UDP URL is: -@example -udp://@var{hostname}:@var{port}[?@var{options}] -@end example - -@var{options} contains a list of &-separated options of the form @var{key}=@var{val}. - -In case threading is enabled on the system, a circular buffer is used -to store the incoming data, which allows to reduce loss of data due to -UDP socket buffer overruns. The @var{fifo_size} and -@var{overrun_nonfatal} options are related to this buffer. - -The list of supported options follows. - -@table @option -@item buffer_size=@var{size} -Set the UDP socket buffer size in bytes. This is used both for the -receiving and the sending buffer size. - -@item localport=@var{port} -Override the local UDP port to bind with. - -@item localaddr=@var{addr} -Choose the local IP address. This is useful e.g. if sending multicast -and the host has multiple interfaces, where the user can choose -which interface to send on by specifying the IP address of that interface. - -@item pkt_size=@var{size} -Set the size in bytes of UDP packets. - -@item reuse=@var{1|0} -Explicitly allow or disallow reusing UDP sockets. - -@item ttl=@var{ttl} -Set the time to live value (for multicast only). - -@item connect=@var{1|0} -Initialize the UDP socket with @code{connect()}. In this case, the -destination address can't be changed with ff_udp_set_remote_url later. -If the destination address isn't known at the start, this option can -be specified in ff_udp_set_remote_url, too. -This allows finding out the source address for the packets with getsockname, -and makes writes return with AVERROR(ECONNREFUSED) if "destination -unreachable" is received. -For receiving, this gives the benefit of only receiving packets from -the specified peer address/port. - -@item sources=@var{address}[,@var{address}] -Only receive packets sent to the multicast group from one of the -specified sender IP addresses. - -@item block=@var{address}[,@var{address}] -Ignore packets sent to the multicast group from the specified -sender IP addresses. - -@item fifo_size=@var{units} -Set the UDP receiving circular buffer size, expressed as a number of -packets with size of 188 bytes. If not specified defaults to 7*4096. - -@item overrun_nonfatal=@var{1|0} -Survive in case of UDP receiving circular buffer overrun. Default -value is 0. - -@item timeout=@var{microseconds} -Set raise error timeout, expressed in microseconds. - -This option is only relevant in read mode: if no data arrived in more -than this time interval, raise error. 
-@end table - -@subsection Examples - -@itemize -@item -Use @command{ffmpeg} to stream over UDP to a remote endpoint: -@example -ffmpeg -i @var{input} -f @var{format} udp://@var{hostname}:@var{port} -@end example - -@item -Use @command{ffmpeg} to stream in mpegts format over UDP using 188 -sized UDP packets, using a large input buffer: -@example -ffmpeg -i @var{input} -f mpegts udp://@var{hostname}:@var{port}?pkt_size=188&buffer_size=65535 -@end example - -@item -Use @command{ffmpeg} to receive over UDP from a remote endpoint: -@example -ffmpeg -i udp://[@var{multicast-address}]:@var{port} ... -@end example -@end itemize - -@section unix - -Unix local socket - -The required syntax for a Unix socket URL is: - -@example -unix://@var{filepath} -@end example - -The following parameters can be set via command line options -(or in code via @code{AVOption}s): - -@table @option -@item timeout -Timeout in ms. -@item listen -Create the Unix socket in listening mode. -@end table - -@c man end PROTOCOLS diff --git a/ffmpeg/doc/rate_distortion.txt b/ffmpeg/doc/rate_distortion.txt deleted file mode 100644 index e9711c2..0000000 --- a/ffmpeg/doc/rate_distortion.txt +++ /dev/null @@ -1,61 +0,0 @@ -A Quick Description Of Rate Distortion Theory. - -We want to encode a video, picture or piece of music optimally. What does -"optimally" really mean? It means that we want to get the best quality at a -given filesize OR we want to get the smallest filesize at a given quality -(in practice, these 2 goals are usually the same). - -Solving this directly is not practical; trying all byte sequences 1 -megabyte in length and selecting the "best looking" sequence will yield -256^1000000 cases to try. - -But first, a word about quality, which is also called distortion. -Distortion can be quantified by almost any quality measurement one chooses. -Commonly, the sum of squared differences is used but more complex methods -that consider psychovisual effects can be used as well. It makes no -difference in this discussion. - - -First step: that rate distortion factor called lambda... -Let's consider the problem of minimizing: - - distortion + lambda*rate - -rate is the filesize -distortion is the quality -lambda is a fixed value chosen as a tradeoff between quality and filesize -Is this equivalent to finding the best quality for a given max -filesize? The answer is yes. For each filesize limit there is some lambda -factor for which minimizing above will get you the best quality (using your -chosen quality measurement) at the desired (or lower) filesize. - - -Second step: splitting the problem. -Directly splitting the problem of finding the best quality at a given -filesize is hard because we do not know how many bits from the total -filesize should be allocated to each of the subproblems. But the formula -from above: - - distortion + lambda*rate - -can be trivially split. Consider: - - (distortion0 + distortion1) + lambda*(rate0 + rate1) - -This creates a problem made of 2 independent subproblems. The subproblems -might be 2 16x16 macroblocks in a frame of 32x16 size. To minimize: - - (distortion0 + distortion1) + lambda*(rate0 + rate1) - -we just have to minimize: - - distortion0 + lambda*rate0 - -and - - distortion1 + lambda*rate1 - -I.e, the 2 problems can be solved independently. 
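-
-As an illustration only (this is not part of the original text and not
-FFmpeg API; the struct and function below are made up), choosing the best
-of a set of candidate encodings under this model is a plain minimization
-of distortion + lambda*rate:
-
-    /* pick the candidate with the smallest distortion + lambda*rate */
-    typedef struct Candidate {
-        double distortion; /* e.g. sum of squared differences */
-        double rate;       /* bits needed to code this candidate */
-    } Candidate;
-
-    static int best_candidate(const Candidate *c, int n, double lambda)
-    {
-        int best = 0;
-        double best_cost = c[0].distortion + lambda * c[0].rate;
-        for (int i = 1; i < n; i++) {
-            double cost = c[i].distortion + lambda * c[i].rate;
-            if (cost < best_cost) {
-                best_cost = cost;
-                best      = i;
-            }
-        }
-        return best;
-    }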
- -Author: Michael Niedermayer -Copyright: LGPL diff --git a/ffmpeg/doc/snow.txt b/ffmpeg/doc/snow.txt deleted file mode 100644 index 080a33b..0000000 --- a/ffmpeg/doc/snow.txt +++ /dev/null @@ -1,638 +0,0 @@ -============================================= -Snow Video Codec Specification Draft 20080110 -============================================= - -Introduction: -============= -This specification describes the Snow bitstream syntax and semantics as -well as the formal Snow decoding process. - -The decoding process is described precisely and any compliant decoder -MUST produce the exact same output for a spec-conformant Snow stream. -For encoding, though, any process which generates a stream compliant to -the syntactical and semantic requirements and which is decodable by -the process described in this spec shall be considered a conformant -Snow encoder. - -Definitions: -============ - -MUST the specific part must be done to conform to this standard -SHOULD it is recommended to be done that way, but not strictly required - -ilog2(x) is the rounded down logarithm of x with basis 2 -ilog2(0) = 0 - -Type definitions: -================= - -b 1-bit range coded -u unsigned scalar value range coded -s signed scalar value range coded - - -Bitstream syntax: -================= - -frame: - header - prediction - residual - -header: - keyframe b MID_STATE - if(keyframe || always_reset) - reset_contexts - if(keyframe){ - version u header_state - always_reset b header_state - temporal_decomposition_type u header_state - temporal_decomposition_count u header_state - spatial_decomposition_count u header_state - colorspace_type u header_state - if (nb_planes > 2) { - chroma_h_shift u header_state - chroma_v_shift u header_state - } - spatial_scalability b header_state - max_ref_frames-1 u header_state - qlogs - } - if(!keyframe){ - update_mc b header_state - if(update_mc){ - for(plane=0; plane<nb_plane_types; plane++){ - diag_mc b header_state - htaps/2-1 u header_state - for(i= p->htaps/2; i; i--) - |hcoeff[i]| u header_state - } - } - update_qlogs b header_state - if(update_qlogs){ - spatial_decomposition_count u header_state - qlogs - } - } - - spatial_decomposition_type s header_state - qlog s header_state - mv_scale s header_state - qbias s header_state - block_max_depth s header_state - -qlogs: - for(plane=0; plane<nb_plane_types; plane++){ - quant_table[plane][0][0] s header_state - for(level=0; level < spatial_decomposition_count; level++){ - quant_table[plane][level][1]s header_state - quant_table[plane][level][3]s header_state - } - } - -reset_contexts - *_state[*]= MID_STATE - -prediction: - for(y=0; y<block_count_vertical; y++) - for(x=0; x<block_count_horizontal; x++) - block(0) - -block(level): - mvx_diff=mvy_diff=y_diff=cb_diff=cr_diff=0 - if(keyframe){ - intra=1 - }else{ - if(level!=max_block_depth){ - s_context= 2*left->level + 2*top->level + topleft->level + topright->level - leaf b block_state[4 + s_context] - } - if(level==max_block_depth || leaf){ - intra b block_state[1 + left->intra + top->intra] - if(intra){ - y_diff s block_state[32] - cb_diff s block_state[64] - cr_diff s block_state[96] - }else{ - ref_context= ilog2(2*left->ref) + ilog2(2*top->ref) - if(ref_frames > 1) - ref u block_state[128 + 1024 + 32*ref_context] - mx_context= ilog2(2*abs(left->mx - top->mx)) - my_context= ilog2(2*abs(left->my - top->my)) - mvx_diff s block_state[128 + 32*(mx_context + 16*!!ref)] - mvy_diff s block_state[128 + 32*(my_context + 16*!!ref)] - } - }else{ - block(level+1) - block(level+1) - 
block(level+1) - block(level+1) - } - } - - -residual: - residual2(luma) - if (nb_planes > 2) { - residual2(chroma_cr) - residual2(chroma_cb) - } - -residual2: - for(level=0; level<spatial_decomposition_count; level++){ - if(level==0) - subband(LL, 0) - subband(HL, level) - subband(LH, level) - subband(HH, level) - } - -subband: - FIXME - -nb_plane_types = gray ? 1 : 2; - -Tag description: ----------------- - -version - 0 - this MUST NOT change within a bitstream - -always_reset - if 1 then the range coder contexts will be reset after each frame - -temporal_decomposition_type - 0 - -temporal_decomposition_count - 0 - -spatial_decomposition_count - FIXME - -colorspace_type - 0 unspecified YcbCr - 1 Gray - 2 Gray + Alpha - 3 GBR - 4 GBRA - this MUST NOT change within a bitstream - -chroma_h_shift - log2(luma.width / chroma.width) - this MUST NOT change within a bitstream - -chroma_v_shift - log2(luma.height / chroma.height) - this MUST NOT change within a bitstream - -spatial_scalability - 0 - -max_ref_frames - maximum number of reference frames - this MUST NOT change within a bitstream - -update_mc - indicates that motion compensation filter parameters are stored in the - header - -diag_mc - flag to enable faster diagonal interpolation - this SHOULD be 1 unless it turns out to be covered by a valid patent - -htaps - number of half pel interpolation filter taps, MUST be even, >0 and <10 - -hcoeff - half pel interpolation filter coefficients, hcoeff[0] are the 2 middle - coefficients [1] are the next outer ones and so on, resulting in a filter - like: ...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... - the sign of the coefficients is not explicitly stored but alternates - after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... - hcoeff[0] is not explicitly stored but found by subtracting the sum - of all stored coefficients with signs from 32 - hcoeff[0]= 32 - hcoeff[1] - hcoeff[2] - ... - a good choice for hcoeff and htaps is - htaps= 6 - hcoeff={40,-10,2} - an alternative which requires more computations at both encoder and - decoder side and may or may not be better is - htaps= 8 - hcoeff={42,-14,6,-2} - - -ref_frames - minimum of the number of available reference frames and max_ref_frames - for example the first frame after a key frame always has ref_frames=1 - -spatial_decomposition_type - wavelet type - 0 is a 9/7 symmetric compact integer wavelet - 1 is a 5/3 symmetric compact integer wavelet - others are reserved - stored as delta from last, last is reset to 0 if always_reset || keyframe - -qlog - quality (logarthmic quantizer scale) - stored as delta from last, last is reset to 0 if always_reset || keyframe - -mv_scale - stored as delta from last, last is reset to 0 if always_reset || keyframe - FIXME check that everything works fine if this changes between frames - -qbias - dequantization bias - stored as delta from last, last is reset to 0 if always_reset || keyframe - -block_max_depth - maximum depth of the block tree - stored as delta from last, last is reset to 0 if always_reset || keyframe - -quant_table - quantiztation table - - -Highlevel bitstream structure: -============================= - -------------------------------------------- -| Header | - -------------------------------------------- -| ------------------------------------ | -| | Block0 | | -| | split? | | -| | yes no | | -| | ......... intra? | | -| | : Block01 : yes no | | -| | : Block02 : ....... .......... 
| | -| | : Block03 : : y DC : : ref index: | | -| | : Block04 : : cb DC : : motion x : | | -| | ......... : cr DC : : motion y : | | -| | ....... .......... | | -| ------------------------------------ | -| ------------------------------------ | -| | Block1 | | -| ... | - -------------------------------------------- -| ------------ ------------ ------------ | -|| Y subbands | | Cb subbands| | Cr subbands|| -|| --- --- | | --- --- | | --- --- || -|| |LL0||HL0| | | |LL0||HL0| | | |LL0||HL0| || -|| --- --- | | --- --- | | --- --- || -|| --- --- | | --- --- | | --- --- || -|| |LH0||HH0| | | |LH0||HH0| | | |LH0||HH0| || -|| --- --- | | --- --- | | --- --- || -|| --- --- | | --- --- | | --- --- || -|| |HL1||LH1| | | |HL1||LH1| | | |HL1||LH1| || -|| --- --- | | --- --- | | --- --- || -|| --- --- | | --- --- | | --- --- || -|| |HH1||HL2| | | |HH1||HL2| | | |HH1||HL2| || -|| ... | | ... | | ... || -| ------------ ------------ ------------ | - -------------------------------------------- - -Decoding process: -================= - - ------------ - | | - | Subbands | - ------------ | | - | | ------------ - | Intra DC | | - | | LL0 subband prediction - ------------ | - \ Dequantizaton - ------------------- \ | -| Reference frames | \ IDWT -| ------- ------- | Motion \ | -||Frame 0| |Frame 1|| Compensation . OBMC v ------- -| ------- ------- | --------------. \------> + --->|Frame n|-->output -| ------- ------- | ------- -||Frame 2| |Frame 3||<----------------------------------/ -| ... | - ------------------- - - -Range Coder: -============ - -Binary Range Coder: -------------------- -The implemented range coder is an adapted version based upon "Range encoding: -an algorithm for removing redundancy from a digitised message." by G. N. N. -Martin. -The symbols encoded by the Snow range coder are bits (0|1). The -associated probabilities are not fix but change depending on the symbol mix -seen so far. 
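-
-As a simplified sketch only (this is not the actual Snow range coder code),
-the adaptive state update described by the table below amounts to:
-
-    /* state_transition_table[] is the table given below */
-    static int update_state(int old_state, int bit)
-    {
-        if (bit)
-            return state_transition_table[old_state];
-        else
-            return 256 - state_transition_table[256 - old_state];
-    }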
- - -bit seen | new state ----------+----------------------------------------------- - 0 | 256 - state_transition_table[256 - old_state]; - 1 | state_transition_table[ old_state]; - -state_transition_table = { - 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, - 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, - 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, - 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, -104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, -119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, -134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, -150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, -165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, -180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, -195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, -210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, -226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, -241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0}; - -FIXME - - -Range Coding of integers: -------------------------- -FIXME - - -Neighboring Blocks: -=================== -left and top are set to the respective blocks unless they are outside of -the image in which case they are set to the Null block - -top-left is set to the top left block unless it is outside of the image in -which case it is set to the left block - -if this block has no larger parent block or it is at the left side of its -parent block and the top right block is not outside of the image then the -top right block is used for top-right else the top-left block is used - -Null block -y,cb,cr are 128 -level, ref, mx and my are 0 - - -Motion Vector Prediction: -========================= -1. the motion vectors of all the neighboring blocks are scaled to -compensate for the difference of reference frames - -scaled_mv= (mv * (256 * (current_reference+1) / (mv.reference+1)) + 128)>>8 - -2. the median of the scaled left, top and top-right vectors is used as -motion vector prediction - -3. the used motion vector is the sum of the predictor and - (mvx_diff, mvy_diff)*mv_scale - - -Intra DC Predicton: -====================== -the luma and chroma values of the left block are used as predictors - -the used luma and chroma is the sum of the predictor and y_diff, cb_diff, cr_diff -to reverse this in the decoder apply the following: -block[y][x].dc[0] = block[y][x-1].dc[0] + y_diff; -block[y][x].dc[1] = block[y][x-1].dc[1] + cb_diff; -block[y][x].dc[2] = block[y][x-1].dc[2] + cr_diff; -block[*][-1].dc[*]= 128; - - -Motion Compensation: -==================== - -Halfpel interpolation: ----------------------- -halfpel interpolation is done by convolution with the halfpel filter stored -in the header: - -horizontal halfpel samples are found by -H1[y][x] = hcoeff[0]*(F[y][x ] + F[y][x+1]) - + hcoeff[1]*(F[y][x-1] + F[y][x+2]) - + hcoeff[2]*(F[y][x-2] + F[y][x+3]) - + ... -h1[y][x] = (H1[y][x] + 32)>>6; - -vertical halfpel samples are found by -H2[y][x] = hcoeff[0]*(F[y ][x] + F[y+1][x]) - + hcoeff[1]*(F[y-1][x] + F[y+2][x]) - + ... 
-h2[y][x] = (H2[y][x] + 32)>>6; - -vertical+horizontal halfpel samples are found by -H3[y][x] = hcoeff[0]*(H2[y][x ] + H2[y][x+1]) - + hcoeff[1]*(H2[y][x-1] + H2[y][x+2]) - + ... -H3[y][x] = hcoeff[0]*(H1[y ][x] + H1[y+1][x]) - + hcoeff[1]*(H1[y+1][x] + H1[y+2][x]) - + ... -h3[y][x] = (H3[y][x] + 2048)>>12; - - - F H1 F - | | | - | | | - | | | - F H1 F - | | | - | | | - | | | - F-------F-------F-> H1<-F-------F-------F - v v v - H2 H3 H2 - ^ ^ ^ - F-------F-------F-> H1<-F-------F-------F - | | | - | | | - | | | - F H1 F - | | | - | | | - | | | - F H1 F - - -unavailable fullpel samples (outside the picture for example) shall be equal -to the closest available fullpel sample - - -Smaller pel interpolation: --------------------------- -if diag_mc is set then points which lie on a line between 2 vertically, -horiziontally or diagonally adjacent halfpel points shall be interpolated -linearls with rounding to nearest and halfway values rounded up. -points which lie on 2 diagonals at the same time should only use the one -diagonal not containing the fullpel point - - - - F-->O---q---O<--h1->O---q---O<--F - v \ / v \ / v - O O O O O O O - | / | \ | - q q q q q - | / | \ | - O O O O O O O - ^ / \ ^ / \ ^ - h2-->O---q---O<--h3->O---q---O<--h2 - v \ / v \ / v - O O O O O O O - | \ | / | - q q q q q - | \ | / | - O O O O O O O - ^ / \ ^ / \ ^ - F-->O---q---O<--h1->O---q---O<--F - - - -the remaining points shall be bilinearly interpolated from the -up to 4 surrounding halfpel and fullpel points, again rounding should be to -nearest and halfway values rounded up - -compliant Snow decoders MUST support 1-1/8 pel luma and 1/2-1/16 pel chroma -interpolation at least - - -Overlapped block motion compensation: -------------------------------------- -FIXME - -LL band prediction: -=================== -Each sample in the LL0 subband is predicted by the median of the left, top and -left+top-topleft samples, samples outside the subband shall be considered to -be 0. To reverse this prediction in the decoder apply the following. -for(y=0; y<height; y++){ - for(x=0; x<width; x++){ - sample[y][x] += median(sample[y-1][x], - sample[y][x-1], - sample[y-1][x]+sample[y][x-1]-sample[y-1][x-1]); - } -} -sample[-1][*]=sample[*][-1]= 0; -width,height here are the width and height of the LL0 subband not of the final -video - - -Dequantizaton: -============== -FIXME - -Wavelet Transform: -================== - -Snow supports 2 wavelet transforms, the symmetric biorthogonal 5/3 integer -transform and a integer approximation of the symmetric biorthogonal 9/7 -daubechies wavelet. - -2D IDWT (inverse discrete wavelet transform) --------------------------------------------- -The 2D IDWT applies a 2D filter recursively, each time combining the -4 lowest frequency subbands into a single subband until only 1 subband -remains. -The 2D filter is done by first applying a 1D filter in the vertical direction -and then applying it in the horizontal one. - --------------- --------------- --------------- --------------- -|LL0|HL0| | | | | | | | | | | | -|---+---| HL1 | | L0|H0 | HL1 | | LL1 | HL1 | | | | -|LH0|HH0| | | | | | | | | | | | -|-------+-------|->|-------+-------|->|-------+-------|->| L1 | H1 |->... -| | | | | | | | | | | | -| LH1 | HH1 | | LH1 | HH1 | | LH1 | HH1 | | | | -| | | | | | | | | | | | - --------------- --------------- --------------- --------------- - - -1D Filter: ----------- -1. interleave the samples of the low and high frequency subbands like -s={L0, H0, L1, H1, L2, H2, L3, H3, ... 
} -note, this can end with a L or a H, the number of elements shall be w -s[-1] shall be considered equivalent to s[1 ] -s[w ] shall be considered equivalent to s[w-2] - -2. perform the lifting steps in order as described below - -5/3 Integer filter: -1. s[i] -= (s[i-1] + s[i+1] + 2)>>2; for all even i < w -2. s[i] += (s[i-1] + s[i+1] )>>1; for all odd i < w - -\ | /|\ | /|\ | /|\ | /|\ - \|/ | \|/ | \|/ | \|/ | - + | + | + | + | -1/4 - /|\ | /|\ | /|\ | /|\ | -/ | \|/ | \|/ | \|/ | \|/ - | + | + | + | + +1/2 - - -Snow's 9/7 Integer filter: -1. s[i] -= (3*(s[i-1] + s[i+1]) + 4)>>3; for all even i < w -2. s[i] -= s[i-1] + s[i+1] ; for all odd i < w -3. s[i] += ( s[i-1] + s[i+1] + 4*s[i] + 8)>>4; for all even i < w -4. s[i] += (3*(s[i-1] + s[i+1]) )>>1; for all odd i < w - -\ | /|\ | /|\ | /|\ | /|\ - \|/ | \|/ | \|/ | \|/ | - + | + | + | + | -3/8 - /|\ | /|\ | /|\ | /|\ | -/ | \|/ | \|/ | \|/ | \|/ - (| + (| + (| + (| + -1 -\ + /|\ + /|\ + /|\ + /|\ +1/4 - \|/ | \|/ | \|/ | \|/ | - + | + | + | + | +1/16 - /|\ | /|\ | /|\ | /|\ | -/ | \|/ | \|/ | \|/ | \|/ - | + | + | + | + +3/2 - -optimization tips: -following are exactly identical -(3a)>>1 == a + (a>>1) -(a + 4b + 8)>>4 == ((a>>2) + b + 2)>>2 - -16bit implementation note: -The IDWT can be implemented with 16bits, but this requires some care to -prevent overflows, the following list, lists the minimum number of bits needed -for some terms -1. lifting step -A= s[i-1] + s[i+1] 16bit -3*A + 4 18bit -A + (A>>1) + 2 17bit - -3. lifting step -s[i-1] + s[i+1] 17bit - -4. lifiting step -3*(s[i-1] + s[i+1]) 17bit - - -TODO: -===== -Important: -finetune initial contexts -flip wavelet? -try to use the wavelet transformed predicted image (motion compensated image) as context for coding the residual coefficients -try the MV length as context for coding the residual coefficients -use extradata for stuff which is in the keyframes now? -the MV median predictor is patented IIRC -implement per picture halfpel interpolation -try different range coder state transition tables for different contexts - -Not Important: -compare the 6 tap and 8 tap hpel filters (psnr/bitrate and subjective quality) -spatial_scalability b vs u (!= 0 breaks syntax anyway so we can add a u later) - - -Credits: -======== -Michael Niedermayer -Loren Merritt - - -Copyright: -========== -GPL + GFDL + whatever is needed to make this a RFC diff --git a/ffmpeg/doc/swresample.txt b/ffmpeg/doc/swresample.txt deleted file mode 100644 index 2d192a3..0000000 --- a/ffmpeg/doc/swresample.txt +++ /dev/null @@ -1,46 +0,0 @@ - The official guide to swresample for confused developers. - ========================================================= - -Current (simplified) Architecture: ---------------------------------- - Input - v - __________________/|\___________ - / | \ - / input sample format convert v - / | ___________/ - | |/ - | v - | ___________/|\___________ _____________ - | / | \ | | - | Rematrix | resample <---->| Buffers | - | \___________ | ___________/ |_____________| - v \|/ -Special Converter v - v ___________/|\___________ _____________ - | / | \ | | - | Rematrix | resample <---->| Buffers | - | \___________ | ___________/ |_____________| - | \|/ - | v - | |\___________ - \ | \ - \ output sample format convert v - \_________________ | ___________/ - \|/ - v - Output - -Planar/Packed conversion is done when needed during sample format conversion. -Every step can be skipped without memcpy when it is not needed. 
-Either Resampling and Rematrixing can be performed first depending on which -way it is faster. -The Buffers are needed for resampling due to resamplng being a process that -requires future and past data, it thus also introduces inevitably a delay when -used. -Internally 32bit float and 16bit int is supported currently, other formats can -easily be added. -Externally all sample formats in packed and planar configuration are supported -It's also trivial to add special converters for common cases. -If only sample format and/or packed/planar conversion is needed, it -is performed from input to output directly in a single pass with no intermediates. diff --git a/ffmpeg/doc/swscale.txt b/ffmpeg/doc/swscale.txt deleted file mode 100644 index 2066009..0000000 --- a/ffmpeg/doc/swscale.txt +++ /dev/null @@ -1,98 +0,0 @@ - The official guide to swscale for confused developers. - ======================================================== - -Current (simplified) Architecture: ---------------------------------- - Input - v - _______OR_________ - / \ - / \ - special converter [Input to YUV converter] - | | - | (8bit YUV 4:4:4 / 4:2:2 / 4:2:0 / 4:0:0 ) - | | - | v - | Horizontal scaler - | | - | (15bit YUV 4:4:4 / 4:2:2 / 4:2:0 / 4:1:1 / 4:0:0 ) - | | - | v - | Vertical scaler and output converter - | | - v v - output - - -Swscale has 2 scaler paths. Each side must be capable of handling -slices, that is, consecutive non-overlapping rectangles of dimension -(0,slice_top) - (picture_width, slice_bottom). - -special converter - These generally are unscaled converters of common - formats, like YUV 4:2:0/4:2:2 -> RGB12/15/16/24/32. Though it could also - in principle contain scalers optimized for specific common cases. - -Main path - The main path is used when no special converter can be used. The code - is designed as a destination line pull architecture. That is, for each - output line the vertical scaler pulls lines from a ring buffer. When - the ring buffer does not contain the wanted line, then it is pulled from - the input slice through the input converter and horizontal scaler. - The result is also stored in the ring buffer to serve future vertical - scaler requests. - When no more output can be generated because lines from a future slice - would be needed, then all remaining lines in the current slice are - converted, horizontally scaled and put in the ring buffer. - [This is done for luma and chroma, each with possibly different numbers - of lines per picture.] - -Input to YUV Converter - When the input to the main path is not planar 8 bits per component YUV or - 8-bit gray, it is converted to planar 8-bit YUV. Two sets of converters - exist for this currently: One performs horizontal downscaling by 2 - before the conversion, the other leaves the full chroma resolution, - but is slightly slower. The scaler will try to preserve full chroma - when the output uses it. It is possible to force full chroma with - SWS_FULL_CHR_H_INP even for cases where the scaler thinks it is useless. - -Horizontal scaler - There are several horizontal scalers. A special case worth mentioning is - the fast bilinear scaler that is made of runtime-generated MMXEXT code - using specially tuned pshufw instructions. - The remaining scalers are specially-tuned for various filter lengths. - They scale 8-bit unsigned planar data to 16-bit signed planar data. - Future >8 bits per component inputs will need to add a new horizontal - scaler that preserves the input precision. 
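-
-As a minimal sketch of the SWS_FULL_CHR_H_INP flag mentioned under "Input
-to YUV Converter" above (not code from this file; the width/height
-variables are assumed to exist in the caller), forcing full chroma from the
-input converter is just a matter of the flags passed when creating the
-context:
-
-    #include <libswscale/swscale.h>
-
-    struct SwsContext *sws = sws_getContext(
-        src_w, src_h, AV_PIX_FMT_YUV420P,  /* input size and format */
-        dst_w, dst_h, AV_PIX_FMT_RGB24,    /* output size and format */
-        SWS_BICUBIC | SWS_FULL_CHR_H_INP,  /* scaler + force full chroma input */
-        NULL, NULL, NULL);                 /* no extra filters or params */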
- -Vertical scaler and output converter - There is a large number of combined vertical scalers + output converters. - Some are: - * unscaled output converters - * unscaled output converters that average 2 chroma lines - * bilinear converters (C, MMX and accurate MMX) - * arbitrary filter length converters (C, MMX and accurate MMX) - And - * Plain C 8-bit 4:2:2 YUV -> RGB converters using LUTs - * Plain C 17-bit 4:4:4 YUV -> RGB converters using multiplies - * MMX 11-bit 4:2:2 YUV -> RGB converters - * Plain C 16-bit Y -> 16-bit gray - ... - - RGB with less than 8 bits per component uses dither to improve the - subjective quality and low-frequency accuracy. - - -Filter coefficients: --------------------- -There are several different scalers (bilinear, bicubic, lanczos, area, -sinc, ...). Their coefficients are calculated in initFilter(). -Horizontal filter coefficients have a 1.0 point at 1 << 14, vertical ones at -1 << 12. The 1.0 points have been chosen to maximize precision while leaving -a little headroom for convolutional filters like sharpening filters and -minimizing SIMD instructions needed to apply them. -It would be trivial to use a different 1.0 point if some specific scaler -would benefit from it. -Also, as already hinted at, initFilter() accepts an optional convolutional -filter as input that can be used for contrast, saturation, blur, sharpening -shift, chroma vs. luma shift, ... diff --git a/ffmpeg/doc/t2h.init b/ffmpeg/doc/t2h.init deleted file mode 100644 index e7760f4..0000000 --- a/ffmpeg/doc/t2h.init +++ /dev/null @@ -1,115 +0,0 @@ -# no horiz rules between sections -$end_section = \&FFmpeg_end_section; -sub FFmpeg_end_section($$) -{ -} - -$EXTRA_HEAD = -'<link rel="icon" href="favicon.png" type="image/png" /> -'; - -$CSS_LINES = $ENV{"FFMPEG_CSS"} || <<EOT; -<link rel="stylesheet" type="text/css" href="default.css" /> -EOT - -my $TEMPLATE_HEADER = $ENV{"FFMPEG_HEADER"} || <<EOT; -<link rel="icon" href="favicon.png" type="image/png" /> -</head> -<body> -<div id="container"> -<div id="body"> -EOT - -$PRE_BODY_CLOSE = '</div></div>'; - -$SMALL_RULE = ''; -$BODYTEXT = ''; - -$print_page_foot = \&FFmpeg_print_page_foot; -sub FFmpeg_print_page_foot($$) -{ - my $fh = shift; - my $program_string = defined &T2H_DEFAULT_program_string ? - T2H_DEFAULT_program_string() : program_string(); - print $fh '<footer class="footer pagination-right">' . "\n"; - print $fh '<span class="label label-info">' . $program_string; - print $fh "</span></footer></div></div></body>\n"; -} - -$float = \&FFmpeg_float; - -sub FFmpeg_float($$$$) -{ - my $text = shift; - my $float = shift; - my $caption = shift; - my $shortcaption = shift; - - my $label = ''; - if (exists($float->{'id'})) - { - $label = &$anchor($float->{'id'}); - } - my $class = ''; - my $subject = ''; - - if ($caption =~ /NOTE/) - { - $class = "alert alert-info"; - } - elsif ($caption =~ /IMPORTANT/) - { - $class = "alert alert-warning"; - } - - return '<div class="float ' . $class . '">' . "$label\n" . $text . 
'</div>'; -} - -$print_page_head = \&FFmpeg_print_page_head; -sub FFmpeg_print_page_head($$) -{ - my $fh = shift; - my $longtitle = "$Texi2HTML::THISDOC{'fulltitle_no_texi'}"; - $longtitle .= ": $Texi2HTML::NO_TEXI{'This'}" if exists $Texi2HTML::NO_TEXI{'This'}; - my $description = $DOCUMENT_DESCRIPTION; - $description = $longtitle if (!defined($description)); - $description = "<meta name=\"description\" content=\"$description\">" if - ($description ne ''); - $description = $Texi2HTML::THISDOC{'documentdescription'} if (defined($Texi2HTML::THISDOC{'documentdescription'})); - my $encoding = ''; - $encoding = "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=$ENCODING\">" if (defined($ENCODING) and ($ENCODING ne '')); - $longtitle =~ s/Documentation.*//g; - $longtitle = "FFmpeg documentation : " . $longtitle; - - print $fh <<EOT; -<!DOCTYPE html> -<html> -$Texi2HTML::THISDOC{'copying'}<!-- Created on $Texi2HTML::THISDOC{today} by $Texi2HTML::THISDOC{program} --> -<!-- -$Texi2HTML::THISDOC{program_authors} ---> -<head> -<title>$longtitle</title> - -$description -<meta name="keywords" content="$longtitle"> -<meta name="Generator" content="$Texi2HTML::THISDOC{program}"> -$encoding -$CSS_LINES -$TEMPLATE_HEADER -EOT -} - -# declare encoding in header -$IN_ENCODING = $ENCODING = "utf-8"; - -# no navigation elements -$SECTION_NAVIGATION = 0; -# the same for texi2html 5.0 -$HEADERS = 0; - -# TOC and Chapter headings link -$TOC_LINKS = 1; - -# print the TOC where @contents is used -$INLINE_CONTENTS = 1; diff --git a/ffmpeg/doc/tablegen.txt b/ffmpeg/doc/tablegen.txt deleted file mode 100644 index 4c4f036..0000000 --- a/ffmpeg/doc/tablegen.txt +++ /dev/null @@ -1,70 +0,0 @@ -Writing a table generator - -This documentation is preliminary. -Parts of the API are not good and should be changed. - -Basic concepts - -A table generator consists of two files, *_tablegen.c and *_tablegen.h. -The .h file will provide the variable declarations and initialization -code for the tables, the .c calls the initialization code and then prints -the tables as a header file using the tableprint.h helpers. -Both of these files will be compiled for the host system, so to avoid -breakage with cross-compilation neither of them may include, directly -or indirectly, config.h or avconfig.h. -This means that e.g. libavutil/mathematics.h is ok but libavutil/libm.h is not. -Due to this, the .c file or Makefile may have to provide additional defines -or stubs, though if possible this should be avoided. -In particular, CONFIG_HARDCODED_TABLES should always be defined to 0. - -The .c file - -This file should include the *_tablegen.h and tableprint.h files and -anything else it needs as long as it does not depend on config.h or -avconfig.h. -In addition to that it must contain a main() function which initializes -all tables by calling the init functions from the .h file and then prints -them. -The printing code typically looks like this: - write_fileheader(); - printf("static const uint8_t my_array[100] = {\n"); - write_uint8_t_array(my_array, 100); - printf("};\n"); - -This is the more generic form, in case you need to do something special. -Usually you should instead use the short form: - write_fileheader(); - WRITE_ARRAY("static const", uint8_t, my_array); - -write_fileheader() adds some minor things like a "this is a generated file" -comment and some standard includes. 
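-
-Putting the pieces above together, a hypothetical example_tablegen.c (the
-"example" names are made up; the pattern is the one described above) could
-look like:
-
-    #define CONFIG_HARDCODED_TABLES 0
-    #include "example_tablegen.h"
-    #include "tableprint.h"
-
-    int main(void)
-    {
-        example_init_tables();  /* init function provided by the .h file */
-
-        write_fileheader();     /* "generated file" comment + standard includes */
-        WRITE_ARRAY("static const", uint8_t, example_table);
-
-        return 0;
-    }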
-tablegen.h defines some write functions for one- and two-dimensional arrays -for standard types - they print only the "core" parts so they are easier -to reuse for multi-dimensional arrays so the outermost {} must be printed -separately. -If there's no standard function for printing the type you need, the -WRITE_1D_FUNC_ARGV macro is a very quick way to create one. -See libavcodec/dv_tablegen.c for an example. - - -The .h file - -This file should contain: - - one or more initialization functions - - the table variable declarations -If CONFIG_HARDCODED_TABLES is set, the initialization functions should -not do anything, and instead of the variable declarations the -generated *_tables.h file should be included. -Since that will be generated in the build directory, the path must be -included, i.e. -#include "libavcodec/example_tables.h" -not -#include "example_tables.h" - -Makefile changes - -To make the automatic table creation work, you must manually declare the -new dependency. -For this add a line similar to this: -$(SUBDIR)example.o: $(SUBDIR)example_tables.h -under the "ifdef CONFIG_HARDCODED_TABLES" section in the Makefile. diff --git a/ffmpeg/doc/texi2pod.pl b/ffmpeg/doc/texi2pod.pl deleted file mode 100755 index 6cf78d8..0000000 --- a/ffmpeg/doc/texi2pod.pl +++ /dev/null @@ -1,453 +0,0 @@ -#!/usr/bin/env perl - -# Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc. - -# This file is part of GNU CC. - -# GNU CC is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2, or (at your option) -# any later version. - -# GNU CC is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. - -# You should have received a copy of the GNU General Public License -# along with GNU CC; see the file COPYING. If not, write to -# the Free Software Foundation, 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301 USA - -# This does trivial (and I mean _trivial_) conversion of Texinfo -# markup to Perl POD format. It's intended to be used to extract -# something suitable for a manpage from a Texinfo document. - -use warnings; - -$output = 0; -$skipping = 0; -%chapters = (); -@chapters_sequence = (); -$chapter = ""; -@icstack = (); -@endwstack = (); -@skstack = (); -@instack = (); -$shift = ""; -%defs = (); -$fnno = 1; -$inf = ""; -@ibase = (); - -while ($_ = shift) { - if (/^-D(.*)$/) { - if ($1 ne "") { - $flag = $1; - } else { - $flag = shift; - } - $value = ""; - ($flag, $value) = ($flag =~ /^([^=]+)(?:=(.+))?/); - die "no flag specified for -D\n" - unless $flag ne ""; - die "flags may only contain letters, digits, hyphens, dashes and underscores\n" - unless $flag =~ /^[a-zA-Z0-9_-]+$/; - $defs{$flag} = $value; - } elsif (/^-I(.*)$/) { - push @ibase, $1 ne "" ? $1 : shift; - } elsif (/^-/) { - usage(); - } else { - $in = $_, next unless defined $in; - $out = $_, next unless defined $out; - usage(); - } -} - -push @ibase, "."; - -if (defined $in) { - $inf = gensym(); - open($inf, "<$in") or die "opening \"$in\": $!\n"; - push @ibase, $1 if $in =~ m|^(.+)/[^/]+$|; -} else { - $inf = \*STDIN; -} - -if (defined $out) { - open(STDOUT, ">$out") or die "opening \"$out\": $!\n"; -} - -while(defined $inf) { -INF: while(<$inf>) { - # Certain commands are discarded without further processing. 
- /^\@(?: - [a-z]+index # @*index: useful only in complete manual - |need # @need: useful only in printed manual - |(?:end\s+)?group # @group .. @end group: ditto - |page # @page: ditto - |node # @node: useful only in .info file - |(?:end\s+)?ifnottex # @ifnottex .. @end ifnottex: use contents - )\b/x and next; - - chomp; - - # Look for filename and title markers. - /^\@setfilename\s+([^.]+)/ and $fn = $1, next; - /^\@settitle\s+([^.]+)/ and $tl = postprocess($1), next; - - # Identify a man title but keep only the one we are interested in. - /^\@c\s+man\s+title\s+([A-Za-z0-9-]+)\s+(.+)/ and do { - if (exists $defs{$1}) { - $fn = $1; - $tl = postprocess($2); - } - next; - }; - - /^\@include\s+(.+)$/ and do { - push @instack, $inf; - $inf = gensym(); - - for (@ibase) { - open($inf, "<" . $_ . "/" . $1) and next INF; - } - die "cannot open $1: $!\n"; - }; - - /^\@chapter\s+([A-Za-z ]+)/ and do { - # close old chapter - $chapters{$chapter_name} .= postprocess($chapter) if ($chapter_name); - - # start new chapter - $chapter_name = $1, push (@chapters_sequence, $chapter_name) unless $skipping; - $chapters{$chapter_name} = "" unless exists $chapters{$chapter_name}; - $chapter = ""; - $output = 1; - next; - }; - - /^\@bye/ and do { - # close old chapter - $chapters{$chapter_name} .= postprocess($chapter) if ($chapter_name); - last INF; - }; - - # handle variables - /^\@set\s+([a-zA-Z0-9_-]+)\s*(.*)$/ and do { - $defs{$1} = $2; - next; - }; - /^\@clear\s+([a-zA-Z0-9_-]+)/ and do { - delete $defs{$1}; - next; - }; - - next unless $output; - - # Discard comments. (Can't do it above, because then we'd never see - # @c man lines.) - /^\@c\b/ and next; - - # End-block handler goes up here because it needs to operate even - # if we are skipping. - /^\@end\s+([a-z]+)/ and do { - # Ignore @end foo, where foo is not an operation which may - # cause us to skip, if we are presently skipping. - my $ended = $1; - next if $skipping && $ended !~ /^(?:ifset|ifclear|ignore|menu|iftex|ifhtml|ifnothtml)$/; - - die "\@end $ended without \@$ended at line $.\n" unless defined $endw; - die "\@$endw ended by \@end $ended at line $.\n" unless $ended eq $endw; - - $endw = pop @endwstack; - - if ($ended =~ /^(?:ifset|ifclear|ignore|menu|iftex|ifhtml|ifnothtml)$/) { - $skipping = pop @skstack; - next; - } elsif ($ended =~ /^(?:example|smallexample|display)$/) { - $shift = ""; - $_ = ""; # need a paragraph break - } elsif ($ended =~ /^(?:itemize|enumerate|(?:multi|[fv])?table)$/) { - $_ = "\n=back\n"; - $ic = pop @icstack; - } else { - die "unknown command \@end $ended at line $.\n"; - } - }; - - # We must handle commands which can cause skipping even while we - # are skipping, otherwise we will not process nested conditionals - # correctly. - /^\@ifset\s+([a-zA-Z0-9_-]+)/ and do { - push @endwstack, $endw; - push @skstack, $skipping; - $endw = "ifset"; - $skipping = 1 unless exists $defs{$1}; - next; - }; - - /^\@ifclear\s+([a-zA-Z0-9_-]+)/ and do { - push @endwstack, $endw; - push @skstack, $skipping; - $endw = "ifclear"; - $skipping = 1 if exists $defs{$1}; - next; - }; - - /^\@(ignore|menu|iftex|ifhtml|ifnothtml)\b/ and do { - push @endwstack, $endw; - push @skstack, $skipping; - $endw = $1; - $skipping = $endw !~ /ifnothtml/; - next; - }; - - next if $skipping; - - # Character entities. First the ones that can be replaced by raw text - # or discarded outright: - s/\@copyright\{\}/(c)/g; - s/\@dots\{\}/.../g; - s/\@enddots\{\}/..../g; - s/\@([.!? 
])/$1/g; - s/\@[:-]//g; - s/\@bullet(?:\{\})?/*/g; - s/\@TeX\{\}/TeX/g; - s/\@pounds\{\}/\#/g; - s/\@minus(?:\{\})?/-/g; - - # Now the ones that have to be replaced by special escapes - # (which will be turned back into text by unmunge()) - s/&/&/g; - s/\@\{/{/g; - s/\@\}/}/g; - s/\@\@/&at;/g; - - # Inside a verbatim block, handle @var specially. - if ($shift ne "") { - s/\@var\{([^\}]*)\}/<$1>/g; - } - - # POD doesn't interpret E<> inside a verbatim block. - if ($shift eq "") { - s/</</g; - s/>/>/g; - } else { - s/</</g; - s/>/>/g; - } - - # Single line command handlers. - - /^\@(?:section|unnumbered|unnumberedsec|center|heading)\s+(.+)$/ - and $_ = "\n=head2 $1\n"; - /^\@(?:subsection|subheading)\s+(.+)$/ - and $_ = "\n=head3 $1\n"; - /^\@(?:subsubsection|subsubheading)\s+(.+)$/ - and $_ = "\n=head4 $1\n"; - - # Block command handlers: - /^\@itemize\s*(\@[a-z]+|\*|-)?/ and do { - push @endwstack, $endw; - push @icstack, $ic; - $ic = $1 ? $1 : "*"; - $_ = "\n=over 4\n"; - $endw = "itemize"; - }; - - /^\@enumerate(?:\s+([a-zA-Z0-9]+))?/ and do { - push @endwstack, $endw; - push @icstack, $ic; - if (defined $1) { - $ic = $1 . "."; - } else { - $ic = "1."; - } - $_ = "\n=over 4\n"; - $endw = "enumerate"; - }; - - /^\@((?:multi|[fv])?table)\s+(\@[a-z]+)/ and do { - push @endwstack, $endw; - push @icstack, $ic; - $endw = $1; - $ic = $2; - $ic =~ s/\@(?:samp|strong|key|gcctabopt|option|env|command)/B/; - $ic =~ s/\@(?:code|kbd)/C/; - $ic =~ s/\@(?:dfn|var|emph|cite|i)/I/; - $ic =~ s/\@(?:file)/F/; - $ic =~ s/\@(?:columnfractions)//; - $_ = "\n=over 4\n"; - }; - - /^\@((?:small)?example|display)/ and do { - push @endwstack, $endw; - $endw = $1; - $shift = "\t"; - $_ = ""; # need a paragraph break - }; - - /^\@item\s+(.*\S)\s*$/ and $endw eq "multitable" and do { - my $columns = $1; - $columns =~ s/\@tab/ : /; - - $_ = "\n=item B<". $columns .">\n"; - }; - - /^\@tab\s+(.*\S)\s*$/ and $endw eq "multitable" and do { - my $columns = $1; - $columns =~ s/\@tab/ : /; - - $_ = " : ". $columns; - $chapter =~ s/\n+\s+$//; - }; - - /^\@itemx?\s*(.+)?$/ and do { - if (defined $1) { - # Entity escapes prevent munging by the <> processing below. - $_ = "\n=item $ic\<$1\>\n"; - } else { - $_ = "\n=item $ic\n"; - $ic =~ y/A-Ya-y/B-Zb-z/; - $ic =~ s/(\d+)/$1 + 1/eg; - } - }; - - $chapter .= $shift.$_."\n"; -} -# End of current file. -close($inf); -$inf = pop @instack; -} - -die "No filename or title\n" unless defined $fn && defined $tl; - -$chapters{NAME} = "$fn \- $tl\n"; -$chapters{FOOTNOTES} .= "=back\n" if exists $chapters{FOOTNOTES}; - -unshift @chapters_sequence, "NAME"; -for $chapter (@chapters_sequence) { - if (exists $chapters{$chapter}) { - $head = uc($chapter); - print "=head1 $head\n\n"; - print scalar unmunge ($chapters{$chapter}); - print "\n"; - } -} - -sub usage -{ - die "usage: $0 [-D toggle...] [infile [outfile]]\n"; -} - -sub postprocess -{ - local $_ = $_[0]; - - # @value{foo} is replaced by whatever 'foo' is defined as. - while (m/(\@value\{([a-zA-Z0-9_-]+)\})/g) { - if (! exists $defs{$2}) { - print STDERR "Option $2 not defined\n"; - s/\Q$1\E//; - } else { - $value = $defs{$2}; - s/\Q$1\E/$value/; - } - } - - # Formatting commands. - # Temporary escape for @r. 
- s/\@r\{([^\}]*)\}/R<$1>/g; - s/\@(?:dfn|var|emph|cite|i)\{([^\}]*)\}/I<$1>/g; - s/\@(?:code|kbd)\{([^\}]*)\}/C<$1>/g; - s/\@(?:gccoptlist|samp|strong|key|option|env|command|b)\{([^\}]*)\}/B<$1>/g; - s/\@sc\{([^\}]*)\}/\U$1/g; - s/\@file\{([^\}]*)\}/F<$1>/g; - s/\@w\{([^\}]*)\}/S<$1>/g; - s/\@(?:dmn|math)\{([^\}]*)\}/$1/g; - - # Cross references are thrown away, as are @noindent and @refill. - # (@noindent is impossible in .pod, and @refill is unnecessary.) - # @* is also impossible in .pod; we discard it and any newline that - # follows it. Similarly, our macro @gol must be discarded. - - s/\@anchor{(?:[^\}]*)\}//g; - s/\(?\@xref\{(?:[^\}]*)\}(?:[^.<]|(?:<[^<>]*>))*\.\)?//g; - s/\s+\(\@pxref\{(?:[^\}]*)\}\)//g; - s/;\s+\@pxref\{(?:[^\}]*)\}//g; - s/\@ref\{(?:[^,\}]*,)(?:[^,\}]*,)([^,\}]*).*\}/$1/g; - s/\@ref\{([^\}]*)\}/$1/g; - s/\@noindent\s*//g; - s/\@refill//g; - s/\@gol//g; - s/\@\*\s*\n?//g; - - # @uref can take one, two, or three arguments, with different - # semantics each time. @url and @email are just like @uref with - # one argument, for our purposes. - s/\@(?:uref|url|email)\{([^\},]*),?[^\}]*\}/<B<$1>>/g; - s/\@uref\{([^\},]*),([^\},]*)\}/$2 (C<$1>)/g; - s/\@uref\{([^\},]*),([^\},]*),([^\},]*)\}/$3/g; - - # Turn B<blah I<blah> blah> into B<blah> I<blah> B<blah> to - # match Texinfo semantics of @emph inside @samp. Also handle @r - # inside bold. - s/</</g; - s/>/>/g; - 1 while s/B<((?:[^<>]|I<[^<>]*>)*)R<([^>]*)>/B<$1>${2}B</g; - 1 while (s/B<([^<>]*)I<([^>]+)>/B<$1>I<$2>B</g); - 1 while (s/I<([^<>]*)B<([^>]+)>/I<$1>B<$2>I</g); - s/[BI]<>//g; - s/([BI])<(\s+)([^>]+)>/$2$1<$3>/g; - s/([BI])<([^>]+?)(\s+)>/$1<$2>$3/g; - - # Extract footnotes. This has to be done after all other - # processing because otherwise the regexp will choke on formatting - # inside @footnote. - while (/\@footnote/g) { - s/\@footnote\{([^\}]+)\}/[$fnno]/; - add_footnote($1, $fnno); - $fnno++; - } - - return $_; -} - -sub unmunge -{ - # Replace escaped symbols with their equivalents. - local $_ = $_[0]; - - s/</E<lt>/g; - s/>/E<gt>/g; - s/{/\{/g; - s/}/\}/g; - s/&at;/\@/g; - s/&/&/g; - return $_; -} - -sub add_footnote -{ - unless (exists $chapters{FOOTNOTES}) { - $chapters{FOOTNOTES} = "\n=over 4\n\n"; - } - - $chapters{FOOTNOTES} .= "=item $fnno.\n\n"; $fnno++; - $chapters{FOOTNOTES} .= $_[0]; - $chapters{FOOTNOTES} .= "\n\n"; -} - -# stolen from Symbol.pm -{ - my $genseq = 0; - sub gensym - { - my $name = "GEN" . $genseq++; - my $ref = \*{$name}; - delete $::{$name}; - return $ref; - } -} |
