Diffstat (limited to 'rotord/libavexporter.h')
| -rw-r--r-- | rotord/libavexporter.h | 650 |
1 file changed, 0 insertions, 650 deletions
diff --git a/rotord/libavexporter.h b/rotord/libavexporter.h
deleted file mode 100644
index deb71ab..0000000
--- a/rotord/libavexporter.h
+++ /dev/null
@@ -1,650 +0,0 @@

/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat API example.
 *
 * Output a media file in any supported libavformat format.
 * The default codecs are used.
 * @example doc/examples/muxing.c
 */

// http://stackoverflow.com/questions/14871916/can-i-concurrently-read-an-earlier-section-of-an-mp4-file-while-streaming-writin

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <string>
#include <math.h>
#include <iostream>

extern "C" {
    #include <libavutil/mathematics.h>
    #include <libavformat/avformat.h>
    #include <libswscale/swscale.h>
}

#define STREAM_DURATION   200.0               // stream duration, in seconds
#define STREAM_FRAME_RATE 25                  // 25 images/s
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P  // default pix_fmt

namespace libav {

    static int sws_flags = SWS_BICUBIC;

    //************************************************************//
    // stream setup and audio state                               //

    static float t, tincr, tincr2;
    static int audio_input_frame_size;

    // Add an output stream.
    static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
                                enum AVCodecID codec_id)
    {
        AVCodecContext *c;
        AVStream *st;

        // find the encoder
        *codec = avcodec_find_encoder(codec_id);
        if (!(*codec)) {
            //fprintf(stderr, "Could not find encoder for '%s'\n",
            //        avcodec_get_name(codec_id));
            exit(1);
        }

        st = avformat_new_stream(oc, *codec);
        if (!st) {
            //fprintf(stderr, "Could not allocate stream\n");
            exit(1);
        }
        st->id = oc->nb_streams - 1;
        c = st->codec;

        switch ((*codec)->type) {
        case AVMEDIA_TYPE_AUDIO:
            st->id = 1;
            c->sample_fmt     = AV_SAMPLE_FMT_S16;
            c->bit_rate       = 64000;
            c->sample_rate    = 44100;
            c->channels       = 2;
            c->channel_layout = AV_CH_LAYOUT_STEREO;
            break;

        case AVMEDIA_TYPE_VIDEO:
            c->codec_id = codec_id;
            c->bit_rate = 400000;
            // Resolution must be a multiple of two.
            c->width  = 352;
            c->height = 288;
            // timebase: This is the fundamental unit of time (in seconds) in
            // terms of which frame timestamps are represented. For fixed-fps
            // content, timebase should be 1/framerate and timestamp increments
            // should be identical to 1.
            c->time_base.den = STREAM_FRAME_RATE;
            c->time_base.num = 1;
            c->gop_size      = 12; // emit one intra frame every twelve frames at most
            c->pix_fmt       = STREAM_PIX_FMT;
            if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
                // just for testing, we also add B frames
                c->max_b_frames = 2;
            }
            if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
                // Needed to avoid using macroblocks in which some coeffs
                // overflow. This does not happen with normal video, it just
                // happens here as the motion of the chroma plane does not
                // match the luma plane.
                c->mb_decision = 2;
            }
            break;

        default:
            break;
        }

        // Some formats want stream headers to be separate.
        if (oc->oformat->flags & AVFMT_GLOBALHEADER)
            c->flags |= CODEC_FLAG_GLOBAL_HEADER;

        return st;
    }
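    // A minimal sketch of the timebase arithmetic described above, assuming
    // time_base = 1/STREAM_FRAME_RATE: a frame's pts is then simply its frame
    // index, and its presentation time in seconds is pts * num / den.
    static inline double pts_to_seconds(int64_t pts, AVRational time_base)
    {
        return pts * (double)time_base.num / time_base.den;
        // e.g. pts = 50 with time_base = {1, 25}  ->  2.0 seconds
    }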
    //************************************************************//
    // audio output                                               //

    static int open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
    {
        AVCodecContext *c;
        int ret;

        c = st->codec;

        // open it
        ret = avcodec_open2(c, codec, NULL);
        if (ret < 0) {
            //fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
            exit(1);
        }

        // init signal generator
        t     = 0;
        tincr = 2 * M_PI * 110.0 / c->sample_rate;
        // increment frequency by 110 Hz per second
        tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

        if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
            audio_input_frame_size = 10000;
        else
            audio_input_frame_size = c->frame_size;

        /*
        samples = av_malloc(audio_input_frame_size *
                            av_get_bytes_per_sample(c->sample_fmt) *
                            c->channels);
        if (!samples) {
            //fprintf(stderr, "Could not allocate audio samples buffer\n");
            exit(1);
        }
        */
        return audio_input_frame_size;
    }

    // Prepare a 16 bit dummy audio frame of 'frame_size' samples and
    // 'nb_channels' channels.
    static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
    {
        int j, i, v;
        int16_t *q;

        q = samples;
        for (j = 0; j < frame_size; j++) {
            v = (int)(sin(t) * 10000);
            for (i = 0; i < nb_channels; i++)
                *q++ = v;
            t     += tincr;
            tincr += tincr2;
        }
    }
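    // A sketch of the chirp arithmetic used by the signal generator above:
    // tincr is the per-sample phase step of a 110 Hz tone, and adding tincr2
    // every sample raises the pitch by roughly 110 Hz per second, so the
    // instantaneous frequency after n samples works out to:
    static inline double chirp_frequency_hz(int n, int sample_rate)
    {
        double step = 2 * M_PI * 110.0 / sample_rate
                    + n * (2 * M_PI * 110.0 / sample_rate / sample_rate);
        return step * sample_rate / (2 * M_PI);
        // n = 0 -> 110 Hz; n = sample_rate (one second in) -> 220 Hz
    }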
    static void write_audio_frame(AVFormatContext *oc, AVStream *st, uint16_t *samples)
    {
        AVCodecContext *c;
        AVPacket pkt = { 0 }; // data and size must be 0
        AVFrame *frame = avcodec_alloc_frame();
        int got_packet, ret;

        av_init_packet(&pkt);
        c = st->codec;

        //get_audio_frame(samples, audio_input_frame_size, c->channels);
        frame->nb_samples = audio_input_frame_size;
        uint8_t *sampleptr;
        int bufsize = audio_input_frame_size *
                      av_get_bytes_per_sample(c->sample_fmt) * c->channels;
        if (samples) {
            sampleptr = (uint8_t*)samples;
        } else {
            // no input: encode one buffer of silence
            sampleptr = new uint8_t[bufsize];
            memset(sampleptr, 0, bufsize);
        }

        avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                                 sampleptr, bufsize, 0);
        //frame->sample_rate = 44100; // hard-coded input rate - nope, this doesn't help
        //frame->format = AV_SAMPLE_FMT_S16P;
        // ?? why is ffmpeg reporting fltp as the sample format??? doesn't seem
        // to have an effect to change this though
        ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
        if (!samples) {
            delete[] sampleptr;
        }
        if (ret < 0) {
            //fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
            exit(1);
        }

        if (!got_packet)
            return;

        pkt.stream_index = st->index;

        // Write the compressed frame to the media file.
        ret = av_interleaved_write_frame(oc, &pkt);
        if (ret != 0) {
            //fprintf(stderr, "Error while writing audio frame: %s\n",
            //        av_err2str(ret));
            exit(1);
        }
        avcodec_free_frame(&frame);
    }
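    // The "why is ffmpeg reporting fltp" question above can be probed by
    // walking the encoder's supported-format list (AVCodec::sample_fmts is
    // terminated by AV_SAMPLE_FMT_NONE); a sketch:
    static inline bool encoder_supports_s16(const AVCodec *codec)
    {
        for (const enum AVSampleFormat *p = codec->sample_fmts;
             p && *p != AV_SAMPLE_FMT_NONE; p++)
            if (*p == AV_SAMPLE_FMT_S16)
                return true;
        return false; // e.g. the native AAC encoder only accepts AV_SAMPLE_FMT_FLTP
    }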
    static void write_audio_frame(AVFormatContext *oc, AVStream *st, AVPacket *pkt)
    {
        // (this overload takes an already-encoded packet, so the encoding
        // path of the buffer-based overload above is not repeated here)

        pkt->stream_index = st->index;

        // Write the compressed frame to the media file.
        int ret = av_interleaved_write_frame(oc, pkt);
        if (ret != 0) {
            //fprintf(stderr, "Error while writing audio frame: %s\n",
            //        av_err2str(ret));
            exit(1);
        }
        av_free_packet(pkt);
    }

    static void close_audio(AVFormatContext *oc, AVStream *st)
    {
        avcodec_close(st->codec);
    }

    //************************************************************//
    // video output                                               //

    static AVFrame *frame;
    static AVPicture src_picture, dst_picture;
    static int frame_count;
    static uint8_t *outPixels;

    static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
    {
        int ret;
        AVCodecContext *c = st->codec;

        // open the codec
        ret = avcodec_open2(c, codec, NULL);
        if (ret < 0) {
            //fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
            exit(1);
        }

        // allocate and init a re-usable frame
        frame = avcodec_alloc_frame();
        if (!frame) {
            //fprintf(stderr, "Could not allocate video frame\n");
            exit(1);
        }

        // Allocate the encoded raw picture.
        ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
        if (ret < 0) {
            //fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
            exit(1);
        }

        // If the output format is not YUV420P, then a temporary YUV420P
        // picture is needed too. It is then converted to the required
        // output format.
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            ret = avpicture_alloc(&src_picture, AV_PIX_FMT_RGB24, c->width, c->height);
            if (ret < 0) {
                //fprintf(stderr, "Could not allocate temporary picture: %s\n",
                //        av_err2str(ret));
                exit(1);
            }
        }

        // copy data and linesize picture pointers to frame
        *((AVPicture *)frame) = dst_picture;

        outPixels = (uint8_t*)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P,
                                        st->codec->width, st->codec->height));
    }

    // Prepare a dummy image.
    static void fill_yuv_image(AVPicture *pict, int frame_index,
                               int width, int height)
    {
        int x, y, i;

        i = frame_index;

        // Y
        for (y = 0; y < height; y++)
            for (x = 0; x < width; x++)
                pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

        // Cb and Cr
        for (y = 0; y < height / 2; y++) {
            for (x = 0; x < width / 2; x++) {
                pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
                pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
            }
        }
    }

    static void write_video_frame(AVFormatContext *oc, AVStream *st, uint8_t *pixels)
    {
        int ret;
        static struct SwsContext *sws_ctx;
        AVCodecContext *c = st->codec;

        /*
        if (frame_count >= STREAM_NB_FRAMES) {
            // No more frames to compress. The codec has a latency of a few
            // frames if using B-frames, so we get the last frames by
            // passing the same picture again.
        } else {
            if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
                // as we only generate a YUV420P picture, we must convert it
                // to the codec pixel format if needed
                if (!sws_ctx) {
                    sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                             c->width, c->height, c->pix_fmt,
                                             sws_flags, NULL, NULL, NULL);
                    if (!sws_ctx) {
                        //fprintf(stderr,
                        //        "Could not initialize the conversion context\n");
                        exit(1);
                    }
                }
                fill_yuv_image(&src_picture, frame_count, c->width, c->height);
                sws_scale(sws_ctx,
                          (const uint8_t * const *)src_picture.data, src_picture.linesize,
                          0, c->height, dst_picture.data, dst_picture.linesize);
            } else {
                fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
            }
        }
        */

        // always convert RGB to YUV
        // (should the context be allocated once per render instead of per frame??)
        sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_RGB24,
                                 c->width, c->height, AV_PIX_FMT_YUV420P,
                                 sws_flags, NULL, NULL, NULL);

        avpicture_fill(&src_picture, pixels, AV_PIX_FMT_RGB24, c->width, c->height);
        //avpicture_fill(&dst_picture, outPixels, AV_PIX_FMT_YUV420P, c->width, c->height);

        sws_scale(sws_ctx, src_picture.data, src_picture.linesize,
                  0, c->height, dst_picture.data, dst_picture.linesize);
        //fill_yuv_image(&dst_picture, frame_count, c->width, c->height);

        if (oc->oformat->flags & AVFMT_RAWPICTURE) {
            // Raw video case - directly store the picture in the packet
            AVPacket pkt;
            av_init_packet(&pkt);

            pkt.flags       |= AV_PKT_FLAG_KEY;
            pkt.stream_index = st->index;
            pkt.data         = dst_picture.data[0];
            pkt.size         = sizeof(AVPicture);

            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            AVPacket pkt = { 0 };
            int got_packet;
            av_init_packet(&pkt);

            // encode the image
            ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
            if (ret < 0) {
                //fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
                exit(1);
            }
            // If size is zero, it means the image was buffered.
            if (!ret && got_packet && pkt.size) {
                pkt.stream_index = st->index;

                // Write the compressed frame to the media file.
                ret = av_interleaved_write_frame(oc, &pkt);
            } else {
                ret = 0;
            }
        }

        // added 22 may in memory leak run
        sws_freeContext(sws_ctx); // should be done once per render instead of per frame??

        if (ret != 0) {
            //fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
            exit(1);
        }
        frame_count++;

        //avcodec_free_frame(&frame);
    }
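    // The "once per render" question in write_video_frame can be answered with
    // sws_getCachedContext(), which returns the passed context unchanged while
    // the parameters still match and reallocates only when they change. A
    // minimal sketch (rgb_to_yuv_ctx and convert_rgb_frame are hypothetical
    // names, not part of the original file):
    static struct SwsContext *rgb_to_yuv_ctx = NULL;

    static inline void convert_rgb_frame(AVCodecContext *c, uint8_t *pixels)
    {
        rgb_to_yuv_ctx = sws_getCachedContext(rgb_to_yuv_ctx,
                                              c->width, c->height, AV_PIX_FMT_RGB24,
                                              c->width, c->height, AV_PIX_FMT_YUV420P,
                                              sws_flags, NULL, NULL, NULL);
        avpicture_fill(&src_picture, pixels, AV_PIX_FMT_RGB24, c->width, c->height);
        sws_scale(rgb_to_yuv_ctx, src_picture.data, src_picture.linesize,
                  0, c->height, dst_picture.data, dst_picture.linesize);
        // ...then call sws_freeContext(rgb_to_yuv_ctx) once at close time,
        // not once per frame.
    }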
    static void close_video(AVFormatContext *oc, AVStream *st)
    {
        avcodec_close(st->codec);
        av_free(src_picture.data[0]);
        av_free(dst_picture.data[0]);
        av_free(frame);
        av_free(outPixels); // SIGSEGV here???
    }

    class Exporter {
    public:
        bool setup(int w, int h, int bitRate, int frameRate, std::string container);
        bool record(std::string filename);
        bool encodeFrame(unsigned char *pixels, uint16_t *samples);
        bool encodeFrame(unsigned char *pixels, AVPacket *audiopkt);
        bool encodeFrame(unsigned char *pixels);
        bool encodeFrame(uint16_t *samples);
        void finishRecord();
        int get_audio_framesize() { return audioframesize; }
        float get_audio_step() { return audiostep; }

    private:
        AVOutputFormat *fmt;
        AVFormatContext *oc;
        AVStream *audio_st, *video_st;
        AVCodec *audio_codec, *video_codec;
        double audio_pts, video_pts;

        int audioframesize;
        float audiostep;
        int w;
        int h;
        int bitRate;
        int frameRate;
        std::string container;

        int outputframe;
    };
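    // A minimal usage sketch for the Exporter API declared above, assuming
    // the semantics suggested by the declarations; the argument values, file
    // name, and function name are illustrative only (the member definitions
    // live elsewhere in rotord):
    inline void example_render_to_mp4(unsigned char *rgb /* w*h*3, RGB24 */,
                                      uint16_t *pcm /* interleaved S16 */)
    {
        Exporter ex;
        ex.setup(640, 480, 400000, 25, "mp4"); // w, h, bitRate, frameRate, container
        ex.record("out.mp4");                  // open the file and write the header
        for (int i = 0; i < 250; i++)          // 10 seconds at 25 fps
            ex.encodeFrame(rgb, pcm);          // one video frame plus one audio block
        ex.finishRecord();                     // flush encoders and write the trailer
    }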
    //************************************************************//
    // media file output                                          //

    /*
    int main(int argc, char **argv)
    {
        const char *filename;
        AVOutputFormat *fmt;
        AVFormatContext *oc;
        AVStream *audio_st, *video_st;
        AVCodec *audio_codec, *video_codec;
        double audio_pts, video_pts;
        int ret;

        // Initialize libavcodec, and register all codecs and formats.
        av_register_all();

        if (argc != 2) {
            printf("usage: %s output_file\n"
                   "API example program to output a media file with libavformat.\n"
                   "This program generates a synthetic audio and video stream, encodes and\n"
                   "muxes them into a file named output_file.\n"
                   "The output format is automatically guessed according to the file extension.\n"
                   "Raw images can also be output by using '%%d' in the filename.\n"
                   "\n", argv[0]);
            return 1;
        }

        filename = argv[1];

        // allocate the output media context
        avformat_alloc_output_context2(&oc, NULL, NULL, filename);
        if (!oc) {
            printf("Could not deduce output format from file extension: using MPEG.\n");
            avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
        }
        if (!oc) {
            return 1;
        }
        fmt = oc->oformat;

        // Add the audio and video streams using the default format codecs
        // and initialize the codecs.
        video_st = NULL;
        audio_st = NULL;

        if (fmt->video_codec != AV_CODEC_ID_NONE) {
            video_st = add_stream(oc, &video_codec, fmt->video_codec);
        }
        if (fmt->audio_codec != AV_CODEC_ID_NONE) {
            audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
        }

        // Now that all the parameters are set, we can open the audio and
        // video codecs and allocate the necessary encode buffers.
        if (video_st)
            open_video(oc, video_codec, video_st);
        if (audio_st)
            open_audio(oc, audio_codec, audio_st);

        av_dump_format(oc, 0, filename, 1);

        // open the output file, if needed
        if (!(fmt->flags & AVFMT_NOFILE)) {
            ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
            if (ret < 0) {
                fprintf(stderr, "Could not open '%s': %s\n", filename,
                        av_err2str(ret));
                return 1;
            }
        }

        // Write the stream header, if any.
        ret = avformat_write_header(oc, NULL);
        if (ret < 0) {
            fprintf(stderr, "Error occurred when opening output file: %s\n",
                    av_err2str(ret));
            return 1;
        }

        if (frame)
            frame->pts = 0;
        for (;;) {
            // Compute current audio and video time.
            if (audio_st)
                audio_pts = (double)audio_st->pts.val * audio_st->time_base.num /
                            audio_st->time_base.den;
            else
                audio_pts = 0.0;

            if (video_st)
                video_pts = (double)video_st->pts.val * video_st->time_base.num /
                            video_st->time_base.den;
            else
                video_pts = 0.0;

            if ((!audio_st || audio_pts >= STREAM_DURATION) &&
                (!video_st || video_pts >= STREAM_DURATION))
                break;

            // write interleaved audio and video frames
            if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
                write_audio_frame(oc, audio_st);
            } else {
                write_video_frame(oc, video_st);
                frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
            }
        }

        // Write the trailer, if any. The trailer must be written before you
        // close the CodecContexts open when you wrote the header; otherwise
        // av_write_trailer() may try to use memory that was freed on
        // av_codec_close().
        av_write_trailer(oc);

        // Close each codec.
        if (video_st)
            close_video(oc, video_st);
        if (audio_st)
            close_audio(oc, audio_st);

        if (!(fmt->flags & AVFMT_NOFILE))
            // Close the output file.
            avio_close(oc->pb);

        // free the stream
        avformat_free_context(oc);

        return 0;
    }
    */
}
