-rw-r--r--  rotord/libavexporter.cpp    |  170
-rw-r--r--  rotord/libavexporter.h      |  650
-rwxr-xr-x  rotord/libavwrapper.cpp     | 1366
-rwxr-xr-x  rotord/libavwrapper.h       |  222
-rwxr-xr-x  rotord/ofxMovieExporter.cpp |  426
-rwxr-xr-x  rotord/ofxMovieExporter.h   |  198
-rwxr-xr-x  rotord/rotor.cpp            |   75
-rwxr-xr-x  rotord/rotor.h              |   15
-rw-r--r--  rotord/rotord.cbp           |    9
-rw-r--r--  rotord/settings.xml         |    2
10 files changed, 83 insertions(+), 3050 deletions(-)
diff --git a/rotord/libavexporter.cpp b/rotord/libavexporter.cpp
deleted file mode 100644
index 7b6d1cf..0000000
--- a/rotord/libavexporter.cpp
+++ /dev/null
@@ -1,170 +0,0 @@
-#include "libavexporter.h"
-
-
-bool libav::Exporter::setup(int w,int h, int bitRate, int frameRate, std::string container){
- // Initialize libavcodec, and register all codecs and formats. //
- av_register_all();
-
-
- this->w=w;
- this->h=h;
- this->bitRate=bitRate;
- this->frameRate=frameRate;
- this->container=container;
-
- return true;
-}
-bool libav::Exporter::record(std::string filename){
-
- // allocate the output media context //
- avformat_alloc_output_context2(&oc, NULL, NULL, filename.c_str());
- if (!oc) {
- printf("Could not deduce output format from file extension: using MPEG.\n");
- avformat_alloc_output_context2(&oc, NULL, "mpeg", filename.c_str());
- }
- if (!oc) {
- return false;
- }
- fmt = oc->oformat;
-
- // Add the audio and video streams using the default format codecs
- // * and initialize the codecs. //
- video_st = NULL;
- audio_st = NULL;
-
- if (fmt->video_codec != AV_CODEC_ID_NONE) {
- video_st = add_stream(oc, &video_codec, fmt->video_codec);
- }
- if (fmt->audio_codec != AV_CODEC_ID_NONE) {
- audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
- }
-
- //set initial video params
- video_st->codec->width=w;
- video_st->codec->height=h;
- video_st->codec->time_base.num = 1;//codecCtx->ticks_per_frame;
- video_st->codec->time_base.den = frameRate;
- video_st->time_base = video_st->codec->time_base;
- //audioStream->time_base = codecCtx->time_base; //???has the capability of crashing
-
- video_st->codec->gop_size = 10; /* emit one intra frame every ten frames */
- video_st->codec->pix_fmt = PIX_FMT_YUV420P;
-
- // Now that all the parameters are set, we can open the audio and
- // * video codecs and allocate the necessary encode buffers. //
- if (video_st)
- open_video(oc, video_codec, video_st);
- if (audio_st) {
- audioframesize=open_audio(oc, audio_codec, audio_st);
- audiostep=((float)audioframesize)/(audio_st->codec->sample_rate);
- std::cerr << "opened audio codec with "<<audioframesize<<" frame size and "<<audiostep<<" seconds per frame"<<std::endl;
- }
-
-
- av_dump_format(oc, 0, filename.c_str(), 1);
-
- // open the output file, if needed //
- if (!(fmt->flags & AVFMT_NOFILE)) {
- int ret = avio_open(&oc->pb, filename.c_str(), AVIO_FLAG_WRITE);
- if (ret < 0) {
- std::cerr <<"Could not open " << filename.c_str() << std::endl;
- return false;
- }
- }
-
- // Write the stream header, if any. //
- int ret = avformat_write_header(oc, NULL);
- if (ret < 0) {
- //std::cerr <<"Error occurred when opening output file:" << av_err2str(ret) << std::endl;
- return false;
- }
-
- if (frame)
- frame->pts = 0;
-
- outputframe=0;
-
- return true;
-}
-bool libav::Exporter::encodeFrame(unsigned char *pixels,uint16_t *samples){
- // Compute current audio and video time. //
- if (audio_st)
- audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
- else
- audio_pts = 0.0;
-
- if (video_st)
- video_pts = (double)video_st->pts.val * video_st->time_base.num /
- video_st->time_base.den;
- else
- video_pts = 0.0;
-
- // write interleaved audio and video frames //
- if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
- write_audio_frame(oc, audio_st, samples);
- } else {
- write_video_frame(oc, video_st, pixels);
-
- frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
- }
-
- //std::cerr << "encoded frame " << outputframe << std::endl;
- outputframe++;
-
- return true;
-}
-bool libav::Exporter::encodeFrame(unsigned char *pixels,AVPacket *audio){
- // Compute current audio and video time. //
- if (audio_st)
- audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
- else
- audio_pts = 0.0;
-
- if (video_st)
- video_pts = (double)video_st->pts.val * video_st->time_base.num /
- video_st->time_base.den;
- else
- video_pts = 0.0;
-
- // write interleaved audio and video frames //
- if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
- write_audio_frame(oc, audio_st, audio);
- } else {
- write_video_frame(oc, video_st, pixels);
-
- frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
- }
-
- //std::cerr << "encoded frame " << outputframe << std::endl;
- outputframe++;
-
- return true;
-}
-bool libav::Exporter::encodeFrame(unsigned char *pixels){
- video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
- write_video_frame(oc, video_st, pixels);
- frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
- outputframe++;
- return true;
-}
-bool libav::Exporter::encodeFrame(uint16_t *samples){
- audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
- write_audio_frame(oc, audio_st, samples);
- return true;
-}
-void libav::Exporter::finishRecord(){
-
- av_write_trailer(oc);
- // Close each codec. //
- if (video_st)
- close_video(oc, video_st);
- if (audio_st)
- close_audio(oc, audio_st);
-
- if (!(fmt->flags & AVFMT_NOFILE))
- // Close the output file. //
- avio_close(oc->pb);
-
- // free the stream //
- avformat_free_context(oc);
-}
diff --git a/rotord/libavexporter.h b/rotord/libavexporter.h
deleted file mode 100644
index deb71ab..0000000
--- a/rotord/libavexporter.h
+++ /dev/null
@@ -1,650 +0,0 @@
-/*
- * Copyright (c) 2003 Fabrice Bellard
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
-*/
-
-/*
- * @file
- * libavformat API example.
- *
- * Output a media file in any supported libavformat format.
- * The default codecs are used.
- * @example doc/examples/muxing.c
-*/
-
-
- //
- //http://stackoverflow.com/questions/14871916/can-i-concurrently-read-an-earlier-section-of-an-mp4-file-while-streaming-writin
- //
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string>
-#include <math.h>
-#include <iostream>
-
-extern "C" {
- #include <libavutil/mathematics.h>
- #include <libavformat/avformat.h>
- #include <libswscale/swscale.h>
-}
-
-// 5 seconds stream duration //
-#define STREAM_DURATION 200.0
-#define STREAM_FRAME_RATE 25 // 25 images/s //
-#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
-#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P // default pix_fmt //
-
-namespace libav {
-
-
- static int sws_flags = SWS_BICUBIC;
-
- //************************************************************//
- // audio output //
-
- static float t, tincr, tincr2;
- static int audio_input_frame_size;
-
- // Add an output stream. //
- static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
- enum AVCodecID codec_id)
- {
- AVCodecContext *c;
- AVStream *st;
-
- // find the encoder //
- *codec = avcodec_find_encoder(codec_id);
- if (!(*codec)) {
- //fprintf(stderr, "Could not find encoder for '%s'\n",
- // avcodec_get_name(codec_id));
- exit(1);
- }
-
- st = avformat_new_stream(oc, *codec);
- if (!st) {
- //fprintf(stderr, "Could not allocate stream\n");
- exit(1);
- }
- st->id = oc->nb_streams-1;
- c = st->codec;
-
- switch ((*codec)->type) {
- case AVMEDIA_TYPE_AUDIO:
- st->id = 1;
- c->sample_fmt = AV_SAMPLE_FMT_S16;
- c->bit_rate = 64000;
- c->sample_rate = 44100;
- c->channels = 2;
- c->channel_layout=AV_CH_LAYOUT_STEREO;
- break;
-
- case AVMEDIA_TYPE_VIDEO:
- c->codec_id = codec_id;
-
- c->bit_rate = 400000;
- // Resolution must be a multiple of two. //
- c->width = 352;
- c->height = 288;
- // timebase: This is the fundamental unit of time (in seconds) in terms
- // * of which frame timestamps are represented. For fixed-fps content,
- // * timebase should be 1/framerate and timestamp increments should be
- // * identical to 1. //
- c->time_base.den = STREAM_FRAME_RATE;
- c->time_base.num = 1;
- c->gop_size = 12; // emit one intra frame every twelve frames at most //
- c->pix_fmt = STREAM_PIX_FMT;
- if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
- // just for testing, we also add B frames //
- c->max_b_frames = 2;
- }
- if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
- // Needed to avoid using macroblocks in which some coeffs overflow.
- // * This does not happen with normal video, it just happens here as
- // * the motion of the chroma plane does not match the luma plane. //
- c->mb_decision = 2;
- }
- break;
-
- default:
- break;
- }
-
- // Some formats want stream headers to be separate. //
- if (oc->oformat->flags & AVFMT_GLOBALHEADER)
- c->flags |= CODEC_FLAG_GLOBAL_HEADER;
-
- return st;
- }
-
- //************************************************************//
- // audio output //
-
-
- static int open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
- {
- AVCodecContext *c;
- int ret;
-
- c = st->codec;
-
- // open it //
- ret = avcodec_open2(c, codec, NULL);
- if (ret < 0) {
- //fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
- exit(1);
- }
-
- // init signal generator //
- t = 0;
- tincr = 2 * M_PI * 110.0 / c->sample_rate;
- // increment frequency by 110 Hz per second //
- tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
-
- if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
- audio_input_frame_size = 10000;
- else
- audio_input_frame_size = c->frame_size;
-
- /*
- samples = av_malloc(audio_input_frame_size *
- av_get_bytes_per_sample(c->sample_fmt) *
- c->channels);
- if (!samples) {
- //fprintf(stderr, "Could not allocate audio samples buffer\n");
- exit(1);
- }
- */
- return audio_input_frame_size;
- }
-
- // Prepare a 16 bit dummy audio frame of 'frame_size' samples and
- // * 'nb_channels' channels. //
- static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
- {
- int j, i, v;
- int16_t *q;
-
- q = samples;
- for (j = 0; j < frame_size; j++) {
- v = (int)(sin(t) * 10000);
- for (i = 0; i < nb_channels; i++)
- *q++ = v;
- t += tincr;
- tincr += tincr2;
- }
- }
-
- static void write_audio_frame(AVFormatContext *oc, AVStream *st,uint16_t *samples)
- {
- AVCodecContext *c;
- AVPacket pkt = { 0 }; // data and size must be 0;
- AVFrame *frame = avcodec_alloc_frame();
- int got_packet, ret;
-
- av_init_packet(&pkt);
- c = st->codec;
-
- //get_audio_frame(samples, audio_input_frame_size, c->channels);
- frame->nb_samples = audio_input_frame_size;
- uint8_t *sampleptr;
- int bufsize=audio_input_frame_size * av_get_bytes_per_sample(c->sample_fmt) *c->channels;
- if (samples) {
- sampleptr=(uint8_t*)samples;
- }
- else {
- sampleptr=new uint8_t[bufsize];
- memset(sampleptr,0,bufsize);
- }
-
- avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
- sampleptr,
- audio_input_frame_size *
- av_get_bytes_per_sample(c->sample_fmt) *
- c->channels, 0); //;
- //frame->sample_rate=44100; //hard coded input rate- nope, this doesn't help
- //frame->format=AV_SAMPLE_FMT_S16P;
- //?? why is ffmpeg reporting fltp as the sample format??? doesn't seem to have an effect to change this though
- ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
- if (!samples) {
- delete[] sampleptr;
- }
- if (ret < 0) {
- //fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
- exit(1);
- }
-
- if (!got_packet)
- return;
-
- pkt.stream_index = st->index;
-
- // Write the compressed frame to the media file. //
- ret = av_interleaved_write_frame(oc, &pkt);
- if (ret != 0) {
- //fprintf(stderr, "Error while writing audio frame: %s\n",
- // av_err2str(ret));
- exit(1);
- }
- avcodec_free_frame(&frame);
- }
-
- static void write_audio_frame(AVFormatContext *oc, AVStream *st,AVPacket *pkt)
- {
- /*
- AVCodecContext *c;
- AVPacket pkt = { 0 }; // data and size must be 0;
- AVFrame *frame = avcodec_alloc_frame();
- int got_packet, ret;
-
- av_init_packet(&pkt);
- c = st->codec;
-
- //get_audio_frame(samples, audio_input_frame_size, c->channels);
- frame->nb_samples = audio_input_frame_size;
- uint8_t *sampleptr;
- int bufsize=audio_input_frame_size * av_get_bytes_per_sample(c->sample_fmt) *c->channels;
- if (samples) {
- sampleptr=(uint8_t*)samples;
- }
- else {
- sampleptr=new uint8_t[bufsize];
- memset(sampleptr,0,bufsize);
- }
- avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
- sampleptr,
- audio_input_frame_size *
- av_get_bytes_per_sample(c->sample_fmt) *
- c->channels, 1);
-
- ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
- if (!samples) {
- free(sampleptr);
- }
- if (ret < 0) {
- //fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
- exit(1);
- }
-
- if (!got_packet)
- return;
- */
-
- pkt->stream_index = st->index;
-
- // Write the compressed frame to the media file. //
- int ret = av_interleaved_write_frame(oc, pkt);
- if (ret != 0) {
- //fprintf(stderr, "Error while writing audio frame: %s\n",
- // av_err2str(ret));
- exit(1);
- }
- //avcodec_free_frame(&frame);
- av_free_packet(pkt);
- }
-
-
- static void close_audio(AVFormatContext *oc, AVStream *st)
- {
- avcodec_close(st->codec);
-
-
- }
-
- //************************************************************//
- // video output //
-
- static AVFrame *frame;
- static AVPicture src_picture, dst_picture;
- static int frame_count;
- static uint8_t *outPixels;
-
- static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
- {
- int ret;
- AVCodecContext *c = st->codec;
-
- // open the codec //
- ret = avcodec_open2(c, codec, NULL);
- if (ret < 0) {
- //fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
- exit(1);
- }
-
- // allocate and init a re-usable frame //
- frame = avcodec_alloc_frame();
- if (!frame) {
- //fprintf(stderr, "Could not allocate video frame\n");
- exit(1);
- }
-
- // Allocate the encoded raw picture. //
- ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
- if (ret < 0) {
- //fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
- exit(1);
- }
-
- // If the output format is not YUV420P, then a temporary YUV420P
- // * picture is needed too. It is then converted to the required
- // * output format. //
- if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
- ret = avpicture_alloc(&src_picture, AV_PIX_FMT_RGB24, c->width, c->height);
- if (ret < 0) {
- //fprintf(stderr, "Could not allocate temporary picture: %s\n",
- // av_err2str(ret));
- exit(1);
- }
- }
-
- // copy data and linesize picture pointers to frame //
- *((AVPicture *)frame) = dst_picture;
-
- outPixels = (uint8_t*)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, st->codec->width,st->codec->height));
- }
-
- // Prepare a dummy image. //
- static void fill_yuv_image(AVPicture *pict, int frame_index,
- int width, int height)
- {
- int x, y, i;
-
- i = frame_index;
-
- // Y //
- for (y = 0; y < height; y++)
- for (x = 0; x < width; x++)
- pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
-
- // Cb and Cr //
- for (y = 0; y < height / 2; y++) {
- for (x = 0; x < width / 2; x++) {
- pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
- pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
- }
- }
- }
-
- static void write_video_frame(AVFormatContext *oc, AVStream *st, uint8_t *pixels)
- {
- int ret;
- static struct SwsContext *sws_ctx;
- AVCodecContext *c = st->codec;
-
-/*
- if (frame_count >= STREAM_NB_FRAMES) {
- // No more frames to compress. The codec has a latency of a few
- // * frames if using B-frames, so we get the last frames by
- // * passing the same picture again. //
- } else {
- if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
- // as we only generate a YUV420P picture, we must convert it
- // * to the codec pixel format if needed //
- if (!sws_ctx) {
- sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
- c->width, c->height, c->pix_fmt,
- sws_flags, NULL, NULL, NULL);
- if (!sws_ctx) {
- //fprintf(stderr,
- // "Could not initialize the conversion context\n");
- exit(1);
- }
- }
- fill_yuv_image(&src_picture, frame_count, c->width, c->height);
- sws_scale(sws_ctx,
- (const uint8_t * const *)src_picture.data, src_picture.linesize,
- 0, c->height, dst_picture.data, dst_picture.linesize);
- } else {
- fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
- }
- }
-*/
- //always convert RGB to YUV
- //should be context allocated once per render instead of per frame??
- //
- //
- sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_RGB24,
- c->width, c->height, AV_PIX_FMT_YUV420P,
- sws_flags, NULL, NULL, NULL);
-
- avpicture_fill(&src_picture, pixels, PIX_FMT_RGB24, c->width,c->height);
- //avpicture_fill(&dst_picture, outPixels, PIX_FMT_YUV420P, c->width,c->height);
-
- sws_scale(sws_ctx, src_picture.data, src_picture.linesize, 0, c->height, dst_picture.data, dst_picture.linesize);
- //fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
- if (oc->oformat->flags & AVFMT_RAWPICTURE) {
- // Raw video case - directly store the picture in the packet //
- AVPacket pkt;
- av_init_packet(&pkt);
-
- pkt.flags |= AV_PKT_FLAG_KEY;
- pkt.stream_index = st->index;
- pkt.data = dst_picture.data[0];
- pkt.size = sizeof(AVPicture);
-
- ret = av_interleaved_write_frame(oc, &pkt);
- } else {
- AVPacket pkt = { 0 };
- int got_packet;
- av_init_packet(&pkt);
-
- // encode the image //
- ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
- if (ret < 0) {
- //fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
- exit(1);
- }
- // If size is zero, it means the image was buffered. //
-
- if (!ret && got_packet && pkt.size) {
- pkt.stream_index = st->index;
-
- // Write the compressed frame to the media file. //
- ret = av_interleaved_write_frame(oc, &pkt);
- } else {
- ret = 0;
- }
- }
-
- //
- // added 22 may in memory leak run
- //
- sws_freeContext(sws_ctx); //should be done once per render instead of per frame??
-
- if (ret != 0) {
- //fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
- exit(1);
- }
- frame_count++;
-
- //avcodec_free_frame(&frame);
- }
-
- static void close_video(AVFormatContext *oc, AVStream *st)
- {
- avcodec_close(st->codec);
- av_free(src_picture.data[0]);
- av_free(dst_picture.data[0]);
- av_free(frame);
- av_free(outPixels); //SIGSEV here???
- }
-
- class Exporter {
- public:
- bool setup(int w,int h, int bitRate, int frameRate, std::string container);
- bool record(std::string filename);
- bool encodeFrame(unsigned char *pixels, uint16_t *samples);
- bool encodeFrame(unsigned char *pixels,AVPacket *audiopkt);
- bool encodeFrame(unsigned char *pixels);
- bool encodeFrame(uint16_t *samples);
- void finishRecord();
- int get_audio_framesize(){return audioframesize;};
- float get_audio_step(){return audiostep;};
-
- private:
- AVOutputFormat *fmt;
- AVFormatContext *oc;
- AVStream *audio_st, *video_st;
- AVCodec *audio_codec, *video_codec;
- double audio_pts, video_pts;
-
- int audioframesize;
- float audiostep;
- int w;
- int h;
- int bitRate;
- int frameRate;
- std::string container;
-
- int outputframe;
-
- };
-
- //************************************************************//
- // media file output //
-
-/*
- int main(int argc, char **argv)
- {
- const char *filename;
- AVOutputFormat *fmt;
- AVFormatContext *oc;
- AVStream *audio_st, *video_st;
- AVCodec *audio_codec, *video_codec;
- double audio_pts, video_pts;
- int ret;
-
- // Initialize libavcodec, and register all codecs and formats. //
- av_register_all();
-
- if (argc != 2) {
- printf("usage: %s output_file\n"
- "API example program to output a media file with libavformat.\n"
- "This program generates a synthetic audio and video stream, encodes and\n"
- "muxes them into a file named output_file.\n"
- "The output format is automatically guessed according to the file extension.\n"
- "Raw images can also be output by using '%%d' in the filename.\n"
- "\n", argv[0]);
- return 1;
- }
-
- filename = argv[1];
-
- // allocate the output media context //
- avformat_alloc_output_context2(&oc, NULL, NULL, filename);
- if (!oc) {
- printf("Could not deduce output format from file extension: using MPEG.\n");
- avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
- }
- if (!oc) {
- return 1;
- }
- fmt = oc->oformat;
-
- // Add the audio and video streams using the default format codecs
- // * and initialize the codecs. //
- video_st = NULL;
- audio_st = NULL;
-
- if (fmt->video_codec != AV_CODEC_ID_NONE) {
- video_st = add_stream(oc, &video_codec, fmt->video_codec);
- }
- if (fmt->audio_codec != AV_CODEC_ID_NONE) {
- audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
- }
-
- // Now that all the parameters are set, we can open the audio and
- * video codecs and allocate the necessary encode buffers. //
- if (video_st)
- open_video(oc, video_codec, video_st);
- if (audio_st)
- open_audio(oc, audio_codec, audio_st);
-
- av_dump_format(oc, 0, filename, 1);
-
- // open the output file, if needed //
- if (!(fmt->flags & AVFMT_NOFILE)) {
- ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
- if (ret < 0) {
- fprintf(stderr, "Could not open '%s': %s\n", filename,
- av_err2str(ret));
- return 1;
- }
- }
-
- // Write the stream header, if any. //
- ret = avformat_write_header(oc, NULL);
- if (ret < 0) {
- fprintf(stderr, "Error occurred when opening output file: %s\n",
- av_err2str(ret));
- return 1;
- }
-
- if (frame)
- frame->pts = 0;
- for (;;) {
- // Compute current audio and video time. //
- if (audio_st)
- audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
- else
- audio_pts = 0.0;
-
- if (video_st)
- video_pts = (double)video_st->pts.val * video_st->time_base.num /
- video_st->time_base.den;
- else
- video_pts = 0.0;
-
- if ((!audio_st || audio_pts >= STREAM_DURATION) &&
- (!video_st || video_pts >= STREAM_DURATION))
- break;
-
- // write interleaved audio and video frames //
- if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
- write_audio_frame(oc, audio_st);
- } else {
- write_video_frame(oc, video_st);
- frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
- }
- }
-
- // Write the trailer, if any. The trailer must be written before you
- * close the CodecContexts open when you wrote the header; otherwise
- * av_write_trailer() may try to use memory that was freed on
- * av_codec_close(). //
- av_write_trailer(oc);
-
- // Close each codec. //
- if (video_st)
- close_video(oc, video_st);
- if (audio_st)
- close_audio(oc, audio_st);
-
- if (!(fmt->flags & AVFMT_NOFILE))
- // Close the output file. //
- avio_close(oc->pb);
-
- // free the stream //
- avformat_free_context(oc);
-
- return 0;
- }
- */
-}
diff --git a/rotord/libavwrapper.cpp b/rotord/libavwrapper.cpp
deleted file mode 100755
index 4b73c46..0000000
--- a/rotord/libavwrapper.cpp
+++ /dev/null
@@ -1,1366 +0,0 @@
- #include "libavwrapper.h"
-
-
-extern "C"
-{
-#include <libswscale/swscale.h>
-}
-
-/*
-#include <QNetworkReply>
-#include <QNetworkRequest>
-#include <QEventLoop>
-#include <QFileInfo>
-#include <QMutexLocker>
-#include <QDebug>
-*/
-
-#include <stdexcept>
-#include <iostream>
-#include <cassert>
-
-using namespace std;
-
-// Translated to C++ by Christopher Bruns May 2012
-// from ffmeg_adapt.c in whisk package by Nathan Clack, Mark Bolstadt, Michael Meeuwisse
-
-//QMutex decoder::mutex;
-
-// Avoid link error on some macs
-#ifdef __APPLE__
-extern "C" {
-#include <stdlib.h>
-#include <errno.h>
-// #include "compiler/compiler.h"
-
-/*
- * Darwin doesn't have posix_memalign(), provide a private
- * weak alternative
- */
- /*
-int __weak posix_memalign(void **ptr, size_t align, size_t size)
-{
- if (*ptr)
- return 0;
-
- return ENOMEM;
-}
-*/
-}
-#endif
-
-// Custom read function so FFMPEG does not need to read from a local file by name.
-// But rather from a stream derived from a URL or whatever.
-extern "C" {
-
-int readFunction(void* opaque, uint8_t* buf, int buf_size)
-{
- //QIODevice* stream = (QIODevice*)opaque;
- ifstream* stream = (ifstream*)opaque;
- //int numBytes =
- stream->read((char*)buf, (streamsize)buf_size);
- return stream->gcount(); //?? is this right
- //numBytes; //TODO work out
-}
-
-// http://cdry.wordpress.com/2009/09/09/using-custom-io-callbacks-with-ffmpeg/
-int64_t seekFunction(void* opaque, int64_t offset, int whence)
-{
- //QIODevice* stream = (QIODevice*)opaque;
- ifstream* stream = (ifstream*)opaque;
- if (stream == NULL)
- return -1;
- else if (whence == AVSEEK_SIZE)
- return -1; // "size of my handle in bytes"
- //else if (stream->isSequential())
- // return -1; // cannot seek a sequential stream //presume this would be certain kind of network stream
- else if (whence == SEEK_CUR) { // relative to start of file
- if (! stream->seekg(offset,ios_base::cur)) //stream->pos() + offset) )
- return -1;
- }
- else if (whence == SEEK_END) { // relative to end of file
- assert(offset < 0);
- if (! stream->seekg(offset,ios_base::end)) //stream->size() + offset) )
- return -1;
- }
- else if (whence == SEEK_SET) { // relative to start of file
- if (! stream->seekg(offset) )
- return -1;
- }
- else {
- assert(false);
- }
- return stream->tellg();
-}
-
-}
-
-
-/////////////////////////////
-// AVPacketWrapper methods //
-/////////////////////////////
-
-
-class AVPacketWrapper
-{
-public:
- AVPacketWrapper();
- virtual ~AVPacketWrapper();
- void free();
-
- AVPacket packet;
-};
-
-
-AVPacketWrapper::AVPacketWrapper()
-{
- packet.destruct = NULL;
-}
-
-/* virtual */
-AVPacketWrapper::~AVPacketWrapper()
-{
- free();
-}
-
-void AVPacketWrapper::free()
-{
- av_free_packet(&packet);
-}
-
-
-//bool libav::b_is_one_time_inited = false;
-
-/////////////////////////
-// decoder methods //
-/////////////////////////
-
-libav::decoder::decoder(PixelFormat pixelFormat)
- : isOpen(false)
-{
- initialize();
- format = pixelFormat;
-}
-
-/*
-decoder::decoder(QUrl url, PixelFormat pixelFormat)
- : isOpen(false)
-{
- //QMutexLocker lock(&decoder::mutex);
- initialize();
- format = pixelFormat;
- isOpen = open(url, pixelFormat);
-}
-*/
-
-/* virtual */
-libav::decoder::~decoder()
-{
- //QMutexLocker lock(&decoder::mutex);
- if (NULL != Sctx) {
- sws_freeContext(Sctx);
- Sctx = NULL;
- }
- if (NULL != pRaw) {
- av_free(pRaw);
- pRaw = NULL;
- }
- if (NULL != pFrameRGB) {
- av_free(pFrameRGB);
- pFrameRGB = NULL;
- }
- if (NULL != pCtx) {
- avcodec_close(pCtx);
- pCtx = NULL;
- }
- if (NULL != container) {
- avformat_close_input(&container);
- container = NULL;
- }
- if (NULL != buffer) {
- av_free(buffer);
- buffer = NULL;
- }
- if (NULL != blank) {
- av_free(blank);
- blank = NULL;
- }
- /*
- if (NULL != avioContext) {
- av_free(avioContext);
- avioContext = NULL;
- }
- */
- //QNetworkreply
- //if (reply != NULL) {
- // reply->deleteLater();
- // reply = NULL;
- //}
- // Don't need to free pCodec?
-}
-/*
-bool decoder::open(QUrl url, enum PixelFormat formatParam)
-{
- if (url.isEmpty())
- return false;
-
- // Is the movie source a local file?
- if (url.host() == "localhost")
- url.setHost("");
- QString fileName = url.toLocalFile();
- if ( (! fileName.isEmpty())
- && (QFileInfo(fileName).exists()) )
- {
- // return open(fileName, formatParam); // for testing only
-
- // Yes, the source is a local file
- fileStream.setFileName(fileName);
- // qDebug() << fileName;
- if (! fileStream.open(QIODevice::ReadOnly))
- return false;
- return open(fileStream, fileName, formatParam);
- }
-
- // ...No, the source is not a local file
- if (url.host() == "")
- url.setHost("localhost");
- fileName = url.path();
-
- // http://stackoverflow.com/questions/9604633/reading-a-file-located-in-memory-with-libavformat
- // Load from URL
- QEventLoop loop; // for synchronous url fetch http://stackoverflow.com/questions/5486090/qnetworkreply-wait-for-finished
- QObject::connect(&networkManager, SIGNAL(finished(QNetworkReply*)),
- &loop, SLOT(quit()));
- QNetworkRequest request = QNetworkRequest(url);
- // qDebug() << "networkManager" << __FILE__ << __LINE__;
- reply = networkManager.get(request);
- loop.exec();
- if (reply->error() != QNetworkReply::NoError) {
- // qDebug() << reply->error();
- reply->deleteLater();
- reply = NULL;
- return false;
- }
- QIODevice * stream = reply;
- // Mpeg needs seekable device, so create in-memory buffer if necessary
- if (stream->isSequential()) {
- byteArray = stream->readAll();
- fileBuffer.setBuffer(&byteArray);
- fileBuffer.open(QIODevice::ReadOnly);
- if (! fileBuffer.seek(0))
- return false;
- stream = &fileBuffer;
- assert(! stream->isSequential());
- }
- bool result = open(*stream, fileName, formatParam);
- return result;
-}
-
-bool decoder::open(QIODevice& fileStream, QString& fileName, enum PixelFormat formatParam)
-{
- // http://stackoverflow.com/questions/9604633/reading-a-file-located-in-memory-with-libavformat
- // I think AVIOContext is the trick used to redivert the input stream
- ioBuffer = (unsigned char *)av_malloc(ioBufferSize + FF_INPUT_BUFFER_PADDING_SIZE); // can get av_free()ed by libav
- avioContext = avio_alloc_context(ioBuffer, ioBufferSize, 0, (void*)(&fileStream), &readFunction, NULL, &seekFunction);
- container = avformat_alloc_context();
- container->pb = avioContext;
-
- // Open file, check usability
- std::string fileNameStd = fileName.toStdString();
- if (!avtry( avformat_open_input(&container, fileNameStd.c_str(), NULL, NULL), fileNameStd ))
- return false;
- return openUsingInitializedContainer(formatParam);
-}
-*/
-// file name based method for historical continuity
-bool libav::decoder::open(char* fileName, enum PixelFormat formatParam){
-
- if (!avtry( avformat_open_input(&container, fileName, NULL, NULL), string(fileName) ))
- return false;
- return openUsingInitializedContainer(formatParam);
-}
-bool libav::decoder::open(string& fileName, enum PixelFormat formatParam)
-{
- // Open file, check usability
-
- if (!avtry( avformat_open_input(&container, fileName.c_str(), NULL, NULL), fileName ))
- return false;
- return openUsingInitializedContainer(formatParam);
-}
-
-
-bool libav::decoder::openUsingInitializedContainer(enum PixelFormat formatParam)
-{
- format = formatParam;
- sc = getNumberOfChannels();
-
- if (!avtry( avformat_find_stream_info(container, NULL), "Cannot find stream information." ))
- return false;
- if (!avtry( videoStream=av_find_best_stream(container, AVMEDIA_TYPE_VIDEO, -1, -1, &pCodec, 0), "Cannot find a video stream." ))
- return false;
- pCtx=container->streams[videoStream]->codec;
- width = pCtx->width;
- height = pCtx->height;
- if (!avtry( avcodec_open2(pCtx, pCodec, NULL), "Cannot open video decoder." ))
- return false;
-
- /* Frame rate fix for some codecs */
- if( pCtx->time_base.num > 1000 && pCtx->time_base.den == 1 )
- pCtx->time_base.den = 1000;
-
- /* Compute the total number of frames in the file */
- /* duration is in microsecs */
- numFrames = (int)(( container->duration / (double)AV_TIME_BASE ) * pCtx->time_base.den + 0.5);
-
- /* Get framebuffers */
- if (! (pRaw = avcodec_alloc_frame()) )
- throw std::runtime_error("");
- if (! (pFrameRGB = avcodec_alloc_frame()) )
- throw std::runtime_error("");
-
- /* Create data buffer */
- if (format == PIX_FMT_NONE) {
- numBytes = 0;
- buffer = NULL;
- blank = NULL;
- pFrameRGB = NULL;
- Sctx = NULL;
- }
- else {
- numBytes = avpicture_get_size( format, pCtx->width, pCtx->height ); // RGB24 format
- if (! (buffer = (uint8_t*)av_malloc(numBytes + FF_INPUT_BUFFER_PADDING_SIZE)) ) // RGB24 format
- throw std::runtime_error("");
- if (! (blank = (uint8_t*)av_mallocz(avpicture_get_size(pCtx->pix_fmt,width,height))) ) // native codec format
- throw std::runtime_error("");
-
- /* Init buffers */
- avpicture_fill( (AVPicture * ) pFrameRGB, buffer, format,
- pCtx->width, pCtx->height );
-
- /* Init scale & convert */
- if (! (Sctx=sws_getContext(
- pCtx->width,
- pCtx->height,
- pCtx->pix_fmt,
- width,
- height,
- format,
- SWS_POINT, // fastest?
- NULL,NULL,NULL)) )
- throw std::runtime_error("");
- }
-
- /* Give some info on stderr about the file & stream */
- //dump_format(container, 0, fname, 0);
-
- previousFrameIndex = -1;
- return true;
-}
-
-bool libav::decoder::fetchFrame(int targetFrameIndex)
-{
- if ((targetFrameIndex < 0) || (targetFrameIndex > numFrames))
- return false;
- if (targetFrameIndex == (previousFrameIndex + 1)) {
- if (! readNextFrame(targetFrameIndex))
- return false;
- }
- else {
- int64_t response=seekToFrame(targetFrameIndex);
- if (response < 0)
- return false;
- if (response!=targetFrameIndex){
- cerr<<"libav::decoder asked for "<<targetFrameIndex<<", got "<<response<<endl; //does not seem to be aware of wrong frame
- }
- }
- previousFrameIndex = targetFrameIndex;
- return true;
-}
-
-// \returns current frame on success, otherwise -1
-int libav::decoder::seekToFrame(int targetFrameIndex)
-{
- int64_t duration = container->streams[videoStream]->duration;
- int64_t ts = av_rescale(duration,targetFrameIndex,numFrames);
- int64_t tol = av_rescale(duration,1,2*numFrames);
- if ( (targetFrameIndex < 0) || (targetFrameIndex >= numFrames) ) {
- return -1;
- }
- int result = avformat_seek_file( container, //format context
- videoStream,//stream id
- 0, //min timestamp 0?
- ts, //target timestamp
- ts, //max timestamp
- 0);//flags AVSEEK_FLAG_ANY //doesn't seem to work great
- if (result < 0)
- return -1;
-
- avcodec_flush_buffers(pCtx);
- if (! readNextFrame(targetFrameIndex))
- return -1;
-
- return targetFrameIndex;
-}
-
-bool libav::decoder::readNextFrame(int targetFrameIndex)
-{
- AVPacket packet = {0};
- av_init_packet(&packet);
- bool result = readNextFrameWithPacket(targetFrameIndex, packet, pRaw);
- av_free_packet(&packet);
- return result;
-}
-
-// WARNING this method can raise an exception
-bool libav::decoder::readNextFrameWithPacket(int targetFrameIndex, AVPacket& packet, AVFrame* pYuv)
-{
- int finished = 0;
- do {
- finished = 0;
- av_free_packet(&packet);
- int result;
- if (!avtry(av_read_frame( container, &packet ), "Failed to read frame"))
- return false; // !!NOTE: see docs on packet.convergence_duration for proper seeking
- if( packet.stream_index != videoStream ) /* Is it what we're trying to parse? */
- continue;
- if (!avtry(avcodec_decode_video2( pCtx, pYuv, &finished, &packet ), "Failed to decode video"))
- return false;
- // handle odd cases and debug
- if((pCtx->codec_id==CODEC_ID_RAWVIDEO) && !finished)
- {
- avpicture_fill( (AVPicture * ) pYuv, blank, pCtx->pix_fmt,width, height ); // set to blank frame
- finished = 1;
- }
-#if 0 // very useful for debugging
- cout << "Packet - pts:" << (int)packet.pts;
- cout << " dts:" << (int)packet.dts;
- cout << " - flag: " << packet.flags;
- cout << " - finished: " << finished;
- cout << " - Frame pts:" << (int)pYuv->pts;
- cout << " " << (int)pYuv->best_effort_timestamp;
- cout << endl;
- /* printf("Packet - pts:%5d dts:%5d (%5d) - flag: %1d - finished: %3d - Frame pts:%5d %5d\n",
- (int)packet.pts,(int)packet.dts,
- packet.flags,finished,
- (int)pYuv->pts,(int)pYuv->best_effort_timestamp); */
-#endif
- if(!finished) {
- if (packet.pts == AV_NOPTS_VALUE)
- throw std::runtime_error("");
- if (packet.size == 0) // packet.size==0 usually means EOF
- break;
- }
- } while ( (!finished) || (pYuv->best_effort_timestamp < targetFrameIndex));
-
- av_free_packet(&packet);
-
- if (format != PIX_FMT_NONE) {
- sws_scale(Sctx, // sws context
- pYuv->data, // src slice
- pYuv->linesize, // src stride
- 0, // src slice origin y
- pCtx->height, // src slice height
- pFrameRGB->data, // dst
- pFrameRGB->linesize ); // dst stride
- }
-
- previousFrameIndex = targetFrameIndex;
- return true;
-}
-
-uint8_t libav::decoder::getPixelIntensity(int x, int y, Channel c) const
-{
- return *(pFrameRGB->data[0] + y * pFrameRGB->linesize[0] + x * sc + c);
-}
-
-int libav::decoder::getNumberOfFrames() const { return numFrames; }
-
-int libav::decoder::getWidth() const { return width; }
-
-int libav::decoder::getHeight() const { return height; }
-
-int libav::decoder::getNumberOfChannels() const
-{
- switch(format)
- {
- case PIX_FMT_BGRA:
- return 4;
- break;
- case PIX_FMT_RGB24:
- return 3;
- break;
- case PIX_FMT_GRAY8:
- return 1;
- break;
- default:
- return 0;
- break;
- }
- return 0;
-}
-
-void libav::decoder::initialize()
-{
- Sctx = NULL;
- pRaw = NULL;
- pFrameRGB = NULL;
- pCtx = NULL;
- container = NULL;
- buffer = NULL;
- blank = NULL;
- pCodec = NULL;
- format = PIX_FMT_NONE;
- //network stuff
- //reply = NULL;
- //ioBuffer = NULL;
- //avioContext = NULL;
- maybeInitFFMpegLib();
-}
-
-void libav::maybeInitFFMpegLib()
-{
- if (b_is_one_time_inited)
- return;
- av_register_all();
- avcodec_register_all();
- avformat_network_init();
- b_is_one_time_inited = true;
-}
-
-bool libav::decoder::avtry(int result, const std::string& msg) {
- if ((result < 0) && (result != AVERROR_EOF)) {
- char buf[1024];
- av_strerror(result, buf, sizeof(buf));
- std::string message = std::string("libav::Error: ") + msg + buf;
- //qDebug() << QString(message.c_str());
- cerr<<message<<endl;
- return false;
- }
- return true;
-}
-
-
-
-
-///////////////////////////
-// encoder methods //
-///////////////////////////
-
-
-libav::encoder::encoder(const char * file_name, int width, int height, float _framerate,enum AVCodecID codec_id)
- : picture_yuv(NULL)
- , picture_rgb(NULL)
- , container(NULL)
-{
- //multiply float seconds by this to get pts
- timebase=((float)AV_TIME_BASE_Q.den)/(AV_TIME_BASE_Q.num*_framerate*3.125f); //no idea where the 3.125 comes from
-
- if (0 != (width % 2))
- cerr << "WARNING: Video width is not a multiple of 2" << endl;
- if (0 != (height % 2))
- cerr << "WARNING: Video height is not a multiple of 2" << endl;
-
- maybeInitFFMpegLib();
-
- container = avformat_alloc_context();
- if (NULL == container)
- throw std::runtime_error("Unable to allocate format context");
-
- AVOutputFormat * fmt = av_guess_format(NULL, file_name, NULL);
- if (!fmt)
- fmt = av_guess_format("mpeg", NULL, NULL);
- if (!fmt)
- throw std::runtime_error("Unable to deduce video format");
- container->oformat = fmt;
-
- fmt->video_codec = codec_id;
- // fmt->video_codec = CODEC_ID_H264; // fails to write
-
- AVStream * video_st = avformat_new_stream(container, NULL);
-
- pCtx = video_st->codec;
- pCtx->codec_id = fmt->video_codec;
- pCtx->codec_type = AVMEDIA_TYPE_VIDEO;
- // resolution must be a multiple of two
- pCtx->width = width;
- pCtx->height = height;
-
- // bit_rate determines image quality
- pCtx->bit_rate = width * height * 4; // ?
- // pCtx->qmax = 50; // no effect?
-
- // "high quality" parameters from http://www.cs.ait.ac.th/~on/mplayer/pl/menc-feat-enc-libavcodec.html
- // vcodec=mpeg4:mbd=2:mv0:trell:v4mv:cbp:last_pred=3:predia=2:dia=2:vmax_b_frames=2:vb_strategy=1:precmp=2:cmp=2:subcmp=2:preme=2:vme=5:naq:qns=2
- if (false) // does not help
- // if (pCtx->codec_id == CODEC_ID_MPEG4)
- {
- pCtx->mb_decision = 2;
- pCtx->last_predictor_count = 3;
- pCtx->pre_dia_size = 2;
- pCtx->dia_size = 2;
- pCtx->max_b_frames = 2;
- pCtx->b_frame_strategy = 2;
- pCtx->trellis = 2;
- pCtx->compression_level = 2;
- pCtx->global_quality = 300;
- pCtx->pre_me = 2;
- pCtx->mv0_threshold = 1;
- // pCtx->quantizer_noise_shaping = 2; // deprecated
- // TODO
- }
-
- pCtx->time_base = (AVRational){1, 25}; /////TODO FIX TO SUPPORT OTHER RATES
- // pCtx->time_base = (AVRational){1, 10};
- pCtx->gop_size = 12; // emit one intra frame every twelve frames
- // pCtx->max_b_frames = 0;
- pCtx->pix_fmt = PIX_FMT_YUV420P;
- if (fmt->flags & AVFMT_GLOBALHEADER)
- pCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
-
- if (pCtx->codec_id == CODEC_ID_H264)
- {
- // http://stackoverflow.com/questions/3553003/encoding-h-264-with-libavcodec-x264
- pCtx->coder_type = 1; // coder = 1
- pCtx->flags|=CODEC_FLAG_LOOP_FILTER; // flags=+loop
- pCtx->me_cmp|= 1; // cmp=+chroma, where CHROMA = 1
- // pCtx->partitions|=X264_PART_I8X8+X264_PART_I4X4+X264_PART_P8X8+X264_PART_B8X8; // partitions=+parti8x8+parti4x4+partp8x8+partb8x8
- pCtx->me_method=ME_HEX; // me_method=hex
- pCtx->me_subpel_quality = 7; // subq=7
- pCtx->me_range = 16; // me_range=16
- pCtx->gop_size = 250; // g=250
- pCtx->keyint_min = 25; // keyint_min=25
- pCtx->scenechange_threshold = 40; // sc_threshold=40
- pCtx->i_quant_factor = 0.71; // i_qfactor=0.71
- pCtx->b_frame_strategy = 1; // b_strategy=1
- pCtx->qcompress = 0.6; // qcomp=0.6
- pCtx->qmin = 10; // qmin=10
- pCtx->qmax = 51; // qmax=51
- pCtx->max_qdiff = 4; // qdiff=4
- pCtx->max_b_frames = 3; // bf=3
- pCtx->refs = 3; // refs=3
- // pCtx->directpred = 1; // directpred=1
- pCtx->trellis = 1; // trellis=1
- // pCtx->flags2|=CODEC_FLAG2_BPYRAMID+CODEC_FLAG2_MIXED_REFS+CODEC_FLAG2_WPRED+CODEC_FLAG2_8X8DCT+CODEC_FLAG2_FASTPSKIP; // flags2=+bpyramid+mixed_refs+wpred+dct8x8+fastpskip
- // pCtx->weighted_p_pred = 2; // wpredp=2
- // libx264-main.ffpreset preset
- // pCtx->flags2|=CODEC_FLAG2_8X8DCT;
- // pCtx->flags2^=CODEC_FLAG2_8X8DCT; // flags2=-dct8x8
- }
-
- AVCodec * codec = avcodec_find_encoder(pCtx->codec_id);
- if (NULL == codec)
- throw std::runtime_error("Unable to find Mpeg4 codec");
- if (codec->pix_fmts)
- pCtx->pix_fmt = codec->pix_fmts[0];
- {
- //QMutexLocker lock(&decoder::mutex);
- if (avcodec_open2(pCtx, codec, NULL) < 0)
- throw std::runtime_error("Error opening codec");
- }
-
- /* Get framebuffers */
- if (! (picture_yuv = avcodec_alloc_frame()) ) // final frame format
- throw std::runtime_error("");
- if (! (picture_rgb = avcodec_alloc_frame()) ) // rgb version I can understand easily
- throw std::runtime_error("");
- /* the image can be allocated by any means and av_image_alloc() is
- * just the most convenient way if av_malloc() is to be used */
- if ( av_image_alloc(picture_yuv->data, picture_yuv->linesize,
- pCtx->width, pCtx->height, pCtx->pix_fmt, 1) < 0 )
- throw std::runtime_error("Error allocating YUV frame buffer");
- if ( av_image_alloc(picture_rgb->data, picture_rgb->linesize,
- pCtx->width, pCtx->height, PIX_FMT_RGB24, 1) < 0 )
- throw std::runtime_error("Error allocating RGB frame buffer");
-
- /* Init scale & convert */
- if (! (Sctx=sws_getContext(
- width,
- height,
- PIX_FMT_RGB24,
- pCtx->width,
- pCtx->height,
- pCtx->pix_fmt,
- SWS_BICUBIC,NULL,NULL,NULL)) )
- throw std::runtime_error("");
-
-//
-//
-// added audio init
- AVCodec * acodec = avcodec_find_encoder(AV_CODEC_ID_AAC);
- int ret = avcodec_open2(pCtx, acodec, NULL);
- if (ret < 0) {
- throw std::runtime_error("Could not open audio codec:");
-
- }
-
- if (pCtx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
- audio_input_frame_size = 10000;
- else
- audio_input_frame_size = pCtx->frame_size; //is coming out at 0?
-
- audiostep=((float)audio_input_frame_size)/(pCtx->sample_rate);
-
-
-// are we supposed to use the same codeccontext?
-//
-
- /* open the output file */
- if (!(fmt->flags & AVFMT_NOFILE))
- {
- //QMutexLocker lock(&decoder::mutex);
- if (avio_open(&container->pb, file_name, AVIO_FLAG_WRITE) < 0)
- throw std::runtime_error("Error opening output video file");
- }
- avformat_write_header(container, NULL);
-}
-
-void libav::encoder::setPixelIntensity(int x, int y, int c, uint8_t value)
-{
- uint8_t * ptr = picture_rgb->data[0] + y * picture_rgb->linesize[0] + x * 3 + c;
- *ptr = value;
-}
-
-void libav::encoder::write_frame(float seconds,uint8_t *rgbdata)
-{
- picture_rgb->data[0]=rgbdata;
-
- // convert from RGB24 to YUV
- sws_scale(Sctx, // sws context
- picture_rgb->data, // src slice
- picture_rgb->linesize, // src stride
- 0, // src slice origin y
- pCtx->height, // src slice height
- picture_yuv->data, // dst
- picture_yuv->linesize ); // dst stride
-
- /* encode the image */
- // use non-deprecated avcodec_encode_video2(...)
- AVPacket packet;
- av_init_packet(&packet);
- packet.data = NULL;
- packet.size = 0;
-
- //no time stamps as is
- //http://dranger.com/ffmpeg/tutorial07.html
-
- picture_yuv->pts=(uint64_t)(seconds*timebase);
-
- int got_packet;
- int ret = avcodec_encode_video2(pCtx,
- &packet,
- picture_yuv,
- &got_packet);
- if (ret < 0)
- throw std::runtime_error("Video encoding failed");
- if (got_packet)
- {
- // std::cout << "encoding frame" << std::endl;
- int result = av_write_frame(container, &packet);
- av_destruct_packet(&packet);
- }
-}
-void libav::encoder::write_frame(float seconds,uint16_t *audiodata){
- audio_frame = avcodec_alloc_frame();
- AVPacket pkt = { 0 }; // data and size must be 0;
- int got_packet, ret;
- av_init_packet(&pkt);
- audio_frame->nb_samples = audio_input_frame_size;
- uint8_t *sampleptr;
- int bufsize=audio_input_frame_size * av_get_bytes_per_sample(pCtx->sample_fmt) *pCtx->channels;
- if (audiodata) {
- sampleptr=(uint8_t*)audiodata;
- }
- else {
- sampleptr=new uint8_t[bufsize];
- memset(sampleptr,0,bufsize);
- }
-
- avcodec_fill_audio_frame(audio_frame, pCtx->channels, pCtx->sample_fmt,
- sampleptr,
- audio_input_frame_size *
- av_get_bytes_per_sample(pCtx->sample_fmt) *
- pCtx->channels, 0); //;
-
- audio_frame->pts=(uint64_t)(seconds*timebase);
-
- ret = avcodec_encode_audio2(pCtx, &pkt, audio_frame, &got_packet);
- if (!audiodata) {
- delete[] sampleptr;
- }
- if (ret < 0) {
- throw std::runtime_error("Audio encoding failed");
- }
-
- if (!got_packet)
- return;
-
- // ? pkt.stream_index = st->index;
-
- ret = av_interleaved_write_frame(container, &pkt);
- avcodec_free_frame(&audio_frame);
-}
-
-/* virtual */
-libav::encoder::~encoder()
-{
- int result = av_write_frame(container, NULL); // flush
- result = av_write_trailer(container);
- {
- //QMutexLocker lock(&decoder::mutex);
- avio_close(container->pb);
- }
- for (int i = 0; i < container->nb_streams; ++i)
- av_freep(container->streams[i]);
- av_free(container);
- container = NULL;
-
- {
- //QMutexLocker lock(&decoder::mutex);
- avcodec_close(pCtx);
- }
- av_free(pCtx);
- pCtx = NULL;
- av_free(picture_yuv->data[0]);
- av_free(picture_yuv);
- picture_yuv = NULL;
- av_free(picture_rgb->data[0]);
- av_free(picture_rgb);
- picture_rgb = NULL;
-}
-
-bool libav::exporter::setup(int w,int h, int bitRate, int frameRate, std::string container){
-
- maybeInitFFMpegLib();
-
- this->w=w;
- this->h=h;
- this->bitRate=bitRate;
- this->frameRate=frameRate;
- this->container=container;
-
- return true;
-}
-
-bool libav::exporter::record(std::string filename){
-
- // allocate the output media context //
- avformat_alloc_output_context2(&oc, NULL, NULL, filename.c_str());
- if (!oc) {
- printf("Could not deduce output format from file extension: using MPEG.\n");
- avformat_alloc_output_context2(&oc, NULL, "mpeg", filename.c_str());
- }
- if (!oc) {
- return false;
- }
- fmt = oc->oformat;
-
- // Add the audio and video streams using the default format codecs
- // * and initialize the codecs. //
- video_st = NULL;
- audio_st = NULL;
-
- if (fmt->video_codec != AV_CODEC_ID_NONE) {
- video_st = add_stream(oc, &video_codec, fmt->video_codec);
- }
- if (fmt->audio_codec != AV_CODEC_ID_NONE) {
- audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
- }
-
- //set initial video params
- video_st->codec->width=w;
- video_st->codec->height=h;
- video_st->codec->time_base.num = 1;//codecCtx->ticks_per_frame;
- video_st->codec->time_base.den = frameRate;
- video_st->time_base = video_st->codec->time_base;
- //audioStream->time_base = codecCtx->time_base; //???has the capability of crashing
-
- video_st->codec->gop_size = 10; /* emit one intra frame every ten frames */
- video_st->codec->pix_fmt = PIX_FMT_YUV420P;
-
- // Now that all the parameters are set, we can open the audio and
- // * video codecs and allocate the necessary encode buffers. //
- if (video_st)
- open_video(oc, video_codec, video_st);
- if (audio_st) {
- audioframesize=open_audio(oc, audio_codec, audio_st);
- audiostep=((float)audioframesize)/(audio_st->codec->sample_rate);
- std::cerr << "opened audio codec with "<<audioframesize<<" frame size and "<<audiostep<<" seconds per frame"<<std::endl;
- }
-
-
- av_dump_format(oc, 0, filename.c_str(), 1);
-
- // open the output file, if needed //
- if (!(fmt->flags & AVFMT_NOFILE)) {
- int ret = avio_open(&oc->pb, filename.c_str(), AVIO_FLAG_WRITE);
- if (ret < 0) {
- std::cerr <<"Could not open " << filename.c_str() << std::endl;
- return false;
- }
- }
-
- // Write the stream header, if any. //
- int ret = avformat_write_header(oc, NULL);
- if (ret < 0) {
- //std::cerr <<"Error occurred when opening output file:" << av_err2str(ret) << std::endl;
- return false;
- }
-
- if (frame)
- frame->pts = 0;
-
- outputframe=0;
-
- return true;
-}
-bool libav::exporter::encodeFrame(unsigned char *pixels,uint16_t *samples){
- // Compute current audio and video time. //
- if (audio_st)
- audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
- else
- audio_pts = 0.0;
-
- if (video_st)
- video_pts = (double)video_st->pts.val * video_st->time_base.num /
- video_st->time_base.den;
- else
- video_pts = 0.0;
-
- // write interleaved audio and video frames //
- if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
- write_audio_frame(oc, audio_st, samples);
- } else {
- write_video_frame(oc, video_st, pixels);
-
- frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
- }
-
- //std::cerr << "encoded frame " << outputframe << std::endl;
- outputframe++;
-
- return true;
-}
-bool libav::exporter::encodeFrame(unsigned char *pixels,AVPacket *audio){
- // Compute current audio and video time. //
- if (audio_st)
- audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
- else
- audio_pts = 0.0;
-
- if (video_st)
- video_pts = (double)video_st->pts.val * video_st->time_base.num /
- video_st->time_base.den;
- else
- video_pts = 0.0;
-
- // write interleaved audio and video frames //
- if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
- write_audio_frame(oc, audio_st, audio);
- } else {
- write_video_frame(oc, video_st, pixels);
-
- frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
- }
-
- //std::cerr << "encoded frame " << outputframe << std::endl;
- outputframe++;
-
- return true;
-}
-bool libav::exporter::encodeFrame(unsigned char *pixels){
- video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
- write_video_frame(oc, video_st, pixels);
- frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
- outputframe++;
- return true;
-}
-bool libav::exporter::encodeFrame(uint16_t *samples){
- audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
- write_audio_frame(oc, audio_st, samples);
- return true;
-}
-void libav::exporter::finishRecord(){
-
- av_write_trailer(oc);
- // Close each codec. //
- if (video_st)
- close_video(oc, video_st);
- if (audio_st)
- close_audio(oc, audio_st);
-
- if (!(fmt->flags & AVFMT_NOFILE))
- // Close the output file. //
- avio_close(oc->pb);
-
- // free the stream //
- avformat_free_context(oc);
-}
-
-AVStream* libav::exporter::add_stream(AVFormatContext *oc, AVCodec **codec,enum AVCodecID codec_id)
- {
- AVCodecContext *c;
- AVStream *st;
-
- // find the encoder //
- *codec = avcodec_find_encoder(codec_id);
- if (!(*codec)) {
- //fprintf(stderr, "Could not find encoder for '%s'\n",
- // avcodec_get_name(codec_id));
- exit(1);
- }
-
- st = avformat_new_stream(oc, *codec);
- if (!st) {
- //fprintf(stderr, "Could not allocate stream\n");
- exit(1);
- }
- st->id = oc->nb_streams-1;
- c = st->codec;
-
- switch ((*codec)->type) {
- case AVMEDIA_TYPE_AUDIO:
- st->id = 1;
- c->sample_fmt = AV_SAMPLE_FMT_S16;
- c->bit_rate = 64000;
- c->sample_rate = 44100;
- c->channels = 2;
- c->channel_layout=AV_CH_LAYOUT_STEREO;
- break;
-
- case AVMEDIA_TYPE_VIDEO:
- c->codec_id = codec_id;
-
- c->bit_rate = 400000;
- // Resolution must be a multiple of two. //
- c->width = 352;
- c->height = 288;
- // timebase: This is the fundamental unit of time (in seconds) in terms
- // * of which frame timestamps are represented. For fixed-fps content,
- // * timebase should be 1/framerate and timestamp increments should be
- // * identical to 1. //
- c->time_base.den = frameRate;
- c->time_base.num = 1;
- c->gop_size = 12; // emit one intra frame every twelve frames at most //
- c->pix_fmt = AV_PIX_FMT_YUV420P; //ADDED HARDCODED TJR 280513
- if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
- // just for testing, we also add B frames //
- c->max_b_frames = 2;
- }
- if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
- // Needed to avoid using macroblocks in which some coeffs overflow.
- // * This does not happen with normal video, it just happens here as
- // * the motion of the chroma plane does not match the luma plane. //
- c->mb_decision = 2;
- }
- break;
-
- default:
- break;
- }
-
- // Some formats want stream headers to be separate. //
- if (oc->oformat->flags & AVFMT_GLOBALHEADER)
- c->flags |= CODEC_FLAG_GLOBAL_HEADER;
-
- return st;
- }
-
-void libav::exporter::open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
- {
- int ret;
- AVCodecContext *c = st->codec;
-
- // open the codec //
- ret = avcodec_open2(c, codec, NULL);
- if (ret < 0) {
- //fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
- exit(1);
- }
-
- // allocate and init a re-usable frame //
- frame = avcodec_alloc_frame();
- if (!frame) {
- //fprintf(stderr, "Could not allocate video frame\n");
- exit(1);
- }
-
- // Allocate the encoded raw picture. //
- ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
- if (ret < 0) {
- //fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
- exit(1);
- }
-
- // If the output format is not YUV420P, then a temporary YUV420P
- // * picture is needed too. It is then converted to the required
- // * output format. //
- if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
- ret = avpicture_alloc(&src_picture, AV_PIX_FMT_RGB24, c->width, c->height);
- if (ret < 0) {
- //fprintf(stderr, "Could not allocate temporary picture: %s\n",
- // av_err2str(ret));
- exit(1);
- }
- }
-
- // copy data and linesize picture pointers to frame //
- *((AVPicture *)frame) = dst_picture;
-
- outPixels = (uint8_t*)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, st->codec->width,st->codec->height));
- }
-
- int libav::exporter::open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
- {
- AVCodecContext *c;
- int ret;
-
- c = st->codec;
-
- // open it //
- ret = avcodec_open2(c, codec, NULL);
- if (ret < 0) {
- //fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
- exit(1);
- }
-
- // init signal generator //
- t = 0;
- tincr = 2 * M_PI * 110.0 / c->sample_rate;
- // increment frequency by 110 Hz per second //
- tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
-
- if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
- audio_input_frame_size = 10000;
- else
- audio_input_frame_size = c->frame_size;
-
- /*
- samples = av_malloc(audio_input_frame_size *
- av_get_bytes_per_sample(c->sample_fmt) *
- c->channels);
- if (!samples) {
- //fprintf(stderr, "Could not allocate audio samples buffer\n");
- exit(1);
- }
- */
- return audio_input_frame_size;
- }
-
- void libav::exporter::write_audio_frame(AVFormatContext *oc, AVStream *st,uint16_t *samples)
- {
- AVCodecContext *c;
- AVPacket pkt = { 0 }; // data and size must be 0;
- AVFrame *frame = avcodec_alloc_frame();
- int got_packet, ret;
-
- av_init_packet(&pkt);
- c = st->codec;
-
- //get_audio_frame(samples, audio_input_frame_size, c->channels);
- frame->nb_samples = audio_input_frame_size;
- uint8_t *sampleptr;
- int bufsize=audio_input_frame_size * av_get_bytes_per_sample(c->sample_fmt) *c->channels;
- if (samples) {
- sampleptr=(uint8_t*)samples;
- }
- else {
- sampleptr=new uint8_t[bufsize];
- memset(sampleptr,0,bufsize);
- }
-
- avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
- sampleptr,
- audio_input_frame_size *
- av_get_bytes_per_sample(c->sample_fmt) *
- c->channels, 0); //;
- //frame->sample_rate=44100; //hard coded input rate- nope, this doesn't help
- //frame->format=AV_SAMPLE_FMT_S16P;
- //?? why is ffmpeg reporting fltp as the sample format??? doesn't seem to have an effect to change this though
- ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
- if (!samples) {
- delete[] sampleptr;
- }
- if (ret < 0) {
- //fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
- exit(1);
- }
-
- if (!got_packet)
- return;
-
- pkt.stream_index = st->index;
-
- // Write the compressed frame to the media file. //
- ret = av_interleaved_write_frame(oc, &pkt);
- if (ret != 0) {
- //fprintf(stderr, "Error while writing audio frame: %s\n",
- // av_err2str(ret));
- exit(1);
- }
- avcodec_free_frame(&frame);
- }
-
- void libav::exporter::write_audio_frame(AVFormatContext *oc, AVStream *st,AVPacket *pkt)
- {
- /*
- AVCodecContext *c;
- AVPacket pkt = { 0 }; // data and size must be 0;
- AVFrame *frame = avcodec_alloc_frame();
- int got_packet, ret;
-
- av_init_packet(&pkt);
- c = st->codec;
-
- //get_audio_frame(samples, audio_input_frame_size, c->channels);
- frame->nb_samples = audio_input_frame_size;
- uint8_t *sampleptr;
- int bufsize=audio_input_frame_size * av_get_bytes_per_sample(c->sample_fmt) *c->channels;
- if (samples) {
- sampleptr=(uint8_t*)samples;
- }
- else {
- sampleptr=new uint8_t[bufsize];
- memset(sampleptr,0,bufsize);
- }
- avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
- sampleptr,
- audio_input_frame_size *
- av_get_bytes_per_sample(c->sample_fmt) *
- c->channels, 1);
-
- ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
- if (!samples) {
- free(sampleptr);
- }
- if (ret < 0) {
- //fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
- exit(1);
- }
-
- if (!got_packet)
- return;
- */
-
- pkt->stream_index = st->index;
-
- // Write the compressed frame to the media file. //
- int ret = av_interleaved_write_frame(oc, pkt);
- if (ret != 0) {
- //fprintf(stderr, "Error while writing audio frame: %s\n",
- // av_err2str(ret));
- exit(1);
- }
- //avcodec_free_frame(&frame);
- av_free_packet(pkt);
- }
-
- void libav::exporter::close_audio(AVFormatContext *oc, AVStream *st)
- {
- avcodec_close(st->codec);
-
-
- }
-
- void libav::exporter::write_video_frame(AVFormatContext *oc, AVStream *st, uint8_t *pixels)
- {
- int ret;
- static struct SwsContext *sws_ctx;
- AVCodecContext *c = st->codec;
-
-/*
- if (frame_count >= STREAM_NB_FRAMES) {
- // No more frames to compress. The codec has a latency of a few
- // * frames if using B-frames, so we get the last frames by
- // * passing the same picture again. //
- } else {
- if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
- // as we only generate a YUV420P picture, we must convert it
- // * to the codec pixel format if needed //
- if (!sws_ctx) {
- sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
- c->width, c->height, c->pix_fmt,
- sws_flags, NULL, NULL, NULL);
- if (!sws_ctx) {
- //fprintf(stderr,
- // "Could not initialize the conversion context\n");
- exit(1);
- }
- }
- fill_yuv_image(&src_picture, frame_count, c->width, c->height);
- sws_scale(sws_ctx,
- (const uint8_t * const *)src_picture.data, src_picture.linesize,
- 0, c->height, dst_picture.data, dst_picture.linesize);
- } else {
- fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
- }
- }
-*/
- //always convert RGB to YUV
- //should be context allocated once per render instead of per frame??
- //
- //
- sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_RGB24,
- c->width, c->height, AV_PIX_FMT_YUV420P,
- sws_flags, NULL, NULL, NULL);
-
- avpicture_fill(&src_picture, pixels, PIX_FMT_RGB24, c->width,c->height);
- //avpicture_fill(&dst_picture, outPixels, PIX_FMT_YUV420P, c->width,c->height);
-
- sws_scale(sws_ctx, src_picture.data, src_picture.linesize, 0, c->height, dst_picture.data, dst_picture.linesize);
- //fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
- if (oc->oformat->flags & AVFMT_RAWPICTURE) {
- // Raw video case - directly store the picture in the packet //
- AVPacket pkt;
- av_init_packet(&pkt);
-
- pkt.flags |= AV_PKT_FLAG_KEY;
- pkt.stream_index = st->index;
- pkt.data = dst_picture.data[0];
- pkt.size = sizeof(AVPicture);
-
- ret = av_interleaved_write_frame(oc, &pkt);
- } else {
- AVPacket pkt = { 0 };
- int got_packet;
- av_init_packet(&pkt);
-
- // encode the image //
- ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
- if (ret < 0) {
- //fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
- exit(1);
- }
- // If size is zero, it means the image was buffered. //
-
- if (!ret && got_packet && pkt.size) {
- pkt.stream_index = st->index;
-
- // Write the compressed frame to the media file. //
- ret = av_interleaved_write_frame(oc, &pkt);
- } else {
- ret = 0;
- }
- }
-
- //
- // added 22 may in memory leak run
- //
- sws_freeContext(sws_ctx); //should be done once per render instead of per frame??
-
- if (ret != 0) {
- //fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
- exit(1);
- }
- frame_count++;
-
- //avcodec_free_frame(&frame);
- }
-
- void libav::exporter::close_video(AVFormatContext *oc, AVStream *st)
- {
- avcodec_close(st->codec);
- av_free(src_picture.data[0]);
- av_free(dst_picture.data[0]);
- av_free(frame);
-        av_free(outPixels); //SIGSEGV here???
- }
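
The deleted write_video_frame above creates and frees an SwsContext on every frame and asks, in its own comments, whether the context should live for the whole render instead. A minimal sketch of the per-render variant, assuming a hypothetical sws_ctx member (src_picture, dst_picture and sws_flags are the members/statics declared in libavwrapper.h below); libswscale's sws_getCachedContext() reuses the context for as long as the parameters do not change:

// Sketch only: per-render scaler reuse for the RGB24 -> YUV420P conversion.
// `sws_ctx` is an assumed member, initialised to NULL before the first frame.
void libav::exporter::write_video_frame(AVFormatContext *oc, AVStream *st, uint8_t *pixels)
{
    AVCodecContext *c = st->codec;

    // returns the existing context unchanged while width/height/formats stay the same
    sws_ctx = sws_getCachedContext(sws_ctx,
                                   c->width, c->height, AV_PIX_FMT_RGB24,
                                   c->width, c->height, AV_PIX_FMT_YUV420P,
                                   sws_flags, NULL, NULL, NULL);

    avpicture_fill(&src_picture, pixels, AV_PIX_FMT_RGB24, c->width, c->height);
    sws_scale(sws_ctx, src_picture.data, src_picture.linesize,
              0, c->height, dst_picture.data, dst_picture.linesize);

    // ... encode and interleave the packet exactly as in the deleted code ...
}
// close_video() would then call sws_freeContext(sws_ctx) once, instead of once per frame.
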
diff --git a/rotord/libavwrapper.h b/rotord/libavwrapper.h
deleted file mode 100755
index a03cbbe..0000000
--- a/rotord/libavwrapper.h
+++ /dev/null
@@ -1,222 +0,0 @@
- #ifndef libavwrapper_H
-#define libavwrapper_H
-
-/*
- * libavwrapper.h
- * May 2012 Christopher Bruns
- * The libavwrapper class is a C++ wrapper around the poorly documented
- * libavcodec movie API used by ffmpeg. I made extensive use of Nathan
- * Clack's implementation in the whisk project.
- *
- * The libavwrapper.h and libavwrapper.cpp files depend only on the libavcodec
- * and allied sets of libraries. To compartmentalize and reduce dependencies
- * I placed the Vaa3d specific use of this class into a separate set of
- * source files: loadV3dFFMpeg.h/cpp
- */
-
-
-#ifndef UINT64_C
-#define UINT64_C(c) (c ## ULL)
-#endif
-
-
-
-extern "C" {
-#include <libavcodec/avcodec.h>
-#include <libavformat/avformat.h>
-#include <libavutil/pixfmt.h>
-#include <libavutil/opt.h>
-#include <libavutil/imgutils.h>
-
-#include <libswscale/swscale.h> //?
-}
-
-/*
-#include <QFile>
-#include <QNetworkAccessManager>
-#include <QMutex>
-#include <QUrl>
-#include <QBuffer>
-*/
-
-
-#include <string>
-#include <stdexcept>
-#include <iostream>
-#include <fstream>
-
-namespace libav {
-
- static bool b_is_one_time_inited=false;
- // Some libavcodec calls are not reentrant
- //static QMutex mutex;
- void maybeInitFFMpegLib();
-
- static int sws_flags = SWS_BICUBIC;
-
-// Translated to C++ by Christopher Bruns May 2012
-// from ffmeg_adapt.c in whisk package by Nathan Clack, Mark Bolstadt, Michael Meeuwisse
- class decoder
- {
- public:
- enum Channel {
- RED = 0,
- GRAY = 0,
- GREEN = 1,
- BLUE = 2,
- ALPHA = 3
- };
-
-
- decoder(PixelFormat pixelFormat=PIX_FMT_RGB24);
- //decoder(QUrl url, PixelFormat pixelFormat=PIX_FMT_RGB24);
- virtual ~decoder();
- //bool open(QUrl url, enum PixelFormat formatParam = PIX_FMT_RGB24);
- //bool open(QIODevice& fileStream, QString& fileName, enum PixelFormat formatParam = PIX_FMT_RGB24);
- uint8_t getPixelIntensity(int x, int y, Channel c = GRAY) const;
- bool fetchFrame(int targetFrameIndex = 0);
- int getNumberOfFrames() const;
- int getWidth() const;
- int getHeight() const;
- int getNumberOfChannels() const;
- bool readNextFrame(int targetFrameIndex = 0);
- bool readNextFrameWithPacket(int targetFrameIndex, AVPacket& packet, AVFrame* pYuv);
- int seekToFrame(int targetFrameIndex = 0);
-
- // make certain members public, for use by Fast3DTexture class
- AVFrame *pFrameRGB;
- AVFrame *pRaw;
- AVFormatContext *container;
- AVCodecContext *pCtx;
- int videoStream;
- int previousFrameIndex;
- bool isOpen;
-
- bool open(std::string& fileName, enum PixelFormat formatParam = PIX_FMT_RGB24);
- bool open(char* fileName, enum PixelFormat formatParam = PIX_FMT_RGB24);
-
- protected:
-
-
- void initialize();
-
- bool openUsingInitializedContainer(enum PixelFormat formatParam = PIX_FMT_RGB24 );
- static bool avtry(int result, const std::string& msg);
-
- AVCodec *pCodec;
- uint8_t *buffer,
- *blank;
- //struct
- SwsContext *Sctx;
- int width, height;
- PixelFormat format;
- size_t numBytes;
- int numFrames;
- int sc; // number of color channels
-
- // For loading from URL
- /*
- static const int ioBufferSize = 32768;
- unsigned char * ioBuffer;
- QNetworkAccessManager networkManager;
- AVIOContext* avioContext;
- QFile fileStream;
- QNetworkReply* reply;
- QBuffer fileBuffer;
- QByteArray byteArray;
- */
- };
-
-
- // TODO - finish refactoring based on
- // http://svn.gnumonks.org/trunk/21c3-video/ffmpeg/ffmpeg-0.4.9-pre1/output_example.c
- class encoder
- {
- public:
- //typedef encoder::Channel Channel;
-
- encoder(const char * file_name, int width, int height, float _framerate=25.0f, enum AVCodecID codec_id = CODEC_ID_MPEG4);
- virtual ~encoder();
- void setPixelIntensity(int x, int y, int c, uint8_t value);
- void write_frame(float seconds,uint8_t *rgbdata);
- void write_frame(float seconds,uint16_t *audiodata);
- int get_audio_framesize(){ return audio_input_frame_size; }
- float get_audio_step(){return audiostep;};
-
- protected:
- AVFormatContext *container;
- AVCodecContext *pCtx;
- AVFrame *picture_yuv;
- AVFrame *picture_rgb;
- AVFrame *audio_frame;
- float timebase;
- struct SwsContext *Sctx;
-
- int audio_input_frame_size;
- float audiostep;
- };
-
-
- class exporter {
- public:
- virtual ~exporter();
- bool setup(int w,int h, int bitRate, int frameRate, std::string container);
- bool record(std::string filename);
- bool encodeFrame(unsigned char *pixels, uint16_t *samples);
- bool encodeFrame(unsigned char *pixels,AVPacket *audiopkt); //is possible to just copy the packets?
- bool encodeFrame(unsigned char *pixels);
- bool encodeFrame(uint16_t *samples);
- void finishRecord();
- int get_audio_framesize(){return audioframesize;};
- float get_audio_step(){return audiostep;};
-
- AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,enum AVCodecID codec_id);
- void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st);
- int open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st);
-
- void write_audio_frame(AVFormatContext *oc, AVStream *st,uint16_t *samples);
- void write_audio_frame(AVFormatContext *oc, AVStream *st,AVPacket *pkt);
- void close_audio(AVFormatContext *oc, AVStream *st);
-
- void write_video_frame(AVFormatContext *oc, AVStream *st, uint8_t *pixels);
- void close_video(AVFormatContext *oc, AVStream *st);
-
- private:
- AVOutputFormat *fmt;
- AVFormatContext *oc;
- AVStream *audio_st, *video_st;
- AVCodec *audio_codec, *video_codec;
- double audio_pts, video_pts;
-
- int audioframesize;
- float audiostep;
- int w;
- int h;
- int bitRate;
- int frameRate;
- std::string container;
-
- int outputframe;
-
- // video output //
-
- AVFrame *frame;
- AVPicture src_picture, dst_picture;
- int frame_count;
- uint8_t *outPixels;
-
-
- //************************************************************//
- // audio output //
-
- float t, tincr, tincr2;
- int audio_input_frame_size;
-
-
- };
-
-}
-
-
-
-#endif // libavwrapper_H
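
Although this header is deleted here, the libav::exporter interface it declares lives on (rotor.h below now includes libavwrapper_guarded.h and keeps using libav::exporter). For reference, the call order is setup → record → encodeFrame → finishRecord, which is exactly how the new Video_output::render() drives it. A minimal stand-alone sketch; the include name follows rotor.h below, and the sizes, bit rate and output filename are placeholders:

// Usage sketch only; values and filenames are not taken from the project.
#include <vector>
#include "libavwrapper_guarded.h"   // assumed to declare libav::exporter

int main() {
    libav::exporter exp;
    std::vector<unsigned char> rgb(640 * 360 * 3, 0);          // one black RGB24 frame
    if (exp.setup(640, 360, 4000000, 25, "mov") && exp.record("out.mov")) {
        for (int f = 0; f < 25 * 10; ++f)                      // 10 seconds at 25 fps
            exp.encodeFrame(rgb.data());                       // video-only overload
        exp.finishRecord();
    }
    return 0;
}

Audio is fed in through the encodeFrame(uint16_t *samples) overload, interleaved with the video calls as the new render() in rotor.cpp below shows.
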
diff --git a/rotord/ofxMovieExporter.cpp b/rotord/ofxMovieExporter.cpp
deleted file mode 100755
index 15abfe2..0000000
--- a/rotord/ofxMovieExporter.cpp
+++ /dev/null
@@ -1,426 +0,0 @@
-/*
- * ofxMovieExporter.cpp
- *
- * Copyright (c) 2011, Neil Mendoza, http://www.neilmendoza.com
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of 16b.it nor the names of its contributors may be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#include "ofxMovieExporter.h"
-//#include "ofThread.h"
-
- const std::string ofxMovieExporter::FILENAME_PREFIX = "capture";
- const std::string ofxMovieExporter::CONTAINER = "mov";
-
- ofxMovieExporter::ofxMovieExporter() {
- outputFormat = NULL;
- formatCtx = NULL;
- videoStream = NULL;
-
- codec = NULL;
- codecCtx = NULL;
- convertCtx = NULL;
-
- inPixels = NULL;
- outPixels = NULL;
- encodedBuf = NULL;
-
- inFrame = NULL;
- outFrame = NULL;
-
- // do one time encoder set up
- av_register_all();
-
- }
-
- bool ofxMovieExporter::setup(
- int outW,
- int outH,
- int bitRate,
- int frameRate,
- AVCodecID codecId,
- std::string container)
- {
- if (outW % 2 == 1 || outH % 2 == 1) {
- cerr << "ofxMovieExporter: Resolution must be a multiple of 2" << endl;
- return false;
- }
-
- this->outW = outW;
- this->outH = outH;
- this->frameRate = frameRate;
- this->bitRate = bitRate;
- this->codecId = codecId;
- this->container = container;
-
- frameInterval = 1.f / (float)frameRate;
-
- // HACK HACK HACK
- // Time not syncing
- // probably related to codec ticks_per_frame
- //frameInterval /= 3.f;
-
- recording = false;
- numCaptures = 0;
-
- inW=outW;
- inH=outH;
-
- convertCtx = sws_getContext(inW, inH, PIX_FMT_RGB24, outW, outH, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
-
- allocateMemory();
-
- return true;
- }
-
- ofxMovieExporter::~ofxMovieExporter()
- {
- if (recording) finishRecord();
-
- //stopThread(true);
- clearMemory();
- }
-
- bool ofxMovieExporter::record(std::string filePrefix, std::string folderPath)
- {
- initEncoder();
-
- std::ostringstream oss;
- oss << folderPath;
- if (folderPath != "" && (folderPath[folderPath.size()-1] != '/' && folderPath[folderPath.size()-1] != '\\'))
- oss << "/";
- oss << filePrefix << numCaptures << "." << container;
- outFileName = oss.str();
-
- // open the output file
- //if (url_fopen(&formatCtx->pb, outFileName.c_str(), 'wb' ) < 0) //url_fopen URL_WRONLY
- // ofLog(OF_LOG_ERROR, "ofxMovieExporter: Could not open file %s", outFileName.c_str());
- if (avio_open(&formatCtx->pb, outFileName.c_str(), AVIO_FLAG_WRITE) < 0) {
- cerr << "ofxMovieExporter: Could not open file "<< outFileName<<endl;
- return false;
- }
-
-
- //ofAddListener(ofEvents.draw, this, &ofxMovieExporter::checkFrame);
-
- AVDictionary *options; //= NULL; causes a forward declaration error!?
- options=NULL;
- // write the stream header, if any
- avformat_write_header(formatCtx,&options);
-
- lastFrameTime = 0;
- aframeNum = 0;
- vframeNum = 0;
- recording = true;
-
- return true;
- }
-
- void ofxMovieExporter::stop()
- {
-
- recording = false;
- numCaptures++;
-
- }
-
- void ofxMovieExporter::setPixelSource(unsigned char* pixels, int w, int h)
- {
-
- if (pixels == NULL)
- {
- //ofLog(OF_LOG_ERROR, "ofxMovieExporter: Could not set NULL pixel source");
- return;
- }
- pixelSource = pixels;
- inW = w;
- inH = h;
- usePixelSource = true;
-
- // resetup encoder etc
- setup(outW, outH, bitRate, frameRate, codecId, container);
- }
-
-
- int ofxMovieExporter::getNumCaptures()
- {
- return numCaptures;
- }
-
- void ofxMovieExporter::resetNumCaptures()
- {
- numCaptures = 0;
- }
-
-// PRIVATE
-
- void ofxMovieExporter::finishRecord()
- {
- av_write_trailer(formatCtx);
-
- // free the encoder
- avcodec_close(codecCtx);
- for(int i = 0; i < formatCtx->nb_streams; i++)
- {
- av_freep(&formatCtx->streams[i]->codec);
- av_freep(&formatCtx->streams[i]);
- }
- av_free(formatCtx);
- formatCtx = NULL;
- //url_fclose(formatCtx->pb);
- }
-
- void ofxMovieExporter::encodeFrame()
- {
-
- avpicture_fill((AVPicture*)inFrame, inPixels, PIX_FMT_RGB24, inW, inH);
- avpicture_fill((AVPicture*)outFrame, outPixels, PIX_FMT_YUV420P, outW, outH);
-
- //perform the conversion for RGB to YUV and size
- sws_scale(convertCtx, inFrame->data, inFrame->linesize, 0, inH, outFrame->data, outFrame->linesize);
-
- int outSize = avcodec_encode_video(codecCtx, encodedBuf, ENCODED_FRAME_BUFFER_SIZE, outFrame);
- if (outSize > 0)
- {
- AVPacket pkt;
- av_init_packet(&pkt);
- //pkt.pts = av_rescale_q(codecCtx->coded_frame->pts, codecCtx->time_base, videoStream->time_base);
- //if(codecCtx->coded_frame->key_frame) pkt.flags |= AV_PKT_FLAG_KEY;
- //pkt.pts = frameNum;//ofGetFrameNum();//codecCtx->coded_frame->pts;
- pkt.flags |= AV_PKT_FLAG_KEY;
- pkt.dts = pkt.pts;
- pkt.stream_index = videoStream->index;
- pkt.data = encodedBuf;
- pkt.size = outSize;
- av_write_frame(formatCtx, &pkt);
- }
-
- vframeNum++;
- }
-
- bool ofxMovieExporter::encodeFrame(unsigned char *pixels)
- {
-
- if (pixels==nullptr) return false;
-
- //is it possible to skip the first avpicture_fill?
-
- avpicture_fill((AVPicture*)inFrame, pixels, PIX_FMT_RGB24, inW, inH);
- avpicture_fill((AVPicture*)outFrame, outPixels, PIX_FMT_YUV420P, outW, outH);
-
- //perform the conversion for RGB to YUV and size
- sws_scale(convertCtx, inFrame->data, inFrame->linesize, 0, inH, outFrame->data, outFrame->linesize);
-
-
- AVPacket pkt;
- int outSize = avcodec_encode_video(codecCtx, encodedBuf, ENCODED_FRAME_BUFFER_SIZE, outFrame);
- if (outSize > 0)
- {
-
- av_init_packet(&pkt);
- pkt.pts = (int64_t)vframeNum*(frameInterval*(((float)videoStream->time_base.den)/videoStream->time_base.num));//ofGetFrameNum();//codecCtx->coded_frame->pts;
- pkt.flags |= AV_PKT_FLAG_KEY;
- pkt.dts = pkt.pts;
- pkt.stream_index = videoStream->index;
- pkt.data = encodedBuf;
- pkt.size = outSize;
- av_interleaved_write_frame(formatCtx, &pkt);
-
- vframeNum++;
- }
-
-
- //is it as simple as writing an audio packet for every video packet?
- // avcodec_encode_audio2(AVCodecContext *avctx,AVPacket *avpkt,const AVFrame *frame,int *got_packet_ptr);
- AVPacket apkt;
- av_init_packet(&apkt);
- apkt.pts = (int64_t)aframeNum*(aframeInterval*(((float)videoStream->time_base.den)/videoStream->time_base.num));//ofGetFrameNum();//codecCtx->coded_frame->pts;
-
- while(apkt.pts<pkt.pts) {
- apkt.flags |= AV_PKT_FLAG_KEY;
- apkt.dts = apkt.pts;
- apkt.stream_index = audioStream->index;
- //apkt.data = encodedBuf;
- apkt.size = outSize;
-
- AVFrame* afrm=avcodec_alloc_frame();
- afrm->nb_samples=44100/25;
- afrm->format=AV_SAMPLE_FMT_S16;
- uint8_t *d=new uint8_t[afrm->nb_samples*2*2];
- afrm->data[0]=d;
-
- int gpp;
-
- //avcodec_fill_audio_frame(afrm, 2, AV_SAMPLE_FMT_S16,(uint8_t *)d,(44100/25) * 2 * 2,1);
-
- int audioOutSize = avcodec_encode_audio2(acodecCtx,&apkt,afrm,&gpp);
-
- av_interleaved_write_frame(formatCtx, &apkt);
-
- aframeNum++;
- apkt.pts = (int64_t)aframeNum*(aframeInterval*(((float)videoStream->time_base.den)/videoStream->time_base.num));//ofGetFrameNum();//codecCtx->coded_frame->pts;
- }
-
-
- return true;
- }
-
- void ofxMovieExporter::allocateMemory()
- {
- // clear if we need to reallocate
- if(inPixels)
- clearMemory();
-
- inPixels = new unsigned char[inW * inH * 3];
-//#endif
- inFrame = avcodec_alloc_frame();
-
- // allocate output stuff
- int outSize = avpicture_get_size(PIX_FMT_YUV420P, outW, outH);
- outPixels = (unsigned char*)av_malloc(outSize);
- outFrame = avcodec_alloc_frame();
-
- encodedBuf = (unsigned char*)av_malloc(ENCODED_FRAME_BUFFER_SIZE);
- }
-
- void ofxMovieExporter::clearMemory() {
- delete[] inPixels;
-
- inPixels = NULL;
-
- av_free(inFrame);
- av_free(outFrame);
- av_free(encodedBuf);
- av_free(outPixels);
-
- inFrame = NULL;
- outFrame = NULL;
- encodedBuf = NULL;
- outPixels = NULL;
- }
-
- void ofxMovieExporter::initEncoder()
- {
- /////////////////////////////////////////////////////////////
- // find codec
- codec = avcodec_find_encoder(codecId);
- //if (!codec) ofLog(OF_LOG_ERROR, "ofxMovieExporter: Codec not found");
-
-
-
- ////////////////////////////////////////////////////////////
- // auto detect the output format from the name. default is mpeg.
- ostringstream oss;
- oss << "amovie." << container;
- outputFormat = av_guess_format(NULL, oss.str().c_str(), NULL);
- //if (!outputFormat) ofLog(OF_LOG_ERROR, "ofxMovieExporter: Could not guess output container for an %s file (ueuur!!)", container.c_str());
- // set the format codec (the format also has a default codec that can be read from it)
- outputFormat->video_codec = codec->id;
-
- acodec = avcodec_find_encoder(outputFormat->audio_codec);
- //--------------------------->
- //leaving the audio codec at the default for now
- //--------------------------->
-
- /////////////////////////////////////////////////////////////
- // allocate the format context
- formatCtx = avformat_alloc_context();
- //if (!formatCtx) ofLog(OF_LOG_ERROR, "ofxMovieExporter: Could not allocate format context");
- formatCtx->oformat = outputFormat;
-
-
-
- /////////////////////////////////////////////////////////////
- // set up the video stream
- videoStream = avformat_new_stream(formatCtx,codec);
- //videoStream = av_new_stream(formatCtx, 0);
-
-
- /////////////////////////////////////////////////////////////
- // init codec context for video
- codecCtx = videoStream->codec;
- codecCtx->bit_rate = bitRate;
- codecCtx->width = outW;
- codecCtx->height = outH;
-
- codecCtx->time_base.num = 1;//codecCtx->ticks_per_frame;
- codecCtx->time_base.den = frameRate;
- videoStream->time_base = codecCtx->time_base;
- //audioStream->time_base = codecCtx->time_base; //???has the capability of crashing
-
- codecCtx->gop_size = 10; /* emit one intra frame every ten frames */
- codecCtx->pix_fmt = PIX_FMT_YUV420P;
-
- if (codecCtx->codec_id == CODEC_ID_MPEG1VIDEO)
- {
- /* needed to avoid using macroblocks in which some coeffs overflow
-            this doesn't happen with normal video, it just happens here as the
-            motion of the chroma plane doesn't match the luma plane */
- codecCtx->mb_decision=2;
- }
-        // some formats want stream headers to be separate
- if(!strcmp(formatCtx->oformat->name, "mp4") || !strcmp(formatCtx->oformat->name, "mov") || !strcmp(formatCtx->oformat->name, "3gp"))
- codecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
-
- // set the output parameters (must be done even if no parameters).
- //if (
- //
- //
- // ???????
-
- //av_set_parameters(formatCtx, NULL);
- //
- //
- //
- //
-
-            // < 0) ofLog(OF_LOG_ERROR, "ofxMovieExporter: Could not set format parameters");
-
- AVDictionary *options; //= NULL; causes a forward declaration error!?
- options=NULL;
- // open codec
- //if (
- avcodec_open2(codecCtx, codec,&options);
-            // < 0) ofLog(OF_LOG_ERROR, "ofxMovieExporter: Could not open codec");
-
- //do all the same for audio?
- audioStream = av_new_stream(formatCtx, 1); //???
- acodecCtx = audioStream->codec;
- acodecCtx->sample_rate=44100;
- acodecCtx->sample_fmt=AV_SAMPLE_FMT_S16;
- acodecCtx->channels=2;
- acodecCtx->channel_layout=AV_CH_LAYOUT_STEREO;
- avcodec_open2(acodecCtx, acodec,&options);
-
- if (outputFormat->flags & AVFMT_GLOBALHEADER) {
- videoStream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
- audioStream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
- }
-
-
- av_dump_format(formatCtx, 0, oss.str().c_str(), 1);
- }
-
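
One concrete problem with the audio loop in the deleted encodeFrame() above: it allocates an AVFrame and a raw sample buffer on every iteration and never releases either, and the packet filled by avcodec_encode_audio2() is never freed. A sketch of the same silent-frame write with the per-call allocations cleaned up, using the same deprecated-era API as the rest of the file (function and parameter names are illustrative):

// Sketch: encode and interleave one silent stereo S16 frame, freeing everything before returning.
static void write_silent_audio_frame(AVFormatContext *fmt, AVCodecContext *ac, AVStream *ast)
{
    AVPacket apkt;
    av_init_packet(&apkt);
    apkt.data = NULL;                                     // let the encoder allocate the payload
    apkt.size = 0;

    AVFrame *afrm = avcodec_alloc_frame();
    afrm->nb_samples = ac->sample_rate / 25;              // one 25 fps video frame's worth, as above
    afrm->format     = AV_SAMPLE_FMT_S16;
    int bufsize = afrm->nb_samples * 2 /*channels*/ * 2 /*bytes per sample*/;
    uint8_t *d = new uint8_t[bufsize]();                  // zero-initialised = silence
    avcodec_fill_audio_frame(afrm, 2, AV_SAMPLE_FMT_S16, d, bufsize, 1);

    int got_packet = 0;
    if (avcodec_encode_audio2(ac, &apkt, afrm, &got_packet) == 0 && got_packet) {
        apkt.stream_index = ast->index;
        av_interleaved_write_frame(fmt, &apkt);
    }

    av_free_packet(&apkt);                                // the deleted loop never freed these three
    delete[] d;
    avcodec_free_frame(&afrm);
}
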
diff --git a/rotord/ofxMovieExporter.h b/rotord/ofxMovieExporter.h
deleted file mode 100755
index 28447d7..0000000
--- a/rotord/ofxMovieExporter.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * ofxMovieExporter.h
- *
- * Copyright (c) 2011, Neil Mendoza, http://www.neilmendoza.com
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of 16b.it nor the names of its contributors may be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-//#pragma once
-
-#include <unordered_map>
-#include <deque>
-#include <math.h>
-#include <memory>
-
-#include "Poco/Net/HTTPServer.h"
-#include "Poco/Net/HTTPResponse.h"
-#include "Poco/UUID.h"
-#include "Poco/UUIDGenerator.h"
-#include "Poco/Notification.h"
-#include "Poco/NotificationCenter.h"
-#include "Poco/Observer.h"
-#include "Poco/ThreadPool.h"
-#include "Poco/Thread.h"
-#include "Poco/Task.h"
-#include "Poco/Runnable.h"
-#include "Poco/Mutex.h"
-#include "Poco/Random.h"
-#include "Poco/AutoPtr.h"
-#include "Poco/File.h"
-#include "xmlIO.h"
-//#define _THREAD_CAPTURE
-#include <string>
-#include <iostream>
-/*
-#include "ofMain.h"
-
-// needed for gcc on win
-#ifdef TARGET_WIN32
- #ifndef INT64_C
- #define INT64_C(c) (c ## LL)
- #define UINT64_C(c) (c ## ULL)
- #endif
-#endif
-*/
-//#define UINT64_C(c) (c ## ULL)
-
-
-extern "C"
-{
- //needed both
- #include <libavcodec/avcodec.h>
- #include <libavformat/avformat.h>
- #include <libavutil/mathematics.h>
- //needed ofxMovieExporter
- #include <libswscale/swscale.h>
- //rest needed audio loader
- #include <libavutil/opt.h>
- //#include <libavutil/channel_layout.h>
- #include <libavutil/common.h>
- #include <libavutil/imgutils.h>
-
- #include <libavutil/samplefmt.h>
-
- #include <libavutil/dict.h>
- //#include <libavutil/dict.c> stops the compiler error but causes a linker error. does libavcodec need to be statically linked?
- //#include <libavutil/imgutils.h>
- //#include <libavutil/samplefmt.h>
- //#include <libavutil/timestamp.h>
-}
-class ofxMovieExporter
-//#ifdef _THREAD_CAPTURE
-// : public ofThread
-//#endif
- {
- public:
- static const int ENCODED_FRAME_BUFFER_SIZE = 500000;
- // defaults
- static const int BIT_RATE = 4000000;
- static const int FRAME_RATE = 25;
- static const int OUT_W = 640;
- static const int OUT_H = 480;
- static const int INIT_QUEUE_SIZE = 50;
- static const AVCodecID CODEC_ID = CODEC_ID_MPEG4;
- static const std::string FILENAME_PREFIX;
- static const std::string CONTAINER;
-
- ofxMovieExporter();
- ~ofxMovieExporter();
- // tested so far with...
- // codecId = CODEC_ID_MPEG4, container = "mp4"
- // codecId = CODEC_ID_MPEG2VIDEO, container = "mov"
- bool setup(int outW = OUT_W, int outH = OUT_H, int bitRate = BIT_RATE, int frameRate = FRAME_RATE, AVCodecID codecId = CODEC_ID, std::string container = CONTAINER);
- bool record(std::string filePrefix=FILENAME_PREFIX, std::string folderPath="");
- bool encodeFrame(unsigned char *pixels);
- void finishRecord();
-
- void stop();
- bool isRecording() const;
-
- // set an external pixel source, assumes 3 Byte RGB
- // also sets the recording size but does not crop to the recording area
- void setPixelSource(unsigned char* pixels, int w, int h);
-
- // get the number files that have been captured so far
- int getNumCaptures();
-
- // reset the filename counter back to 0
- void resetNumCaptures();
-
- // get the recording size
- inline int getRecordingWidth() {return outW;}
- inline int getRecordingHeight() {return outH;}
-
-
- private:
-//#ifdef _THREAD_CAPTURE
-// void threadedFunction();
-// deque<unsigned char*> frameQueue;
-// deque<unsigned char*> frameMem;
-// ofMutex frameQueueMutex;
-// ofMutex frameMemMutex;
-//#endif
- void initEncoder();
- void allocateMemory();
- void clearMemory();
-
- void encodeFrame();
-
-
- std::string container;
- AVCodecID codecId;
-
- bool recording;
- int numCaptures;
- int frameRate;
- int bitRate;
- float frameInterval;
- float aframeInterval;
- float lastFrameTime;
-
- std::string outFileName;
-
- AVOutputFormat* outputFormat;
- AVFormatContext* formatCtx;
- AVStream* videoStream;
- AVStream* audioStream;
-
- AVCodec* codec;
- AVCodecContext* codecCtx;
-
- AVCodec* acodec;
- AVCodecContext* acodecCtx;
-
-
- SwsContext* convertCtx;
-
- unsigned char* inPixels;
- unsigned char* outPixels;
- unsigned char* encodedBuf;
-
- AVFrame* inFrame;
- AVFrame* outFrame;
-
- int posX, posY;
- int inW, inH;
- int outW, outH;
-
- bool usePixelSource;
- unsigned char* pixelSource;
-
- int aframeNum;
- int vframeNum;
- };
-
diff --git a/rotord/rotor.cpp b/rotord/rotor.cpp
index bd1ace9..446a547 100755
--- a/rotord/rotor.cpp
+++ b/rotord/rotor.cpp
@@ -178,7 +178,8 @@ void Audio_analysis::print_features(){
cerr<<endl;
}
-bool Video_output::render(const float duration, const float framerate,const string &output_filename,const string &audio_filename,float& progress){
+/*
+bool Video_output::render_encoder(const float duration, const float framerate,const string &output_filename,const string &audio_filename,float& progress){
//
//setup defaults
@@ -219,7 +220,7 @@ bool Video_output::render(const float duration, const float framerate,const stri
Image* i=get_output(Frame_spec(vf,framerate,duration,outW,outH));
if (i) {
//exporter->encodeFrame(i->RGBdata);
-
+
//encoder->picture_rgb.pts=;
encoder.write_frame(vf,i->RGBdata);
}
@@ -235,6 +236,76 @@ bool Video_output::render(const float duration, const float framerate,const stri
return false;
}
+*/
+
+bool Video_output::render(const float duration, const float framerate,const string &output_filename,const string &audio_filename,float& progress){
+
+ //
+ //setup defaults
+ int outW=640;
+ int outH=360;
+ int bitRate=4000000;
+ int frameRate=25;
+ AVCodecID codecId=AV_CODEC_ID_MPEG4;
+ std::string container ="mov";
+
+ bool usingaudio=audioloader.setup(audio_filename);
+ //at the moment it crashes if you render before audio is loaded
+
+ float spct=100.0f/duration;
+
+ if (exporter->setup(outW,outH,bitRate,frameRate,container)) { //codecId,
+ if (exporter->record(output_filename)) {
+
+ cerr << "Rotor: Video_output rendering " << duration << " seconds at " << framerate << " fps, audio frame size: " << exporter->get_audio_framesize()<<endl;
+ //25fps video and 43.06640625fps audio? hmm
+ //how to get the timecodes correct for the interleaved files
+
+ float vstep=1.0f/framerate;
+ float v=0.0f;
+ float vf=0.0f;
+ float af=0.0f;
+ while (vf<duration){ //-vstep) {
+ while (!fless(af,vf)) {
+ //insert audio frames until we are ahead of the video
+ exporter->encodeFrame(audioloader.get_samples(exporter->get_audio_framesize()));
+ af+=exporter->get_audio_step();
+
+ }
+
+ /*
+ [mp3 @ 0x7fffe40330e0] max_analyze_duration 5000000 reached at 5015510 microseconds
+ [mp3 @ 0x7fffe4033ec0] Insufficient thread locking around avcodec_open/close()
+ [mp3 @ 0x7fffe40330e0] Estimating duration from bitrate, this may be inaccurate
+ [libx264 @ 0x7fffe8003940] using cpu capabilities: MMX2 SSE2Fast SSSE3 FastShuffle SSE4.2
+ [libx264 @ 0x7fffe8003940] profile High, level 3.0
+ [libx264 @ 0x7fffe8003940] 264 - core 123 r2189 35cf912 - H.264/MPEG-4 AVC codec - Copyleft 2003-2012 - http://www.videolan.org/x264.html - options: cabac=1 ref=3 deblock=1:0:0 analyse=0x3:0x113 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=1 me_range=16 chroma_me=1 trellis=1 8x8dct=1 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=-2 threads=12 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=3 b_pyramid=2 b_adapt=1 b_bias=0 direct=1 weightb=1 open_gop=0 weightp=2 keyint=10 keyint_min=1 scenecut=40 intra_refresh=0 rc_lookahead=10 rc=abr mbtree=1 bitrate=400 ratetol=1.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
+ Assertion ff_avcodec_locked failed at libavcodec/utils.c:2967
+ */
+
+
+ Image* i=get_output(Frame_spec(vf,framerate,duration,outW,outH));
+ if (i) {
+ exporter->encodeFrame(i->RGBdata);
+
+ }
+ vf+=vstep;
+ progress=vf/duration;
+ }
+
+ exporter->finishRecord();
+ cerr << "Rotor: Video_output finished "<< endl;
+
+ audioloader.close();
+
+ return true;
+ }
+ }
+
+ audioloader.close();
+
+ return false;
+}
bool Video_loader::load(const string &filename){
/*
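
The af/vf loop in the new render() keeps the audio clock at or just ahead of the video clock, one codec frame at a time. The "43.06640625fps audio" in the comment is just the codec frame size: 44100 Hz divided by 1024 samples per frame is exactly 43.06640625 audio frames per second, against 25 video frames per second. A stand-alone sketch of the same scheduling; the frame size and sample rate are assumed constants here, whereas the real code uses get_audio_framesize()/get_audio_step() and the fless() helper rather than the plain <= below:

// Scheduling sketch only: mirrors the intent of the af/vf bookkeeping in render().
#include <cstdio>

int main() {
    const float framerate   = 25.0f;
    const float sample_rate = 44100.0f;      // assumed
    const float frame_size  = 1024.0f;       // assumed codec frame size
    const float vstep = 1.0f / framerate;            // 0.040 s of video per frame
    const float astep = frame_size / sample_rate;    // ~0.0232 s of audio per frame
    float vf = 0.0f, af = 0.0f;
    for (int frame = 0; vf < 1.0f; ++frame) {        // one second of output
        int audio_frames = 0;
        while (af <= vf) { af += astep; ++audio_frames; }   // push audio ahead of video
        std::printf("video frame %2d at %.3f s: %d audio frame(s) written first\n",
                    frame, vf, audio_frames);
        vf += vstep;
    }
    return 0;
}
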
diff --git a/rotord/rotor.h b/rotord/rotor.h
index e9b272c..7ebde45 100755
--- a/rotord/rotor.h
+++ b/rotord/rotor.h
@@ -54,17 +54,16 @@ extern "C" {
}
*/
-#include "ofxMovieExporter.h"
#define AUDIO_INBUF_SIZE 20480
#define AUDIO_REFILL_THRESH 4096
#include "vampHost.h"
#include "xmlIO.h"
-#include "libavaudioloader.h"
+//#include "libavaudioloader.h"
//#include "libavexporter.h"
#include "utils.h" //fequal
-#include "libavwrapper.h"
+#include "libavwrapper_guarded.h"
namespace Rotor {
#define IDLE 0
@@ -686,9 +685,9 @@ namespace Rotor {
Video_output(){};
Video_output(map<string,string> &settings) {
base_settings(settings);
- //exporter=new libav::Exporter();
+ exporter=new libav::exporter();
};
- ~Video_output(){ /*delete exporter;*/ };
+ ~Video_output(){ delete exporter; };
Image *output(const Frame_spec &frame){
if (image_inputs[0]->connection) {
return ((Image_node*)(image_inputs[0]->connection))->get_output(frame);
@@ -700,8 +699,8 @@ namespace Rotor {
private:
//ofxMovieExporter *exporter;
- //libav::Exporter *exporter;
- libav::Audioloader audioloader;
+ libav::exporter *exporter;
+ libav::audioloader audioloader;
//libav::encoder encoder;
};
class Video_loader: public Image_node {
@@ -1184,7 +1183,7 @@ namespace Rotor {
float output_framerate;
bool audio_loaded;
- libav::Audioloader audioloader;
+ //libav::audioloader audioloader;
};
}
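
One caveat in the hunk above: the default constructor Video_output(){} never initialises exporter, so the new ~Video_output(){ delete exporter; } deletes an indeterminate pointer whenever a node is default-constructed. A sketch of the usual fix, assuming C++11 is available (this is not the project's class; the include name is borrowed from rotor.h above):

// Ownership sketch: either initialise the raw pointer, or let unique_ptr own it.
#include <memory>
#include "libavwrapper_guarded.h"   // assumed to declare libav::exporter

class Video_output_sketch {
public:
    Video_output_sketch() = default;                      // exporter stays empty; destruction is safe
    void init() { exporter.reset(new libav::exporter()); }
private:
    std::unique_ptr<libav::exporter> exporter;            // freed automatically, no hand-written delete
};
// Keeping the raw pointer instead, "libav::exporter *exporter = nullptr;" (or setting it
// in Video_output(){}) is enough to make the unconditional delete safe.
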
diff --git a/rotord/rotord.cbp b/rotord/rotord.cbp
index a0d52df..80f2270 100644
--- a/rotord/rotord.cbp
+++ b/rotord/rotord.cbp
@@ -52,22 +52,17 @@
<Unit filename="Pixels.cpp" />
<Unit filename="Pixels.h" />
<Unit filename="graph.cpp" />
- <Unit filename="gstvideoloader.cpp" />
- <Unit filename="gstvideoloader.h" />
<Unit filename="libavaudioloader.cpp" />
<Unit filename="libavaudioloader.h" />
- <Unit filename="libavexporter.cpp" />
- <Unit filename="libavexporter.h" />
+ <Unit filename="libavwrapper.cpp" />
+ <Unit filename="libavwrapper.h" />
<Unit filename="ofUtils.cpp" />
<Unit filename="ofUtils.h" />
- <Unit filename="ofxMovieExporter.cpp" />
- <Unit filename="ofxMovieExporter.h" />
<Unit filename="rendercontext.cpp" />
<Unit filename="rotor.cpp" />
<Unit filename="rotor.h" />
<Unit filename="rotord.cpp" />
<Unit filename="rotord.h" />
- <Unit filename="rotord.xml" />
<Unit filename="system.h" />
<Unit filename="tinyxml.cpp" />
<Unit filename="tinyxml.h" />
diff --git a/rotord/settings.xml b/rotord/settings.xml
index 500c8d9..387e1c1 100644
--- a/rotord/settings.xml
+++ b/rotord/settings.xml
@@ -1,3 +1,3 @@
<?xml version="1.0" encoding="ISO-8859-1"?>
-<Rotor port="9010" graph_dir="/mnt/rotor/graphs/" media_dir="/mnt/rotor/media/" output_dir="/mnt/rotor/output/" />
+<Rotor port="9010" graph_dir="/media/tim/d78df0e4-122f-48cd-94f9-b56c85c14521/graphs/" media_dir="/media/tim/d78df0e4-122f-48cd-94f9-b56c85c14521/media/" output_dir="/media/tim/d78df0e4-122f-48cd-94f9-b56c85c14521/output/" />