author    Tim Redfern <tim@herge.(none)>    2013-05-16 17:10:08 +0100
committer Tim Redfern <tim@herge.(none)>    2013-05-16 17:10:08 +0100
commit    fad2c7236e70faf0669dde3e6cbd51986fdbc6ee (patch)
tree      3e639c8ccb12ab8289326eaa657abd96fa108f21
parent    8d88180d523d82e5c6535ea03ef22791d0cbd9d0 (diff)
major leakage
-rw-r--r--  rotord/02.xml                     15
-rw-r--r--  rotord/libavexporter.h             6
-rwxr-xr-x  rotord/rotor.h                    13
-rwxr-xr-x  rotord/rotord.cpp                  4
-rw-r--r--  vaa3d_wrapper/FFMpegVideo.cpp    737
-rw-r--r--  vaa3d_wrapper/FFMpegVideo.h      130
-rw-r--r--  vaa3d_wrapper/loadV3dFFMpeg.cpp  143
-rw-r--r--  vaa3d_wrapper/loadV3dFFMpeg.h     20
8 files changed, 1051 insertions, 17 deletions
diff --git a/rotord/02.xml b/rotord/02.xml
index dfc13df..3af92e1 100644
--- a/rotord/02.xml
+++ b/rotord/02.xml
@@ -1,16 +1,7 @@
<?xml version="1.0" encoding="ISO-8859-1"?>
<patchbay ID="0f7aa258-7c2f-11e2-abbd-133252267708">Off and on template ©Rotor 2013
- <node ID="01" type="audio_analysis" soname="qm-vamp-plugins" id="qm-tempotracker" output="signal">beats
- </node>
- <node ID="02" type="audio_analysis" soname="qm-vamp-plugins" id="qm-segmenter" output="signal">segmenter
- </node>
- <node ID="04" type="bang" output="signal">outputs 0 except when signal first passes a new integer: then 1
- <signal_input from="01">signal to analyse</signal_input>
- </node>
- <node ID="05" type="signal_output">outputs data when changed
- <signal_input from="04">signal to output</signal_input>
- </node>
- <node ID="06" type="video_output">renders the video
- <image_input from="04">video to output</image_input>
+ <node ID="01" type="testcard" output="image"></node>
+ <node ID="02" type="video_output">renders the video
+ <image_input from="01">video to output</image_input>
</node>
</patchbay>
diff --git a/rotord/libavexporter.h b/rotord/libavexporter.h
index d9d6f38..c5e66e3 100644
--- a/rotord/libavexporter.h
+++ b/rotord/libavexporter.h
@@ -224,7 +224,7 @@ namespace libav {
//?? why is ffmpeg reporting fltp as the sample format??? doesn't seem to have an effect to change this though
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
if (!samples) {
- free(sampleptr);
+ delete[] sampleptr;
}
if (ret < 0) {
//fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
@@ -464,6 +464,8 @@ namespace libav {
exit(1);
}
frame_count++;
+
+ //avcodec_free_frame(&frame);
}
static void close_video(AVFormatContext *oc, AVStream *st)
@@ -472,7 +474,7 @@ namespace libav {
av_free(src_picture.data[0]);
av_free(dst_picture.data[0]);
av_free(frame);
- //av_free(outPixels); SIGSEGV here
+ av_free(outPixels); //SIGSEGV here???
}
class Exporter {
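The libavexporter.h hunk above pairs the deallocator with the allocator: sampleptr is evidently created with new[], so releasing it with free() was undefined behavior. A minimal sketch of the matching rule, assuming the new[] allocation that the hunk implies:

    // Each allocation family has exactly one legal release call.
    #include <cstdint>
    extern "C" {
    #include <libavutil/mem.h>
    }

    int main()
    {
        int16_t *samples = new int16_t[4096]; // C++ array new...
        delete[] samples;                     // ...pairs only with delete[]

        void *pixels = av_malloc(4096);       // libav allocator...
        av_free(pixels);                      // ...pairs only with av_free
        return 0;
    }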
diff --git a/rotord/rotor.h b/rotord/rotor.h
index e128fd5..d8ee021 100755
--- a/rotord/rotor.h
+++ b/rotord/rotor.h
@@ -486,7 +486,7 @@ namespace Rotor {
base_settings(settings);
divide_amount=ofToFloat(find_setting(settings,"amount"));
for (auto p:parameter_inputs){
- if (p->parameter=="divide_amount") p->receiver=&divide_amount;
+ if (p->parameter=="amount") p->receiver=&divide_amount;
}
};
Signal_divide* clone(map<string,string> &_settings) { return new Signal_divide(_settings);};
@@ -789,7 +789,10 @@ namespace Rotor {
if (p->parameter=="mode") p->receiver=&mode;
}
};
- ~Echo_trails(){if (image) {delete image;} };
+ //~Echo_trails(){if (image) {delete image;} };
+ ~Echo_trails(){
+ for (auto i:images) {delete i.second;}
+ };
Image *output(const Frame_spec &frame){
//check if cache is valid
if (frame.w!=image->w||frame.h!=image->h){ //or framerate changed?
@@ -857,7 +860,7 @@ namespace Rotor {
return nullptr;
}
Echo_trails* clone(map<string,string> &_settings) { return new Echo_trails(_settings);};
- private:
+ protected:
float duration,fadeto;
int number;
int interval,total,lastframe; //number of frames between displayed echoes
@@ -868,6 +871,9 @@ namespace Rotor {
class Node_factory{
public:
Node_factory();
+ ~Node_factory(){
+ for (auto t:type_map) delete t.second;
+ }
void add_type(string type,Node* proto){
type_map[type]=proto;
};
@@ -938,6 +944,7 @@ namespace Rotor {
output_framerate=25.0f;
audio_loaded=false;
};
+ ~Render_context(){delete audio_thumb;};
void runTask();
void add_queue(int item);
Command_response session_command(const std::vector<std::string>& command);
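The new destructors in rotor.h plug the leaks by hand, matching the commit's theme. Since the file already uses C++11 (auto, range-for), the same ownership could be stated once with smart pointers; a hypothetical variant of Node_factory, not the committed code:

    #include <map>
    #include <memory>
    #include <string>

    struct Node { virtual ~Node() = default; };

    class Node_factory {
    public:
        // Prototype nodes are owned by the map; no hand-written destructor needed.
        void add_type(const std::string &type, std::unique_ptr<Node> proto) {
            type_map[type] = std::move(proto);
        }
    private:
        std::map<std::string, std::unique_ptr<Node>> type_map;
    };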
diff --git a/rotord/rotord.cpp b/rotord/rotord.cpp
index 1a68638..bf1be02 100755
--- a/rotord/rotord.cpp
+++ b/rotord/rotord.cpp
@@ -188,6 +188,9 @@ HTTPRequestHandler* RotorRequestHandlerFactory::createRequestHandler(const HTTPS
content="<status>Rotor: bad request</status>\n";
}
}
+ else if (command[0]=="exit") {
+ exit(0);
+ }
else {
bool found=false;
for (auto& task: manager.taskList()) { //c++11
@@ -290,3 +293,4 @@ int RotorServer::main(const std::vector<std::string>& args){
}
return Application::EXIT_OK;
}
+
\ No newline at end of file
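One caveat on the new "exit" command in rotord.cpp: exit(0) inside a request handler skips stack unwinding, so the destructors this commit adds never run for live objects. A hedged alternative sketch (illustrative names, not Rotor's API): have the handler signal the main loop and let it stop the HTTPServer cleanly.

    #include <Poco/Event.h>

    static Poco::Event shutdownRequested; // shared between handler and main loop

    static void handleExitCommand()
    {
        shutdownRequested.set(); // handler flags shutdown instead of exit(0)
    }

    int main()
    {
        handleExitCommand();      // stands in for the HTTP "exit" request
        shutdownRequested.wait(); // main loop: wake up, then stop the server
        return 0;                 // stack unwinds; destructors run normally
    }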
diff --git a/vaa3d_wrapper/FFMpegVideo.cpp b/vaa3d_wrapper/FFMpegVideo.cpp
new file mode 100644
index 0000000..399319b
--- /dev/null
+++ b/vaa3d_wrapper/FFMpegVideo.cpp
@@ -0,0 +1,737 @@
+#include "FFMpegVideo.h"
+
+#ifdef USE_FFMPEG
+
+extern "C"
+{
+#include <libswscale/swscale.h>
+}
+
+#include <QNetworkReply>
+#include <QNetworkRequest>
+#include <QEventLoop>
+#include <QFileInfo>
+#include <QMutexLocker>
+#include <QDebug>
+#include <stdexcept>
+#include <iostream>
+#include <cassert>
+
+using namespace std;
+
+// Translated to C++ by Christopher Bruns May 2012
+// from ffmeg_adapt.c in whisk package by Nathan Clack, Mark Bolstadt, Michael Meeuwisse
+
+QMutex FFMpegVideo::mutex;
+
+// Avoid link error on some macs
+#ifdef __APPLE__
+extern "C" {
+#include <stdlib.h>
+#include <errno.h>
+// #include "compiler/compiler.h"
+
+/*
+ * Darwin doesn't have posix_memalign(), provide a private
+ * weak alternative
+ */
+ /*
+int __weak posix_memalign(void **ptr, size_t align, size_t size)
+{
+ if (*ptr)
+ return 0;
+
+ return ENOMEM;
+}
+*/
+}
+#endif
+
+// Custom read function so FFMPEG does not need to read from a local file by name.
+// But rather from a stream derived from a URL or whatever.
+extern "C" {
+
+int readFunction(void* opaque, uint8_t* buf, int buf_size)
+{
+ QIODevice* stream = (QIODevice*)opaque;
+ int numBytes = stream->read((char*)buf, buf_size);
+ return numBytes;
+}
+
+// http://cdry.wordpress.com/2009/09/09/using-custom-io-callbacks-with-ffmpeg/
+int64_t seekFunction(void* opaque, int64_t offset, int whence)
+{
+ QIODevice* stream = (QIODevice*)opaque;
+ if (stream == NULL)
+ return -1;
+ else if (whence == AVSEEK_SIZE)
+ return -1; // "size of my handle in bytes"
+ else if (stream->isSequential())
+ return -1; // cannot seek a sequential stream
+ else if (whence == SEEK_CUR) { // relative to start of file
+ if (! stream->seek(stream->pos() + offset) )
+ return -1;
+ }
+ else if (whence == SEEK_END) { // relative to end of file
+ assert(offset < 0);
+ if (! stream->seek(stream->size() + offset) )
+ return -1;
+ }
+ else if (whence == SEEK_SET) { // relative to start of file
+ if (! stream->seek(offset) )
+ return -1;
+ }
+ else {
+ assert(false);
+ }
+ return stream->pos();
+}
+
+}
+
+
+/////////////////////////////
+// AVPacketWrapper methods //
+/////////////////////////////
+
+class AVPacketWrapper
+{
+public:
+ AVPacketWrapper();
+ virtual ~AVPacketWrapper();
+ void free();
+
+ AVPacket packet;
+};
+
+
+AVPacketWrapper::AVPacketWrapper()
+{
+ packet.destruct = NULL;
+}
+
+/* virtual */
+AVPacketWrapper::~AVPacketWrapper()
+{
+ free();
+}
+
+void AVPacketWrapper::free()
+{
+ av_free_packet(&packet);
+}
+
+
+/////////////////////////
+// FFMpegVideo methods //
+/////////////////////////
+
+FFMpegVideo::FFMpegVideo(PixelFormat pixelFormat)
+ : isOpen(false)
+{
+ initialize();
+ format = pixelFormat;
+}
+
+FFMpegVideo::FFMpegVideo(QUrl url, PixelFormat pixelFormat)
+ : isOpen(false)
+{
+ QMutexLocker lock(&FFMpegVideo::mutex);
+ initialize();
+ format = pixelFormat;
+ isOpen = open(url, pixelFormat);
+}
+
+/* virtual */
+FFMpegVideo::~FFMpegVideo()
+{
+ QMutexLocker lock(&FFMpegVideo::mutex);
+ if (NULL != Sctx) {
+ sws_freeContext(Sctx);
+ Sctx = NULL;
+ }
+ if (NULL != pRaw) {
+ av_free(pRaw);
+ pRaw = NULL;
+ }
+ if (NULL != pFrameRGB) {
+ av_free(pFrameRGB);
+ pFrameRGB = NULL;
+ }
+ if (NULL != pCtx) {
+ avcodec_close(pCtx);
+ pCtx = NULL;
+ }
+ if (NULL != container) {
+ avformat_close_input(&container);
+ container = NULL;
+ }
+ if (NULL != buffer) {
+ av_free(buffer);
+ buffer = NULL;
+ }
+ if (NULL != blank) {
+ av_free(blank);
+ blank = NULL;
+ }
+ /*
+ if (NULL != avioContext) {
+ av_free(avioContext);
+ avioContext = NULL;
+ }
+ */
+ if (reply != NULL) {
+ reply->deleteLater();
+ reply = NULL;
+ }
+ // Don't need to free pCodec?
+}
+
+bool FFMpegVideo::open(QUrl url, enum PixelFormat formatParam)
+{
+ if (url.isEmpty())
+ return false;
+
+ // Is the movie source a local file?
+ if (url.host() == "localhost")
+ url.setHost("");
+ QString fileName = url.toLocalFile();
+ if ( (! fileName.isEmpty())
+ && (QFileInfo(fileName).exists()) )
+ {
+ // return open(fileName, formatParam); // for testing only
+
+ // Yes, the source is a local file
+ fileStream.setFileName(fileName);
+ // qDebug() << fileName;
+ if (! fileStream.open(QIODevice::ReadOnly))
+ return false;
+ return open(fileStream, fileName, formatParam);
+ }
+
+ // ...No, the source is not a local file
+ if (url.host() == "")
+ url.setHost("localhost");
+ fileName = url.path();
+
+ // http://stackoverflow.com/questions/9604633/reading-a-file-located-in-memory-with-libavformat
+ // Load from URL
+ QEventLoop loop; // for synchronous url fetch http://stackoverflow.com/questions/5486090/qnetworkreply-wait-for-finished
+ QObject::connect(&networkManager, SIGNAL(finished(QNetworkReply*)),
+ &loop, SLOT(quit()));
+ QNetworkRequest request = QNetworkRequest(url);
+ // qDebug() << "networkManager" << __FILE__ << __LINE__;
+ reply = networkManager.get(request);
+ loop.exec();
+ if (reply->error() != QNetworkReply::NoError) {
+ // qDebug() << reply->error();
+ reply->deleteLater();
+ reply = NULL;
+ return false;
+ }
+ QIODevice * stream = reply;
+ // Mpeg needs seekable device, so create in-memory buffer if necessary
+ if (stream->isSequential()) {
+ byteArray = stream->readAll();
+ fileBuffer.setBuffer(&byteArray);
+ fileBuffer.open(QIODevice::ReadOnly);
+ if (! fileBuffer.seek(0))
+ return false;
+ stream = &fileBuffer;
+ assert(! stream->isSequential());
+ }
+ bool result = open(*stream, fileName, formatParam);
+ return result;
+}
+
+bool FFMpegVideo::open(QIODevice& fileStream, QString& fileName, enum PixelFormat formatParam)
+{
+ // http://stackoverflow.com/questions/9604633/reading-a-file-located-in-memory-with-libavformat
+ // I think AVIOContext is the trick used to redivert the input stream
+ ioBuffer = (unsigned char *)av_malloc(ioBufferSize + FF_INPUT_BUFFER_PADDING_SIZE); // can get av_free()ed by libav
+ avioContext = avio_alloc_context(ioBuffer, ioBufferSize, 0, (void*)(&fileStream), &readFunction, NULL, &seekFunction);
+ container = avformat_alloc_context();
+ container->pb = avioContext;
+
+ // Open file, check usability
+ std::string fileNameStd = fileName.toStdString();
+ if (!avtry( avformat_open_input(&container, fileNameStd.c_str(), NULL, NULL), fileNameStd ))
+ return false;
+ return openUsingInitializedContainer(formatParam);
+}
+
+// file name based method for historical continuity
+bool FFMpegVideo::open(QString& fileName, enum PixelFormat formatParam)
+{
+ // Open file, check usability
+ std::string fileNameStd = fileName.toStdString();
+ if (!avtry( avformat_open_input(&container, fileNameStd.c_str(), NULL, NULL), fileNameStd ))
+ return false;
+ return openUsingInitializedContainer(formatParam);
+}
+
+
+bool FFMpegVideo::openUsingInitializedContainer(enum PixelFormat formatParam)
+{
+ format = formatParam;
+ sc = getNumberOfChannels();
+
+ if (!avtry( avformat_find_stream_info(container, NULL), "Cannot find stream information." ))
+ return false;
+ if (!avtry( videoStream=av_find_best_stream(container, AVMEDIA_TYPE_VIDEO, -1, -1, &pCodec, 0), "Cannot find a video stream." ))
+ return false;
+ pCtx=container->streams[videoStream]->codec;
+ width = pCtx->width;
+ height = pCtx->height;
+ if (!avtry( avcodec_open2(pCtx, pCodec, NULL), "Cannot open video decoder." ))
+ return false;
+
+ /* Frame rate fix for some codecs */
+ if( pCtx->time_base.num > 1000 && pCtx->time_base.den == 1 )
+ pCtx->time_base.den = 1000;
+
+ /* Compute the total number of frames in the file */
+ /* duration is in microsecs */
+ numFrames = (int)(( container->duration / (double)AV_TIME_BASE ) * pCtx->time_base.den + 0.5);
+
+ /* Get framebuffers */
+ if (! (pRaw = avcodec_alloc_frame()) )
+ throw std::runtime_error("");
+ if (! (pFrameRGB = avcodec_alloc_frame()) )
+ throw std::runtime_error("");
+
+ /* Create data buffer */
+ if (format == PIX_FMT_NONE) {
+ numBytes = 0;
+ buffer = NULL;
+ blank = NULL;
+ pFrameRGB = NULL;
+ Sctx = NULL;
+ }
+ else {
+ numBytes = avpicture_get_size( format, pCtx->width, pCtx->height ); // RGB24 format
+ if (! (buffer = (uint8_t*)av_malloc(numBytes + FF_INPUT_BUFFER_PADDING_SIZE)) ) // RGB24 format
+ throw std::runtime_error("");
+ if (! (blank = (uint8_t*)av_mallocz(avpicture_get_size(pCtx->pix_fmt,width,height))) ) // native codec format
+ throw std::runtime_error("");
+
+ /* Init buffers */
+ avpicture_fill( (AVPicture * ) pFrameRGB, buffer, format,
+ pCtx->width, pCtx->height );
+
+ /* Init scale & convert */
+ if (! (Sctx=sws_getContext(
+ pCtx->width,
+ pCtx->height,
+ pCtx->pix_fmt,
+ width,
+ height,
+ format,
+ SWS_POINT, // fastest?
+ NULL,NULL,NULL)) )
+ throw std::runtime_error("");
+ }
+
+ /* Give some info on stderr about the file & stream */
+ //dump_format(container, 0, fname, 0);
+
+ previousFrameIndex = -1;
+ return true;
+}
+
+bool FFMpegVideo::fetchFrame(int targetFrameIndex)
+{
+ if ((targetFrameIndex < 0) || (targetFrameIndex > numFrames))
+ return false;
+ if (targetFrameIndex == (previousFrameIndex + 1)) {
+ if (! readNextFrame(targetFrameIndex))
+ return false;
+ }
+ else
+ if (seekToFrame(targetFrameIndex) < 0)
+ return false;
+ previousFrameIndex = targetFrameIndex;
+ return true;
+}
+
+// \returns current frame on success, otherwise -1
+int FFMpegVideo::seekToFrame(int targetFrameIndex)
+{
+ int64_t duration = container->streams[videoStream]->duration;
+ int64_t ts = av_rescale(duration,targetFrameIndex,numFrames);
+ int64_t tol = av_rescale(duration,1,2*numFrames);
+ if ( (targetFrameIndex < 0) || (targetFrameIndex >= numFrames) ) {
+ return -1;
+ }
+ int result = avformat_seek_file( container, //format context
+ videoStream,//stream id
+ 0, //min timestamp
+ ts, //target timestamp
+ ts, //max timestamp
+ 0); //AVSEEK_FLAG_ANY),//flags
+ if (result < 0)
+ return -1;
+
+ avcodec_flush_buffers(pCtx);
+ if (! readNextFrame(targetFrameIndex))
+ return -1;
+
+ return targetFrameIndex;
+}
+
+bool FFMpegVideo::readNextFrame(int targetFrameIndex)
+{
+ AVPacket packet = {0};
+ av_init_packet(&packet);
+ bool result = readNextFrameWithPacket(targetFrameIndex, packet, pRaw);
+ av_free_packet(&packet);
+ return result;
+}
+
+// WARNING this method can raise an exception
+bool FFMpegVideo::readNextFrameWithPacket(int targetFrameIndex, AVPacket& packet, AVFrame* pYuv)
+{
+ int finished = 0;
+ do {
+ finished = 0;
+ av_free_packet(&packet);
+ int result;
+ if (!avtry(av_read_frame( container, &packet ), "Failed to read frame"))
+ return false; // !!NOTE: see docs on packet.convergence_duration for proper seeking
+ if( packet.stream_index != videoStream ) /* Is it what we're trying to parse? */
+ continue;
+ if (!avtry(avcodec_decode_video2( pCtx, pYuv, &finished, &packet ), "Failed to decode video"))
+ return false;
+ // handle odd cases and debug
+ if((pCtx->codec_id==CODEC_ID_RAWVIDEO) && !finished)
+ {
+ avpicture_fill( (AVPicture * ) pYuv, blank, pCtx->pix_fmt,width, height ); // set to blank frame
+ finished = 1;
+ }
+#if 0 // very useful for debugging
+ cout << "Packet - pts:" << (int)packet.pts;
+ cout << " dts:" << (int)packet.dts;
+ cout << " - flag: " << packet.flags;
+ cout << " - finished: " << finished;
+ cout << " - Frame pts:" << (int)pYuv->pts;
+ cout << " " << (int)pYuv->best_effort_timestamp;
+ cout << endl;
+ /* printf("Packet - pts:%5d dts:%5d (%5d) - flag: %1d - finished: %3d - Frame pts:%5d %5d\n",
+ (int)packet.pts,(int)packet.dts,
+ packet.flags,finished,
+ (int)pYuv->pts,(int)pYuv->best_effort_timestamp); */
+#endif
+ if(!finished) {
+ if (packet.pts == AV_NOPTS_VALUE)
+ throw std::runtime_error("");
+ if (packet.size == 0) // packet.size==0 usually means EOF
+ break;
+ }
+ } while ( (!finished) || (pYuv->best_effort_timestamp < targetFrameIndex));
+
+ av_free_packet(&packet);
+
+ if (format != PIX_FMT_NONE) {
+ sws_scale(Sctx, // sws context
+ pYuv->data, // src slice
+ pYuv->linesize, // src stride
+ 0, // src slice origin y
+ pCtx->height, // src slice height
+ pFrameRGB->data, // dst
+ pFrameRGB->linesize ); // dst stride
+ }
+
+ previousFrameIndex = targetFrameIndex;
+ return true;
+}
+
+uint8_t FFMpegVideo::getPixelIntensity(int x, int y, Channel c) const
+{
+ return *(pFrameRGB->data[0] + y * pFrameRGB->linesize[0] + x * sc + c);
+}
+
+int FFMpegVideo::getNumberOfFrames() const { return numFrames; }
+
+int FFMpegVideo::getWidth() const { return width; }
+
+int FFMpegVideo::getHeight() const { return height; }
+
+int FFMpegVideo::getNumberOfChannels() const
+{
+ switch(format)
+ {
+ case PIX_FMT_BGRA:
+ return 4;
+ break;
+ case PIX_FMT_RGB24:
+ return 3;
+ break;
+ case PIX_FMT_GRAY8:
+ return 1;
+ break;
+ default:
+ return 0;
+ break;
+ }
+ return 0;
+}
+
+void FFMpegVideo::initialize()
+{
+ Sctx = NULL;
+ pRaw = NULL;
+ pFrameRGB = NULL;
+ pCtx = NULL;
+ container = NULL;
+ buffer = NULL;
+ blank = NULL;
+ pCodec = NULL;
+ format = PIX_FMT_NONE;
+ reply = NULL;
+ ioBuffer = NULL;
+ avioContext = NULL;
+ FFMpegVideo::maybeInitFFMpegLib();
+}
+
+void FFMpegVideo::maybeInitFFMpegLib()
+{
+ if (FFMpegVideo::b_is_one_time_inited)
+ return;
+ av_register_all();
+ avcodec_register_all();
+ avformat_network_init();
+ FFMpegVideo::b_is_one_time_inited = true;
+}
+
+bool FFMpegVideo::avtry(int result, const std::string& msg) {
+ if ((result < 0) && (result != AVERROR_EOF)) {
+ char buf[1024];
+ av_strerror(result, buf, sizeof(buf));
+ std::string message = std::string("FFMpeg Error: ") + msg + buf;
+ qDebug() << QString(message.c_str());
+ return false;
+ }
+ return true;
+}
+
+bool FFMpegVideo::b_is_one_time_inited = false;
+
+
+
+///////////////////////////
+// FFMpegEncoder methods //
+///////////////////////////
+
+
+FFMpegEncoder::FFMpegEncoder(const char * file_name, int width, int height, enum CodecID codec_id)
+ : picture_yuv(NULL)
+ , picture_rgb(NULL)
+ , container(NULL)
+{
+ if (0 != (width % 2))
+ cerr << "WARNING: Video width is not a multiple of 2" << endl;
+ if (0 != (height % 2))
+ cerr << "WARNING: Video height is not a multiple of 2" << endl;
+
+ FFMpegVideo::maybeInitFFMpegLib();
+
+ container = avformat_alloc_context();
+ if (NULL == container)
+ throw std::runtime_error("Unable to allocate format context");
+
+ AVOutputFormat * fmt = av_guess_format(NULL, file_name, NULL);
+ if (!fmt)
+ fmt = av_guess_format("mpeg", NULL, NULL);
+ if (!fmt)
+ throw std::runtime_error("Unable to deduce video format");
+ container->oformat = fmt;
+
+ fmt->video_codec = codec_id;
+ // fmt->video_codec = CODEC_ID_H264; // fails to write
+
+ AVStream * video_st = avformat_new_stream(container, NULL);
+
+ pCtx = video_st->codec;
+ pCtx->codec_id = fmt->video_codec;
+ pCtx->codec_type = AVMEDIA_TYPE_VIDEO;
+ // resolution must be a multiple of two
+ pCtx->width = width;
+ pCtx->height = height;
+
+ // bit_rate determines image quality
+ pCtx->bit_rate = width * height * 4; // ?
+ // pCtx->qmax = 50; // no effect?
+
+ // "high quality" parameters from http://www.cs.ait.ac.th/~on/mplayer/pl/menc-feat-enc-libavcodec.html
+ // vcodec=mpeg4:mbd=2:mv0:trell:v4mv:cbp:last_pred=3:predia=2:dia=2:vmax_b_frames=2:vb_strategy=1:precmp=2:cmp=2:subcmp=2:preme=2:vme=5:naq:qns=2
+ if (false) // does not help
+ // if (pCtx->codec_id == CODEC_ID_MPEG4)
+ {
+ pCtx->mb_decision = 2;
+ pCtx->last_predictor_count = 3;
+ pCtx->pre_dia_size = 2;
+ pCtx->dia_size = 2;
+ pCtx->max_b_frames = 2;
+ pCtx->b_frame_strategy = 2;
+ pCtx->trellis = 2;
+ pCtx->compression_level = 2;
+ pCtx->global_quality = 300;
+ pCtx->pre_me = 2;
+ pCtx->mv0_threshold = 1;
+ // pCtx->quantizer_noise_shaping = 2; // deprecated
+ // TODO
+ }
+
+ pCtx->time_base = (AVRational){1, 25};
+ // pCtx->time_base = (AVRational){1, 10};
+ pCtx->gop_size = 12; // emit one intra frame every twelve frames
+ // pCtx->max_b_frames = 0;
+ pCtx->pix_fmt = PIX_FMT_YUV420P;
+ if (fmt->flags & AVFMT_GLOBALHEADER)
+ pCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
+
+ if (pCtx->codec_id == CODEC_ID_H264)
+ {
+ // http://stackoverflow.com/questions/3553003/encoding-h-264-with-libavcodec-x264
+ pCtx->coder_type = 1; // coder = 1
+ pCtx->flags|=CODEC_FLAG_LOOP_FILTER; // flags=+loop
+ pCtx->me_cmp|= 1; // cmp=+chroma, where CHROMA = 1
+ // pCtx->partitions|=X264_PART_I8X8+X264_PART_I4X4+X264_PART_P8X8+X264_PART_B8X8; // partitions=+parti8x8+parti4x4+partp8x8+partb8x8
+ pCtx->me_method=ME_HEX; // me_method=hex
+ pCtx->me_subpel_quality = 7; // subq=7
+ pCtx->me_range = 16; // me_range=16
+ pCtx->gop_size = 250; // g=250
+ pCtx->keyint_min = 25; // keyint_min=25
+ pCtx->scenechange_threshold = 40; // sc_threshold=40
+ pCtx->i_quant_factor = 0.71; // i_qfactor=0.71
+ pCtx->b_frame_strategy = 1; // b_strategy=1
+ pCtx->qcompress = 0.6; // qcomp=0.6
+ pCtx->qmin = 10; // qmin=10
+ pCtx->qmax = 51; // qmax=51
+ pCtx->max_qdiff = 4; // qdiff=4
+ pCtx->max_b_frames = 3; // bf=3
+ pCtx->refs = 3; // refs=3
+ // pCtx->directpred = 1; // directpred=1
+ pCtx->trellis = 1; // trellis=1
+ // pCtx->flags2|=CODEC_FLAG2_BPYRAMID+CODEC_FLAG2_MIXED_REFS+CODEC_FLAG2_WPRED+CODEC_FLAG2_8X8DCT+CODEC_FLAG2_FASTPSKIP; // flags2=+bpyramid+mixed_refs+wpred+dct8x8+fastpskip
+ // pCtx->weighted_p_pred = 2; // wpredp=2
+ // libx264-main.ffpreset preset
+ // pCtx->flags2|=CODEC_FLAG2_8X8DCT;
+ // pCtx->flags2^=CODEC_FLAG2_8X8DCT; // flags2=-dct8x8
+ }
+
+ AVCodec * codec = avcodec_find_encoder(pCtx->codec_id);
+ if (NULL == codec)
+ throw std::runtime_error("Unable to find Mpeg4 codec");
+ if (codec->pix_fmts)
+ pCtx->pix_fmt = codec->pix_fmts[0];
+ {
+ QMutexLocker lock(&FFMpegVideo::mutex);
+ if (avcodec_open2(pCtx, codec, NULL) < 0)
+ throw std::runtime_error("Error opening codec");
+ }
+
+ /* Get framebuffers */
+ if (! (picture_yuv = avcodec_alloc_frame()) ) // final frame format
+ throw std::runtime_error("");
+ if (! (picture_rgb = avcodec_alloc_frame()) ) // rgb version I can understand easily
+ throw std::runtime_error("");
+ /* the image can be allocated by any means and av_image_alloc() is
+ * just the most convenient way if av_malloc() is to be used */
+ if ( av_image_alloc(picture_yuv->data, picture_yuv->linesize,
+ pCtx->width, pCtx->height, pCtx->pix_fmt, 1) < 0 )
+ throw std::runtime_error("Error allocating YUV frame buffer");
+ if ( av_image_alloc(picture_rgb->data, picture_rgb->linesize,
+ pCtx->width, pCtx->height, PIX_FMT_RGB24, 1) < 0 )
+ throw std::runtime_error("Error allocating RGB frame buffer");
+
+ /* Init scale & convert */
+ if (! (Sctx=sws_getContext(
+ width,
+ height,
+ PIX_FMT_RGB24,
+ pCtx->width,
+ pCtx->height,
+ pCtx->pix_fmt,
+ SWS_BICUBIC,NULL,NULL,NULL)) )
+ throw std::runtime_error("");
+
+ /* open the output file */
+ if (!(fmt->flags & AVFMT_NOFILE))
+ {
+ QMutexLocker lock(&FFMpegVideo::mutex);
+ if (avio_open(&container->pb, file_name, AVIO_FLAG_WRITE) < 0)
+ throw std::runtime_error("Error opening output video file");
+ }
+ avformat_write_header(container, NULL);
+}
+
+void FFMpegEncoder::setPixelIntensity(int x, int y, int c, uint8_t value)
+{
+ uint8_t * ptr = picture_rgb->data[0] + y * picture_rgb->linesize[0] + x * 3 + c;
+ *ptr = value;
+}
+
+void FFMpegEncoder::write_frame()
+{
+ // convert from RGB24 to YUV
+ sws_scale(Sctx, // sws context
+ picture_rgb->data, // src slice
+ picture_rgb->linesize, // src stride
+ 0, // src slice origin y
+ pCtx->height, // src slice height
+ picture_yuv->data, // dst
+ picture_yuv->linesize ); // dst stride
+
+ /* encode the image */
+ // use non-deprecated avcodec_encode_video2(...)
+ AVPacket packet;
+ av_init_packet(&packet);
+ packet.data = NULL;
+ packet.size = 0;
+
+ int got_packet;
+ int ret = avcodec_encode_video2(pCtx,
+ &packet,
+ picture_yuv,
+ &got_packet);
+ if (ret < 0)
+ throw std::runtime_error("Video encoding failed");
+ if (got_packet)
+ {
+ // std::cout << "encoding frame" << std::endl;
+ int result = av_write_frame(container, &packet);
+ av_destruct_packet(&packet);
+ }
+}
+
+/* virtual */
+FFMpegEncoder::~FFMpegEncoder()
+{
+ int result = av_write_frame(container, NULL); // flush
+ result = av_write_trailer(container);
+ {
+ QMutexLocker lock(&FFMpegVideo::mutex);
+ avio_close(container->pb);
+ }
+ for (int i = 0; i < container->nb_streams; ++i)
+ av_freep(container->streams[i]);
+ av_free(container);
+ container = NULL;
+
+ {
+ QMutexLocker lock(&FFMpegVideo::mutex);
+ avcodec_close(pCtx);
+ }
+ av_free(pCtx);
+ pCtx = NULL;
+ av_free(picture_yuv->data[0]);
+ av_free(picture_yuv);
+ picture_yuv = NULL;
+ av_free(picture_rgb->data[0]);
+ av_free(picture_rgb);
+ picture_rgb = NULL;
+}
+
+#endif // USE_FFMPEG
+
diff --git a/vaa3d_wrapper/FFMpegVideo.h b/vaa3d_wrapper/FFMpegVideo.h
new file mode 100644
index 0000000..b0d9da7
--- /dev/null
+++ b/vaa3d_wrapper/FFMpegVideo.h
@@ -0,0 +1,130 @@
+#ifndef FFMPEGVIDEO_H
+#define FFMPEGVIDEO_H
+
+/*
+ * FFMpegVideo.h
+ * May 2012 Christopher Bruns
+ * The FFMpegVideo class is a C++ wrapper around the poorly documented
+ * libavcodec movie API used by ffmpeg. I made extensive use of Nathan
+ * Clack's implementation in the whisk project.
+ *
+ * The FFMpegVideo.h and FFMpegVideo.cpp files depend only on the libavcodec
+ * and allied sets of libraries. To compartmentalize and reduce dependencies
+ * I placed the Vaa3d specific use of this class into a separate set of
+ * source files: loadV3dFFMpeg.h/cpp
+ */
+
+#ifdef USE_FFMPEG
+
+extern "C" {
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libavutil/pixfmt.h>
+#include <libavutil/opt.h>
+#include <libavutil/imgutils.h>
+}
+
+#include <QFile>
+#include <QNetworkAccessManager>
+#include <QMutex>
+#include <QUrl>
+#include <QBuffer>
+#include <string>
+#include <stdexcept>
+#include <iostream>
+
+// Translated to C++ by Christopher Bruns May 2012
+// from ffmeg_adapt.c in whisk package by Nathan Clack, Mark Bolstadt, Michael Meeuwisse
+class FFMpegVideo
+{
+public:
+ enum Channel {
+ RED = 0,
+ GRAY = 0,
+ GREEN = 1,
+ BLUE = 2,
+ ALPHA = 3
+ };
+
+ // Some libavcodec calls are not reentrant
+ static QMutex mutex;
+ static void maybeInitFFMpegLib();
+
+ FFMpegVideo(PixelFormat pixelFormat=PIX_FMT_RGB24);
+ FFMpegVideo(QUrl url, PixelFormat pixelFormat=PIX_FMT_RGB24);
+ virtual ~FFMpegVideo();
+ bool open(QUrl url, enum PixelFormat formatParam = PIX_FMT_RGB24);
+ bool open(QIODevice& fileStream, QString& fileName, enum PixelFormat formatParam = PIX_FMT_RGB24);
+ uint8_t getPixelIntensity(int x, int y, Channel c = GRAY) const;
+ bool fetchFrame(int targetFrameIndex = 0);
+ int getNumberOfFrames() const;
+ int getWidth() const;
+ int getHeight() const;
+ int getNumberOfChannels() const;
+ bool readNextFrame(int targetFrameIndex = 0);
+ bool readNextFrameWithPacket(int targetFrameIndex, AVPacket& packet, AVFrame* pYuv);
+ int seekToFrame(int targetFrameIndex = 0);
+
+ // make certain members public, for use by Fast3DTexture class
+ AVFrame *pFrameRGB;
+ AVFrame *pRaw;
+ AVFormatContext *container;
+ AVCodecContext *pCtx;
+ int videoStream;
+ int previousFrameIndex;
+ bool isOpen;
+
+protected:
+ static bool b_is_one_time_inited;
+
+ void initialize();
+ bool open(QString& fileName, enum PixelFormat formatParam);
+ bool openUsingInitializedContainer(enum PixelFormat formatParam);
+ static bool avtry(int result, const std::string& msg);
+
+ AVCodec *pCodec;
+ uint8_t *buffer,
+ *blank;
+ struct SwsContext *Sctx;
+ int width, height;
+ PixelFormat format;
+ size_t numBytes;
+ int numFrames;
+ int sc; // number of color channels
+
+ // For loading from URL
+ static const int ioBufferSize = 32768;
+ unsigned char * ioBuffer;
+ QNetworkAccessManager networkManager;
+ AVIOContext* avioContext;
+ QFile fileStream;
+ QNetworkReply* reply;
+ QBuffer fileBuffer;
+ QByteArray byteArray;
+};
+
+
+// TODO - finish refactoring based on
+// http://svn.gnumonks.org/trunk/21c3-video/ffmpeg/ffmpeg-0.4.9-pre1/output_example.c
+class FFMpegEncoder
+{
+public:
+ typedef FFMpegVideo::Channel Channel;
+
+ FFMpegEncoder(const char * file_name, int width, int height, enum CodecID codec_id = CODEC_ID_MPEG4);
+ virtual ~FFMpegEncoder();
+ void setPixelIntensity(int x, int y, int c, uint8_t value);
+ void write_frame();
+
+protected:
+ AVFormatContext *container;
+ AVCodecContext *pCtx;
+ AVFrame *picture_yuv;
+ AVFrame *picture_rgb;
+ struct SwsContext *Sctx;
+};
+
+
+#endif // USE_FFMPEG
+
+#endif // FFMPEGVIDEO_H
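A decode-side usage sketch for the class declared above (assumes USE_FFMPEG is defined and a local file named movie.mp4; both are stand-ins):

    #include "FFMpegVideo.h"
    #include <QUrl>
    #include <iostream>

    int main()
    {
        FFMpegVideo video(QUrl::fromLocalFile("movie.mp4"));
        if (!video.isOpen)
            return 1;
        for (int f = 0; f < video.getNumberOfFrames(); ++f) {
            if (!video.fetchFrame(f)) // sequential access avoids re-seeking
                break;
            std::cout << "frame " << f << " gray(0,0) = "
                      << (int)video.getPixelIntensity(0, 0, FFMpegVideo::GRAY)
                      << std::endl;
        }
        return 0;
    }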
diff --git a/vaa3d_wrapper/loadV3dFFMpeg.cpp b/vaa3d_wrapper/loadV3dFFMpeg.cpp
new file mode 100644
index 0000000..90a6dbc
--- /dev/null
+++ b/vaa3d_wrapper/loadV3dFFMpeg.cpp
@@ -0,0 +1,143 @@
+#include "loadV3dFFMpeg.h"
+#include "FFMpegVideo.h"
+#include <iostream>
+
+#ifdef USE_FFMPEG
+
+using namespace std;
+
+bool saveStackFFMpeg(const char * file_name, const My4DImage& img, enum CodecID codec_id)
+{
+ try {
+ Image4DProxy<My4DImage> proxy(const_cast<My4DImage*>(&img));
+ double default_irange = 1.0; // assumes data range is 0-255.0
+ if (proxy.su > 1) {
+ default_irange = 1.0 / 16.0; // 0-4096, like our microscope images
+ }
+ std::vector<double> imin(proxy.sc, 0.0);
+ std::vector<double> irange2(proxy.sc, default_irange);
+ // rescale if converting from 16 bit to 8 bit
+ if (proxy.su > 1) {
+ if (img.p_vmin && img.p_vmax)
+ proxy.set_minmax(img.p_vmin, img.p_vmax);
+ if (proxy.has_minmax()) {
+ for (int c = 0; c < proxy.sc; ++c) {
+ imin[c] = proxy.vmin[c];
+ irange2[c] = 255.0 / (proxy.vmax[c] - proxy.vmin[c]);
+ }
+ }
+ }
+ FFMpegEncoder encoder(file_name, proxy.sx, proxy.sy, codec_id);
+ for (int z = 0; z < proxy.sz; ++z) {
+ for (int y = 0; y < proxy.sy; ++y) {
+ for (int x = 0; x < proxy.sx; ++x) {
+ for (int c = 0; c < 3; ++c) {
+ int ic = c;
+ if (c >= proxy.sc) ic = 0; // single channel volume to gray RGB movie
+ double val = proxy.value_at(x, y, z, ic);
+ val = (val - imin[ic]) * irange2[ic]; // rescale to range 0-255
+ encoder.setPixelIntensity(x, y, c, (int)val);
+ }
+ }
+ }
+ encoder.write_frame();
+ }
+ return true;
+ } catch (...) {}
+
+ return false;
+}
+
+bool loadStackFFMpeg(const char* fileName, Image4DSimple& img)
+{
+ return loadStackFFMpeg(QUrl::fromLocalFile(fileName), img);
+}
+
+bool loadStackFFMpeg(QUrl url, Image4DSimple& img)
+{
+ try {
+ FFMpegVideo video(url);
+ if (! video.isOpen)
+ return false;
+ int sx = video.getWidth();
+ int sy = video.getHeight();
+ int sz = video.getNumberOfFrames();
+ int sc = video.getNumberOfChannels();
+ // cout << "Number of frames = " << sz << endl;
+
+ img.createBlankImage(sx, sy, sz, sc, 1); // 1 byte = 8 bits per value
+ Image4DProxy<Image4DSimple> proxy(&img);
+
+ int frameCount = 0;
+ for (int z = 0; z < sz; ++z)
+ {
+        video.fetchFrame(z); // decode frame z ("int z = frameCount" shadowed the loop index; removed)
+        frameCount++;
+ for(int c = 0; c < sc; ++c) {
+ for (int y = 0; y < sy; ++y) {
+ for (int x = 0; x < sx; ++x) {
+ proxy.put_at(x, y, z, c,
+ video.getPixelIntensity(x, y, (FFMpegVideo::Channel)c)
+ );
+ }
+ }
+ }
+ }
+ cout << "Number of frames found = " << frameCount << endl;
+
+ return true;
+
+ } catch(...) {}
+
+ return false;
+}
+
+bool loadStackFFMpegAsGray(const char* fileName, Image4DSimple& img)
+{
+    return loadStackFFMpegAsGray(QUrl::fromLocalFile(fileName), img);
+}
+
+bool loadStackFFMpegAsGray(QUrl url, Image4DSimple& img)
+{
+ try {
+ FFMpegVideo video(url);
+ int sx = video.getWidth();
+ int sy = video.getHeight();
+ int sz = video.getNumberOfFrames();
+ int sc = video.getNumberOfChannels();
+ // cout << "Number of frames = " << sz << endl;
+
+ img.createBlankImage(sx, sy, sz, 1, 1); // 1 byte = 8 bits per value
+ Image4DProxy<Image4DSimple> proxy(&img);
+
+ int frameCount = 0;
+ for (int z = 0; z < sz; ++z)
+ {
+        video.fetchFrame(z); // decode frame z ("int z = frameCount" shadowed the loop index; removed)
+        frameCount++;
+ for (int y = 0; y < sy; ++y) {
+ for (int x = 0; x < sx; ++x) {
+ // Use average of R,G,B as gray value
+ int val = 0;
+ for(int c = 0; c < sc; ++c) {
+ val += video.getPixelIntensity(x, y, (FFMpegVideo::Channel)c);
+ }
+ val /= sc; // average of rgb
+ proxy.put_at(x, y, z, 0, val);
+ }
+ }
+ }
+ // cout << "Number of frames found = " << frameCount << endl;
+
+ return true;
+
+ } catch(...) {}
+
+ return false;
+}
+
+#endif // USE_FFMPEG
+
+
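A round-trip usage sketch for the loaders above, assuming (as in Vaa3D) that My4DImage derives from Image4DSimple so one volume object can feed both calls; the file names are hypothetical:

    #include "loadV3dFFMpeg.h"

    bool roundTrip(const char *inName, const char *outName)
    {
        My4DImage vol; // assumed: My4DImage is-a Image4DSimple (v3d_core.h)
        if (!loadStackFFMpeg(inName, vol)) // fills width x height x frames x channels
            return false;
        return saveStackFFMpeg(outName, vol, CODEC_ID_MPEG4);
    }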
diff --git a/vaa3d_wrapper/loadV3dFFMpeg.h b/vaa3d_wrapper/loadV3dFFMpeg.h
new file mode 100644
index 0000000..a4096ea
--- /dev/null
+++ b/vaa3d_wrapper/loadV3dFFMpeg.h
@@ -0,0 +1,20 @@
+#ifndef LOADV3DFFMPEG_H
+#define LOADV3DFFMPEG_H
+
+#ifdef USE_FFMPEG
+
+#include "../../v3d/v3d_core.h" // Image4DSimple
+extern "C" {
+#include "libavcodec/avcodec.h"
+}
+#include <iostream>
+
+bool loadStackFFMpeg(const char* fileName, Image4DSimple& image);
+bool loadStackFFMpegAsGray(const char* fileName, Image4DSimple& img);
+bool loadStackFFMpeg(QUrl url, Image4DSimple& image);
+bool loadStackFFMpegAsGray(QUrl url, Image4DSimple& img);
+bool saveStackFFMpeg(const char * fileName, const My4DImage& img, enum CodecID codec_id = CODEC_ID_MPEG4);
+
+#endif // USE_FFMPEG
+
+#endif // LOADV3DFFMPEG_H