Diffstat (limited to 'rotord')
-rw-r--r--  rotord/src/graph.cpp          221
-rw-r--r--  rotord/src/graph.h              2
-rw-r--r--  rotord/src/libavwrapper.cpp  1176
-rw-r--r--  rotord/src/libavwrapper.h     187
-rw-r--r--  rotord/src/rotor.cpp           57
-rw-r--r--  rotord/src/rotor.h             27
6 files changed, 24 insertions, 1646 deletions
diff --git a/rotord/src/graph.cpp b/rotord/src/graph.cpp
index 81f3275..74a569b 100644
--- a/rotord/src/graph.cpp
+++ b/rotord/src/graph.cpp
@@ -74,12 +74,16 @@ bool Graph::preview(xmlIO &XML,string &node,string &_format,int frame,int w,int
}
}
XML.popTag();
- return true;
+ return true;
}
return false;
}
bool Graph::video_render(const string &output_filename,const float framerate) {
+
+ //https://www.adobe.com/devnet/video/articles/mp4_movie_atom.html
+ //https://www.google.ie/search?q=ffmbc&aq=f&oq=ffmbc&aqs=chrome.0.57j0l2j60j0j60.4360j0&sourceid=chrome&ie=UTF-8#q=ffmbc+git
+
//vector<Node*> loaders=find_nodes("video_loader");
//for (auto i:loaders){
// if (!dynamic_cast<Video_loader*>(i)->isLoaded) {
@@ -96,7 +100,12 @@ bool Graph::video_render(const string &output_filename,const float framerate) {
//setup defaults
int bitRate=5000000;
AVCodecID codecId=AV_CODEC_ID_H264; //MPEG4;
- std::string container ="mp4";
+ std::string container;
+ Poco::StringTokenizer t(output_filename,".");
+ if (t.count()>1){
+ container="."+t[t.count()-1];
+ }
+ else container=".mp4";
//at the moment it crashes if you render before audio is loaded and also on 2nd render
libav::exporter exporter;
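The hunk above replaces the hard-coded "mp4" container with one derived from the output filename's extension. A minimal standalone sketch of that selection logic, assuming Poco is available (the helper name is illustrative, not part of the codebase):

#include <string>
#include <Poco/StringTokenizer.h>

// Pick the container extension from the output filename; fall back to ".mp4"
// when the name carries no extension, mirroring the logic added above.
std::string container_from_filename(const std::string &output_filename) {
    Poco::StringTokenizer t(output_filename, ".");
    if (t.count() > 1) {
        return "." + t[t.count() - 1];  // e.g. "render.mov" -> ".mov"
    }
    return ".mp4";                      // no extension: default container
}

Note that the last dot-separated token is taken verbatim, so a name such as "clip.v2" would yield ".v2"; callers are expected to pass well-formed output paths.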
@@ -141,7 +150,7 @@ bool Graph::video_render(const string &output_filename,const float framerate) {
Audio_frame *a;
int64_t sample_start=0;
while (vf<duration&&!cancelled){ //-vstep) {
-
+
if (usingaudio) {
if (audioloader.get_samples(audio,sample_start,samples_in_frame)) {
if (aoffs>0){
@@ -174,7 +183,7 @@ bool Graph::video_render(const string &output_filename,const float framerate) {
sample_start+=samples_in_frame;
}
else usingaudio=false;
-
+
}
@@ -223,151 +232,7 @@ bool Graph::video_render(const string &output_filename,const float framerate) {
return false;
}
-
- cerr<<"Rotor: video output node not found"<<endl;
- return false;
-}
-
-bool Graph::_video_render(const string &output_filename,const float framerate) {
- //vector<Node*> loaders=find_nodes("video_loader");
- //for (auto i:loaders){
- // if (!dynamic_cast<Video_loader*>(i)->isLoaded) {
- // cerr<<"Rotor: all loaders must be populated before rendering"<<endl;
- // return false;
- // }
- //}
- if (find_node("video_output")) {
- Video_output *video_output=dynamic_cast<Video_output*>(find_node("video_output"));
- for (auto f: find_nodes("video_feedback")){
- (dynamic_cast<Video_feedback*>(f))->set_feedback(&(video_output->image));
- }
- //
- //setup defaults
- int bitRate=5000000;
- AVCodecID codecId=AV_CODEC_ID_H264; //MPEG4;
- std::string container ="mp4";
-
- //at the moment it crashes if you render before audio is loaded and also on 2nd render
- libav::exporter exporter;
-
- float spct=100.0f/duration;
-
- if (exporter.setup(outW,outH,bitRate,framerate,container)) { //codecId,
- if (exporter.record(output_filename)) {
-
- libav::audioloader audioloader;
-
- bool usingaudio=audioloader.setup(audio_filename);
- float *avframe=nullptr;
-
- Logger& logger = Logger::get("Rotor");
- logger.information("Video_output rendering "+output_filename+": "+toString(duration)+" seconds at "+toString(framerate)+" fps, audio frame size: "+toString(exporter.get_audio_framesize()));
- //25fps video and 43.06640625fps audio? hmm
- //how to get the timecodes correct for the interleaved files
-
- struct timeval start, end;
-
- gettimeofday(&start, NULL);
-
- uint16_t *audioframe;
- int samples_in_frame;
-
- if (usingaudio){
- //does audioloader output interleaved samples?
- samples_in_frame=(audioloader.codecContext->sample_rate)/framerate;
- string whether=usingaudio?"Loading":"Cannot load";
- logger.information(whether+" audio file: "+audio_filename+", each frame contains "+toString(samples_in_frame)+" samples at "+toString(audioloader.codecContext->sample_rate)+" hz");
- audioframe=new uint16_t[(samples_in_frame+exporter.get_audio_framesize())*audioloader.codecContext->channels];
- }
-
- float vstep=1.0f/framerate;
- float v=0.0f;
- float vf=0.0f;
- float af=0.0f;
- int aoffs=0;
- int audioend=0;
- Audio_frame *a;
- while (vf<duration&&!cancelled){ //-vstep) {
- uint16_t *audio=nullptr;
- if (usingaudio) {
- uint16_t *audio=audioloader.get_samples(samples_in_frame);
- if (aoffs>0){
- //shift down samples
- int s=0;
- while ((s+aoffs)<audioend) {
- for (int j=0;j<audioloader.codecContext->channels;j++){
- audioframe[s*audioloader.codecContext->channels+j]=audioframe[(s+aoffs)*audioloader.codecContext->channels+j];
- }
- s++;
- }
- aoffs=s;
- }
- for (int i=0;i<samples_in_frame;i++){
- for (int j=0;j<audioloader.codecContext->channels;j++){
- audioframe[(aoffs+i)*audioloader.codecContext->channels+j]=audio[i*audioloader.codecContext->channels+j];
- }
- }
- audioend=aoffs+samples_in_frame;
- aoffs=0;
- //while (fless(vf+vstep,af+exporter.get_audio_step())) {
- while (aoffs+exporter.get_audio_framesize()<audioend) {
- //insert audio frames until we are only 1 audio frame behind the next video frame
- //send audio_framesize() of them through until buffer is used
- //pass full buffer within frame_spec for av nodes
- exporter.encodeFrame(audioframe+(aoffs*audioloader.codecContext->channels));
- af+=exporter.get_audio_step();
- aoffs+=exporter.get_audio_framesize();
- }
- a=new Audio_frame(audio,audioloader.codecContext->channels,samples_in_frame);
- }
-
-
- //[mp3 @ 0x7fffe40330e0] max_analyze_duration 5000000 reached at 5015510 microseconds
- //[mp3 @ 0x7fffe4033ec0] Insufficient thread locking around avcodec_open/close()
- //[mp3 @ 0x7fffe40330e0] Estimating duration from bitrate, this may be inaccurate
- //[libx264 @ 0x7fffe8003940] using cpu capabilities: MMX2 SSE2Fast SSSE3 FastShuffle SSE4.2
- //[libx264 @ 0x7fffe8003940] profile High, level 3.0
- //[libx264 @ 0x7fffe8003940] 264 - core 123 r2189 35cf912 - H.264/MPEG-4 AVC codec - Copyleft 2003-2012 - http://www.videolan.org/x264.html - options: cabac=1 ref=3 deblock=1:0:0 analyse=0x3:0x113 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=1 me_range=16 chroma_me=1 trellis=1 8x8dct=1 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=-2 threads=12 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=3 b_pyramid=2 b_adapt=1 b_bias=0 direct=1 weightb=1 open_gop=0 weightp=2 keyint=10 keyint_min=1 scenecut=40 intra_refresh=0 rc_lookahead=10 rc=abr mbtree=1 bitrate=400 ratetol=1.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
- //Assertion ff_avcodec_locked failed at libavcodec/utils.c:2967
-
- //cerr<<"videoloader: "<<vf<<" seconds, vstep "<<vstep<<" ,asking for frame "<<((int)((vf*framerate)+0.5))<<endl
-
- Image* i;
- if (usingaudio) {
- i=video_output->get_output(Frame_spec(vf,framerate,duration,outW,outH,a));
- }
- else i=video_output->get_output(Frame_spec(vf,framerate,duration,outW,outH));
- if (i) {
- exporter.encodeFrame(i->RGBdata);
- }
- vf+=vstep;
- progress=vf/duration;
- if (usingaudio) {delete a;};
- }
-
- exporter.finishRecord();
-
- gettimeofday(&end, NULL);
-
- float mtime = ((end.tv_sec-start.tv_sec) + (end.tv_usec-start.tv_usec)/1000000.0) + 0.5;
-
- logger.information("Video_output: rendered "+output_filename+": in "+toString(mtime)+" seconds");
-
- if (usingaudio) {
- audioloader.close();
- delete[] audioframe;
- }
-
-
-
- return true;
- }
- }
-
- return false;
- }
-
cerr<<"Rotor: video output node not found"<<endl;
return false;
}
@@ -388,7 +253,7 @@ bool Graph::load(string data,string media_path){
return false;
}
bool Graph::loadFile(string &filename,string media_path){
- //if (loaded)
+ //if (loaded)
printf("loading graph: %s\n",(filename).c_str());
if (xml.loadFile(filename)){
return parseXml(media_path);
@@ -494,9 +359,9 @@ bool Graph::parseJson(string &data,string &media_path){
}
//parameters
for (int l=0;l<jnodes[i]["parameters"].size();l++){
-
+
string parameter=jnodes[i]["parameters"][l]["name"].asString();
-
+
if (nodes[nodeID]->parameters.find(parameter)!=nodes[nodeID]->parameters.end()) {
float val=jnodes[i]["parameters"][l]["value"].asFloat();
if (val!=nodes[nodeID]->parameters.find(parameter)->second->value){
@@ -514,7 +379,7 @@ bool Graph::parseJson(string &data,string &media_path){
else if (fromID!="") cerr << "Rotor: linking parameter " << parameter << " of node: '" << nodeID << "', cannot find target '" << fromID << "'" << endl;
}
else cerr << "Rotor: cannot find parameter '" << parameter << "' of "<<settings["type"]<<" "<< nodeID << endl;
-
+
}
//attributes
for (int m=0;m<jnodes[i]["attributes"].size();m++){
@@ -582,7 +447,7 @@ bool Graph::parseXml(string media_path){
else cerr << "Rotor: linking input " << i2 << " of node: '" << nodeID << "', cannot find target '" << fromID << "'" << endl;
}
else cerr << "Rotor: input " << i2 << " of node: '" << nodeID << "' does not exist" << endl;
-
+
}
int n3=xml.getNumTags("image_input");
for (int i3=0;i3<n3;i3++){
@@ -651,7 +516,7 @@ bool Graph::parseXml(string media_path){
// nodes[nodeID]->set_parameter(xml.getAttribute("parameter","name","",i5),xml.getAttribute("parameter","value","",i5));
//}
//if (n5>0) cerr << "Rotor: found " << n5 << " extra parameters for node '" << nodeID << "'" << endl;
-
+
xml.popTag();
}
}
@@ -667,54 +532,6 @@ bool Graph::parseXml(string media_path){
loaded=true;
return true;
}
-bool Graph::_load_audio(const string &filename,vector<Audio_processor*> processors){
- Logger& logger = Logger::get("Rotor");
- logger.information("Analysing "+filename);
-
- libav::audioloader loader;
- loader.setup(filename);
-
- duration=((float)loader.formatContext->duration)/AV_TIME_BASE;
-
- int rate = loader.codecContext->sample_rate;
- int samples = ((loader.formatContext->duration + 5000)*rate)/AV_TIME_BASE; //why 5000 more?
- int channels= loader.codecContext->channels;
- int bits = 16; //???why can't we read this loader.codecContext->bits_per_raw_sample;
-
- for (auto p: processors) {
- if(!p->init(channels,bits,samples,rate) ){
- logger.error("ERROR: Audio plugin failed to initialse");
- return false;
- }
- }
-
- AVFrame* frame=loader.get_frame();
- int sample_processed=0;
-
- while (frame&&!cancelled)
- {
- //now we can pass the data to the processor(s)
- for (auto p: processors) {
- p->process_frame(frame->data[0],frame->nb_samples);
- }
- sample_processed+=frame->nb_samples;
- //mutex.lock();
- progress=((float)sample_processed)/samples; //atomic on 64 bit?
- //mutex.unlock();
-
- frame=loader.get_frame();
- }
-
- loader.close();
-
- for (auto p: processors) {
- p->cleanup();
- p->print_summary();
- }
-
- logger.information("Finished audio analysis");
- return true;
-}
bool Graph::load_audio(const string &filename,vector<Audio_processor*> processors){
Logger& logger = Logger::get("Rotor");
logger.information("Analysing "+filename);
diff --git a/rotord/src/graph.h b/rotord/src/graph.h
index 4df01bf..cbcc121 100644
--- a/rotord/src/graph.h
+++ b/rotord/src/graph.h
@@ -42,7 +42,6 @@ namespace Rotor {
vector<Node*> find_nodes(const string &type); //could be a way of finding a set based on capabilities?
Node* find_node(const string &type);
bool signal_render(string &signal_xml,const float framerate);
- bool _video_render(const string &output_filename,const float framerate);
bool video_render(const string &output_filename,const float framerate);
bool load(string data,string media_path);
bool loadFile(string &filename,string media_path);
@@ -53,7 +52,6 @@ namespace Rotor {
bool check_audio(string audio,string path);
bool print_features(xmlIO &XML,string &node);
bool load_audio(const string &filename,vector<Audio_processor*> processors);
- bool _load_audio(const string &filename,vector<Audio_processor*> processors);
bool load_video(const string &nodeID,const string &filename);//can be performance or clip
bool loaded;
float duration;
diff --git a/rotord/src/libavwrapper.cpp b/rotord/src/libavwrapper.cpp
index 1fe0e5a..d09b7dc 100644
--- a/rotord/src/libavwrapper.cpp
+++ b/rotord/src/libavwrapper.cpp
@@ -3,79 +3,12 @@
extern Poco::Mutex mutex; //application wide mutex
static Poco::Mutex mutex;
-
-extern "C"
-{
-#include <libswscale/swscale.h>
-}
-
-
#include <stdexcept>
#include <iostream>
#include <cassert>
using namespace std;
-// Translated to C++ by Christopher Bruns May 2012
-// from ffmeg_adapt.c in whisk package by Nathan Clack, Mark Bolstadt, Michael Meeuwisse
-
-
-// Avoid link error on some macs
-#ifdef __APPLE__
-extern "C" {
-#include <stdlib.h>
-#include <errno.h>
-
-}
-#endif
-
-// Custom read function so FFMPEG does not need to read from a local file by name.
-// But rather from a stream derived from a URL or whatever.
-extern "C" {
-
-int readFunction(void* opaque, uint8_t* buf, int buf_size)
-{
- //QIODevice* stream = (QIODevice*)opaque;
- ifstream* stream = (ifstream*)opaque;
- //int numBytes =
- stream->read((char*)buf, (streamsize)buf_size);
- return stream->gcount(); //?? is this right
- //numBytes; //TODO work out
-}
-
-// http://cdry.wordpress.com/2009/09/09/using-custom-io-callbacks-with-ffmpeg/
-int64_t seekFunction(void* opaque, int64_t offset, int whence)
-{
- //QIODevice* stream = (QIODevice*)opaque;
- ifstream* stream = (ifstream*)opaque;
- if (stream == NULL)
- return -1;
- else if (whence == AVSEEK_SIZE)
- return -1; // "size of my handle in bytes"
- //else if (stream->isSequential())
- // return -1; // cannot seek a sequential stream //presume this would be certain kind of network stream
- else if (whence == SEEK_CUR) { // relative to start of file
- if (! stream->seekg(offset,ios_base::cur)) //stream->pos() + offset) )
- return -1;
- }
- else if (whence == SEEK_END) { // relative to end of file
- assert(offset < 0);
- if (! stream->seekg(offset,ios_base::end)) //stream->size() + offset) )
- return -1;
- }
- else if (whence == SEEK_SET) { // relative to start of file
- if (! stream->seekg(offset) )
- return -1;
- }
- else {
- assert(false);
- }
- return stream->tellg();
-}
-
-}
-
-
void libav::maybeInitFFMpegLib()
{
if (b_is_one_time_inited)
@@ -87,476 +20,6 @@ void libav::maybeInitFFMpegLib()
b_is_one_time_inited = true;
}
-
-
-/////////////////////////////
-// AVPacketWrapper methods //
-/////////////////////////////
-
-
-class AVPacketWrapper
-{
-public:
- AVPacketWrapper();
- virtual ~AVPacketWrapper();
- void free();
-
- AVPacket packet;
-};
-
-
-AVPacketWrapper::AVPacketWrapper()
-{
- packet.destruct = NULL;
-}
-
-/* virtual */
-AVPacketWrapper::~AVPacketWrapper()
-{
- free();
-}
-
-void AVPacketWrapper::free()
-{
- av_free_packet(&packet);
-}
-
-
-//bool libav::b_is_one_time_inited = false;
-
-/////////////////////////
-// decoder methods //
-/////////////////////////
-
-libav::decoder::decoder(PixelFormat pixelFormat)
- : isOpen(false)
-{
- Sctx = NULL;
- pRaw = NULL;
- pFrameRGB = NULL;
- pCtx = NULL;
- container = NULL;
- buffer = NULL;
- blank = NULL;
- format = pixelFormat;
- mutex.lock();
- initialize();
- mutex.unlock();
-}
-
-
-
-void libav::decoder::cleanup(){
-
- mutex.lock();
- if (NULL != Sctx) {
- sws_freeContext(Sctx);
- Sctx = NULL;
- }
- if (NULL != pRaw) {
- av_free(pRaw);
- pRaw = NULL;
- }
- if (NULL != pFrameRGB) {
- av_free(pFrameRGB);
- pFrameRGB = NULL;
- }
- if (NULL != pCtx) {
- avcodec_close(pCtx);
- pCtx = NULL;
- }
- if (NULL != container) {
- avformat_close_input(&container);
- container = NULL;
- }
- if (NULL != buffer) {
- av_free(buffer);
- buffer = NULL;
- }
- if (NULL != blank) {
- av_free(blank);
- blank = NULL;
- }
- mutex.unlock();
- /*
- if (NULL != avioContext) {
- av_free(avioContext);
- avioContext = NULL;
- }
- */
- // Don't need to free pCodec?
-
-}
-
-/* virtual */
-libav::decoder::~decoder()
-{
- cleanup();
-}
-
-
-// file name based method for historical continuity
-bool libav::decoder::open(char* fileName, enum PixelFormat formatParam){
-
- if (!avtry( avformat_open_input(&container, fileName, NULL, NULL), string(fileName) ))
- return false;
- return openUsingInitializedContainer(formatParam);
-}
-bool libav::decoder::open(string& fileName, enum PixelFormat formatParam)
-{
- // Open file, check usability
-
- if (!avtry( avformat_open_input(&container, fileName.c_str(), NULL, NULL), fileName ))
- return false;
- return openUsingInitializedContainer(formatParam);
-}
-
-
-bool libav::decoder::openUsingInitializedContainer(enum PixelFormat formatParam)
-{
- format = formatParam;
- sc = getNumberOfChannels();
-
- if (!avtry( avformat_find_stream_info(container, NULL), "Cannot find stream information." ))
- return false;
- if (!avtry( videoStream=av_find_best_stream(container, AVMEDIA_TYPE_VIDEO, -1, -1, &pCodec, 0), "Cannot find a video stream." ))
- return false;
- pCtx=container->streams[videoStream]->codec;
- width = pCtx->width;
- height = pCtx->height;
- if (!avtry( avcodec_open2(pCtx, pCodec, NULL), "Cannot open video decoder." ))
- return false;
-
- /* Frame rate fix for some codecs */
- if( pCtx->time_base.num > 1000 && pCtx->time_base.den == 1 )
- pCtx->time_base.den = 1000;
-
- framerate=(((float)container->streams[videoStream]->r_frame_rate.num)/((float)container->streams[videoStream]->r_frame_rate.den));
-
- //cerr<<"codecContext timebase: "<<(((float)pCtx->time_base.num)/((float)pCtx->time_base.den))<<" videostream framerate: "<<(((float)container->streams[videoStream]->r_frame_rate.num)/((float)container->streams[videoStream]->r_frame_rate.den))<<endl;
-
- //cerr<<"stream frame rate:"<<container->streams[videoStream]->r_frame_rate.num<<"/"<<container->streams[videoStream]->r_frame_rate.den<<endl;
-
- //cerr<<"video duration: "<<container->duration<<endl;
- //cerr<<"video time base: "<<pCtx->time_base.num<<"/"<<pCtx->time_base.den<<endl;
- //cerr<<"AV time base: "<<AV_TIME_BASE<<endl;
-
- /* Compute the total number of frames in the file */
- /* duration is in microsecs */
- //numFrames = (int)(( container->duration / (double)AV_TIME_BASE ) * pCtx->time_base.den + 0.5);
- //this approach just seems wrong!
-
-
-
- numFrames=container->streams[videoStream]->nb_frames-1;
-
- if (numFrames<1){
- //some codecs don't keep this info in the header
- numFrames = (int)(( container->duration / (double)AV_TIME_BASE ) * framerate );
- //this approach still doesn't seem to give quite the right answer- comes out a little too big
- //could alternatively just redefine the length if the reader fails
- }
-
-
-
- init_buffers_and_scaler();
-
- /* Give some info on stderr about the file & stream */
- //dump_format(container, 0, fname, 0);
-
- previousFrameIndex = -1;
- return true;
-}
-bool libav::decoder::reinit_buffers_and_scaler(){
-
- mutex.lock();
- if (NULL != Sctx) {
- sws_freeContext(Sctx);
- Sctx = NULL;
- }
- if (NULL != pRaw) {
- av_free(pRaw);
- pRaw = NULL;
- }
- if (NULL != pFrameRGB) {
- av_free(pFrameRGB);
- pFrameRGB = NULL;
- }
- if (NULL != buffer) {
- av_free(buffer);
- buffer = NULL;
- }
- if (NULL != blank) {
- av_free(blank);
- blank = NULL;
- }
- mutex.unlock();
-
- init_buffers_and_scaler();
-}
-
-bool libav::decoder::init_buffers_and_scaler(){
- /* Get framebuffers */
- if (! (pRaw = avcodec_alloc_frame()) )
- throw std::runtime_error("");
- if (! (pFrameRGB = avcodec_alloc_frame()) )
- throw std::runtime_error("");
-
- /* Create data buffer */
- if (format == PIX_FMT_NONE) {
- numBytes = 0;
- buffer = NULL;
- blank = NULL;
- pFrameRGB = NULL;
- Sctx = NULL;
- }
- else {
- numBytes = avpicture_get_size( format, width, height ); // RGB24 format
- if (! (buffer = (uint8_t*)av_malloc(numBytes + FF_INPUT_BUFFER_PADDING_SIZE)) ) // RGB24 format
- throw std::runtime_error("");
- if (! (blank = (uint8_t*)av_mallocz(avpicture_get_size(pCtx->pix_fmt,width,height))) ) // native codec format
- throw std::runtime_error("");
-
- /* Init buffers */
- avpicture_fill( (AVPicture * ) pFrameRGB, buffer, format,
- width, height );
-
- /* Init scale & convert */
- if (! (Sctx=sws_getContext(
- pCtx->width,
- pCtx->height,
- pCtx->pix_fmt,
- width,
- height,
- format,
- SWS_POINT, // fastest?
- NULL,NULL,NULL)) )
- throw std::runtime_error("");
- }
-}
-
-bool libav::decoder::fetchFrame(int w, int h,int targetFrameIndex)
-{
- if (w!=width||h!=height){
- width=w;
- height=h;
- cerr<<"libav::decoder reiniting to "<<width<<"x"<<height<<endl; //does not seem to be aware of wrong frame
- reinit_buffers_and_scaler();
- }
-
- //seems to crash out on the last frame, if it can be caught should maybe decrement number of frames
-
- return fetchFrame(targetFrameIndex);
-}
-
-bool libav::decoder::fetchFrame(int targetFrameIndex)
-{
- if ((targetFrameIndex < 0) || (targetFrameIndex > numFrames))
- return false;
- if (targetFrameIndex == (previousFrameIndex + 1)) {
- if (! readNextFrame(targetFrameIndex+1)) //frame indexing starts at 1
- return false;
- }
- else {
- int64_t response=seekToFrame(targetFrameIndex+1); //frame indexing starts at 1
- if (response < 0)
- return false;
- if (response!=targetFrameIndex+1) {
- cerr<<"libav::decoder asked for "<<targetFrameIndex<<", got "<<(response-1)<<endl; //does not seem to be aware of wrong frame
- }
- }
- previousFrameIndex = targetFrameIndex;
- return true;
-}
-
-// \returns current frame on success, otherwise -1
-int libav::decoder::seekToFrame(int targetFrameIndex)
-{
- int64_t duration = container->streams[videoStream]->duration;
- int64_t ts = av_rescale(duration,targetFrameIndex,numFrames);
- int64_t tol = av_rescale(duration,1,2*numFrames);
- if ( (targetFrameIndex < 0) || (targetFrameIndex >= numFrames) ) {
- return -1;
- }
- int result = avformat_seek_file( container, //format context
- videoStream,//stream id
- 0, //min timestamp 0?
- ts, //target timestamp
- ts, //max timestamp
- 0);//flags AVSEEK_FLAG_ANY //
-
- if (result< 0)
- return -1;
-
- avcodec_flush_buffers(pCtx);
- //if (! readNextFrame(targetFrameIndex))
- // return -1;
-
- return targetFrameIndex;
-}
-
-// \returns current frame on success, otherwise -1
-int libav::decoder::seekToFrameNew(int targetFrameIndex)
-{
- int64_t duration = container->streams[videoStream]->duration;
- int64_t ts = av_rescale(duration,targetFrameIndex,numFrames);
- int64_t tol = av_rescale(duration,1,2*numFrames);
- if ( (targetFrameIndex < 0) || (targetFrameIndex >= numFrames) ) {
- return -1;
- }
-
- int flags = AVSEEK_FLAG_BACKWARD;
- if (ts > 0 && ts < duration)
- flags |= AVSEEK_FLAG_ANY; // H.264 I frames don't always register as "key frames" in FFmpeg
-
- int ret = av_seek_frame(container, videoStream, ts, flags);
- if (ret < 0)
- ret = av_seek_frame(container, videoStream, ts, AVSEEK_FLAG_ANY);
-
-
- if (ret< 0)
- return -1;
-
- return targetFrameIndex;
-}
-
-bool libav::decoder::readNextFrame(int targetFrameIndex)
-{
- AVPacket packet = {0};
- //av_init_packet(&packet); //moved insode loop 100913
- bool result = readNextFrameWithPacket(targetFrameIndex, packet, pRaw);
- //av_free_packet(&packet); //moved insode loop 100913
- return result;
-}
-
-// WARNING this method can raise an exception
-bool libav::decoder::readNextFrameWithPacket(int targetFrameIndex, AVPacket& packet, AVFrame* pYuv)
-{
- int finished = 0;
- do {
- finished = 0;
- //av_free_packet(&packet); //moved insode loop 100913
- int result;
- //if (!avtry(av_read_frame( container, &packet ), "Failed to read frame"))
- if (!avtry(av_read_packet( container, &packet ), "Failed to read packet"))
- return false; // !!NOTE: see docs on packet.convergence_duration for proper seeking
- if( packet.stream_index != videoStream ) /* Is it what we're trying to parse? */
- continue;
- if (!avtry(avcodec_decode_video2( pCtx, pYuv, &finished, &packet ), "Failed to decode video"))
- return false;
- // handle odd cases and debug
- if((pCtx->codec_id==CODEC_ID_RAWVIDEO) && !finished)
- {
- avpicture_fill( (AVPicture * ) pYuv, blank, pCtx->pix_fmt,width, height ); // set to blank frame
- finished = 1;
- }
-#if 0 // very useful for debugging, very
- cout << "Packet - pts:" << (int)packet.pts;
- cout << " dts:" << (int)packet.dts;
- cout << " - flag: " << packet.flags;
- cout << " - finished: " << finished;
- cout << " - Frame pts:" << (int)pYuv->pts;
- cout << " " << (int)pYuv->best_effort_timestamp;
- cout << endl;
- /* printf("Packet - pts:%5d dts:%5d (%5d) - flag: %1d - finished: %3d - Frame pts:%5d %5d\n",
- (int)packet.pts,(int)packet.dts,
- packet.flags,finished,
- (int)pYuv->pts,(int)pYuv->best_effort_timestamp); */
-#endif
- if(!finished) {
- if (packet.pts == AV_NOPTS_VALUE)
- packet.pts = 0;
- //throw std::runtime_error("");
- //why does it want to throw an error here, isn't the frame succesfully decoded?
- //
- //when we allow these packets through we get
- //[swscaler @ 0x9ef0c80] bad src image pointers
- //trying to ignore timestamp below
- if (packet.size == 0) // packet.size==0 usually means EOF
- break;
- }
- av_free_packet(&packet);
- } while ( (!finished) || (pYuv->best_effort_timestamp < targetFrameIndex));
- // } while (!finished);
-
- //av_free_packet(&packet); //moved insode loop 100913
-
- if (format != PIX_FMT_NONE) {
- sws_scale(Sctx, // sws context
- pYuv->data, // src slice
- pYuv->linesize, // src stride
- 0, // src slice origin y
- pCtx->height, // src slice height
- pFrameRGB->data, // dst
- pFrameRGB->linesize ); // dst stride
- }
-
- previousFrameIndex = targetFrameIndex;
- return true;
-}
-
-uint8_t libav::decoder::getPixelIntensity(int x, int y, Channel c) const
-{
- return *(pFrameRGB->data[0] + y * pFrameRGB->linesize[0] + x * sc + c);
-}
-
-int libav::decoder::getNumberOfFrames() const { return numFrames; }
-
-int libav::decoder::getWidth() const { return width; }
-
-int libav::decoder::getHeight() const { return height; }
-
-int libav::decoder::getNumberOfChannels() const
-{
- switch(format)
- {
- case PIX_FMT_BGRA:
- return 4;
- break;
- case PIX_FMT_RGB24:
- return 3;
- break;
- case PIX_FMT_GRAY8:
- return 1;
- break;
- default:
- return 0;
- break;
- }
- return 0;
-}
-
-void libav::decoder::initialize()
-{
- Sctx = NULL;
- pRaw = NULL;
- pFrameRGB = NULL;
- pCtx = NULL;
- container = NULL;
- buffer = NULL;
- blank = NULL;
- pCodec = NULL;
- format = PIX_FMT_NONE;
- //network stuff
- //reply = NULL;
- //ioBuffer = NULL;
- //avioContext = NULL;
- maybeInitFFMpegLib();
-}
-
-
-bool libav::decoder::avtry(int result, const std::string& msg) {
- if ((result < 0) && (result != AVERROR_EOF)) {
- char buf[1024];
- av_strerror(result, buf, sizeof(buf));
- std::string message = std::string("libav::Error: ") + msg + " "+ buf;
- //qDebug() << QString(message.c_str());
- cerr<<message<<endl;
- return false;
- }
- return true;
-}
-
void libav::video_decoder::cleanup(){
if (loaded) {
mutex.lock();
@@ -646,346 +109,7 @@ bool libav::audio_decoder::open(const std::string& filename){
loaded=false;
}
}
-///////////////////////////
-// encoder methods //
-///////////////////////////
-
-/*
-libav::encoder::encoder(const char * file_name, int width, int height, float _framerate,enum AVCodecID codec_id)
- : picture_yuv(NULL)
- , picture_rgb(NULL)
- , container(NULL)
-{
- //multiply float seconds by this to get pts
- timebase=((float)AV_TIME_BASE_Q.den)/(AV_TIME_BASE_Q.num*_framerate*3.125f); //no idea where the 3.125 comes from
- if (0 != (width % 2))
- cerr << "WARNING: Video width is not a multiple of 2" << endl;
- if (0 != (height % 2))
- cerr << "WARNING: Video height is not a multiple of 2" << endl;
-
- maybeInitFFMpegLib();
-
- container = avformat_alloc_context();
- if (NULL == container)
- throw std::runtime_error("Unable to allocate format context");
-
- AVOutputFormat * fmt = av_guess_format(NULL, file_name, NULL);
- if (!fmt)
- fmt = av_guess_format("mpeg", NULL, NULL);
- if (!fmt)
- throw std::runtime_error("Unable to deduce video format");
- container->oformat = fmt;
-
- fmt->video_codec = codec_id;
- // fmt->video_codec = CODEC_ID_H264; // fails to write
-
- video_st = avformat_new_stream(container, NULL);
-
- pCtx = video_st->codec;
- pCtx->codec_id = fmt->video_codec;
- pCtx->codec_type = AVMEDIA_TYPE_VIDEO;
- // resolution must be a multiple of two
- pCtx->width = width;
- pCtx->height = height;
-
- // bit_rate determines image quality
- pCtx->bit_rate = width * height * 4; // ?
- // pCtx->qmax = 50; // no effect?
-
- // "high quality" parameters from http://www.cs.ait.ac.th/~on/mplayer/pl/menc-feat-enc-libavcodec.html
- // vcodec=mpeg4:mbd=2:mv0:trell:v4mv:cbp:last_pred=3:predia=2:dia=2:vmax_b_frames=2:vb_strategy=1:precmp=2:cmp=2:subcmp=2:preme=2:vme=5:naq:qns=2
- if (false) // does not help
- // if (pCtx->codec_id == CODEC_ID_MPEG4)
- {
- pCtx->mb_decision = 2;
- pCtx->last_predictor_count = 3;
- pCtx->pre_dia_size = 2;
- pCtx->dia_size = 2;
- pCtx->max_b_frames = 2;
- pCtx->b_frame_strategy = 2;
- pCtx->trellis = 2;
- pCtx->compression_level = 2;
- pCtx->global_quality = 300;
- pCtx->pre_me = 2;
- pCtx->mv0_threshold = 1;
- // pCtx->quantizer_noise_shaping = 2; // deprecated
- // TODO
- }
-
- pCtx->time_base = (AVRational){1, 25}; /////TODO FIX TO SUPPORT OTHER RATES
- // pCtx->time_base = (AVRational){1, 10};
- pCtx->gop_size = 12; // emit one intra frame every twelve frames
- // pCtx->max_b_frames = 0;
- pCtx->pix_fmt = PIX_FMT_YUV420P;
- if (fmt->flags & AVFMT_GLOBALHEADER)
- pCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
-
- if (pCtx->codec_id == CODEC_ID_H264)
- {
- // http://stackoverflow.com/questions/3553003/encoding-h-264-with-libavcodec-x264
- pCtx->coder_type = 1; // coder = 1
- pCtx->flags|=CODEC_FLAG_LOOP_FILTER; // flags=+loop
- pCtx->me_cmp|= 1; // cmp=+chroma, where CHROMA = 1
- // pCtx->partitions|=X264_PART_I8X8+X264_PART_I4X4+X264_PART_P8X8+X264_PART_B8X8; // partitions=+parti8x8+parti4x4+partp8x8+partb8x8
- pCtx->me_method=ME_HEX; // me_method=hex
- pCtx->me_subpel_quality = 7; // subq=7
- pCtx->me_range = 16; // me_range=16
- pCtx->gop_size = 250; // g=250
- pCtx->keyint_min = 25; // keyint_min=25
- pCtx->scenechange_threshold = 40; // sc_threshold=40
- pCtx->i_quant_factor = 0.71; // i_qfactor=0.71
- pCtx->b_frame_strategy = 1; // b_strategy=1
- pCtx->qcompress = 0.6; // qcomp=0.6
- pCtx->qmin = 10; // qmin=10
- pCtx->qmax = 51; // qmax=51
- pCtx->max_qdiff = 4; // qdiff=4
- pCtx->max_b_frames = 3; // bf=3
- pCtx->refs = 3; // refs=3
- // pCtx->directpred = 1; // directpred=1
- pCtx->trellis = 1; // trellis=1
- // pCtx->flags2|=CODEC_FLAG2_BPYRAMID+CODEC_FLAG2_MIXED_REFS+CODEC_FLAG2_WPRED+CODEC_FLAG2_8X8DCT+CODEC_FLAG2_FASTPSKIP; // flags2=+bpyramid+mixed_refs+wpred+dct8x8+fastpskip
- // pCtx->weighted_p_pred = 2; // wpredp=2
- // libx264-main.ffpreset preset
- // pCtx->flags2|=CODEC_FLAG2_8X8DCT;
- // pCtx->flags2^=CODEC_FLAG2_8X8DCT; // flags2=-dct8x8
- }
-
- AVCodec * codec = avcodec_find_encoder(pCtx->codec_id);
- if (NULL == codec)
- throw std::runtime_error("Unable to find Mpeg4 codec");
- if (codec->pix_fmts)
- pCtx->pix_fmt = codec->pix_fmts[0];
- {
- //QMutexLocker lock(&decoder::mutex);
- mutex.lock();
- if (avcodec_open2(pCtx, codec, NULL) < 0)
- throw std::runtime_error("Error opening codec");
- mutex.unlock();
- }
-
- // Get framebuffers //
- if (! (picture_yuv = avcodec_alloc_frame()) ) // final frame format
- throw std::runtime_error("");
- if (! (picture_rgb = avcodec_alloc_frame()) ) // rgb version I can understand easily
- throw std::runtime_error("");
- // the image can be allocated by any means and av_image_alloc() is
- // just the most convenient way if av_malloc() is to be used //
- if ( av_image_alloc(picture_yuv->data, picture_yuv->linesize,
- pCtx->width, pCtx->height, pCtx->pix_fmt, 1) < 0 )
- throw std::runtime_error("Error allocating YUV frame buffer");
- if ( av_image_alloc(picture_rgb->data, picture_rgb->linesize,
- pCtx->width, pCtx->height, PIX_FMT_RGB24, 1) < 0 )
- throw std::runtime_error("Error allocating RGB frame buffer");
-
- // Init scale & convert //
- if (! (Sctx=sws_getContext(
- width,
- height,
- PIX_FMT_RGB24,
- pCtx->width,
- pCtx->height,
- pCtx->pix_fmt,
- SWS_BICUBIC,NULL,NULL,NULL)) )
- throw std::runtime_error("");
-
-//
-//
-// added audio init
- fmt->audio_codec = AV_CODEC_ID_MP3;
- // fmt->video_codec = CODEC_ID_H264; // fails to write
-
- audio_st = avformat_new_stream(container, NULL);
-
- aCtx = audio_st->codec;
- aCtx->codec_id = fmt->audio_codec;
- aCtx->codec_type = AVMEDIA_TYPE_AUDIO;
-
- aCtx->sample_fmt=AV_SAMPLE_FMT_S16P; //s16p is invalid or not supported by aac: S16 not by mp3
- aCtx->channels=2;
- aCtx->sample_rate=44100;
- aCtx->channel_layout=AV_CH_LAYOUT_STEREO;
- aCtx->bit_rate = 64000;
-
-
-
- AVCodec * acodec = avcodec_find_encoder(aCtx->codec_id);
- mutex.lock();
- int ret = avcodec_open2(aCtx, acodec, NULL);
- mutex.unlock();
- if (ret < 0) {
- throw std::runtime_error("Could not open audio codec:");
-
- }
-
- if (aCtx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
- audio_input_frame_size = 10000;
- else
- audio_input_frame_size = aCtx->frame_size;
-
-
- if (container->oformat->flags & AVFMT_GLOBALHEADER)
- aCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
-
-
- audiostep=((float)audio_input_frame_size)/(aCtx->sample_rate);
-
-
-
-
-// are we supposed to use the same codeccontext?
-//
-
- // open the output file //
- if (!(fmt->flags & AVFMT_NOFILE))
- {
- //QMutexLocker lock(&decoder::mutex);
- mutex.lock();
- if (avio_open(&container->pb, file_name, AVIO_FLAG_WRITE) < 0)
- throw std::runtime_error("Error opening output video file");
- mutex.unlock();
- }
- avformat_write_header(container, NULL);
-}
-
-void libav::encoder::setPixelIntensity(int x, int y, int c, uint8_t value)
-{
- uint8_t * ptr = picture_rgb->data[0] + y * picture_rgb->linesize[0] + x * 3 + c;
- *ptr = value;
-}
-
-void libav::encoder::write_frame(float seconds,uint8_t *rgbdata)
-{
- picture_rgb->data[0]=rgbdata;
-
- // convert from RGB24 to YUV
- sws_scale(Sctx, // sws context
- picture_rgb->data, // src slice
- picture_rgb->linesize, // src stride
- 0, // src slice origin y
- pCtx->height, // src slice height
- picture_yuv->data, // dst
- picture_yuv->linesize ); // dst stride
-
- // encode the image //
- // use non-deprecated avcodec_encode_video2(...)
- AVPacket packet={0};
- av_init_packet(&packet);
- packet.data = NULL;
- packet.size = 0;
-
- //no time stamps as is
- //http://dranger.com/ffmpeg/tutorial07.html
-
- picture_yuv->pts=(uint64_t)(seconds*timebase); //
-
- int got_packet;
- int ret = avcodec_encode_video2(pCtx,
- &packet,
- picture_yuv,
- &got_packet);
-
- //packet.pts=(uint64_t)(seconds*timebase); //added 0606
- packet.stream_index = video_st->index;; //added 0606
-
- if (ret < 0)
- throw std::runtime_error("Video encoding failed");
- if (got_packet)
- {
- // std::cout << "encoding frame" << std::endl;
- int result = av_write_frame(container, &packet);
- av_destruct_packet(&packet);
- }
-}
-void libav::encoder::write_frame(float seconds,uint16_t *audiodata){
- audio_frame = avcodec_alloc_frame();
- AVPacket pkt = { 0 }; // data and size must be 0;
- int got_packet, ret;
- av_init_packet(&pkt);
- audio_frame->nb_samples = audio_input_frame_size;
- uint8_t *sampleptr;
- int bufsize=audio_input_frame_size * av_get_bytes_per_sample(aCtx->sample_fmt) *aCtx->channels;
- if (audiodata) {
- sampleptr=(uint8_t*)audiodata;
- }
- else {
- sampleptr=new uint8_t[bufsize];
- memset(sampleptr,0,bufsize);
- }
-
- audio_frame->pts=(uint64_t)(seconds*timebase); //
-
- avcodec_fill_audio_frame(audio_frame, aCtx->channels, aCtx->sample_fmt,
- sampleptr,
- audio_input_frame_size *
- av_get_bytes_per_sample(aCtx->sample_fmt) *
- aCtx->channels, 0); //;
-
-
-
- ret = avcodec_encode_audio2(aCtx, &pkt, audio_frame, &got_packet);
-
- pkt.stream_index = audio_st->index; //hardcoded stream index added 0606
- //pkt.pts=(uint64_t)(seconds*timebase); //added 060613
-
- if (!audiodata) {
- delete[] sampleptr;
- }
- if (ret < 0) {
- throw std::runtime_error("Audio encoding failed");
- }
-
- if (!got_packet)
- return;
-
- // ? pkt.stream_index = st->index;
-
- ret = av_interleaved_write_frame(container, &pkt);
- avcodec_free_frame(&audio_frame);
-}
-
-// virtual //
-libav::encoder::~encoder()
-{
-
- //avcodec_flush_buffers(pCtx); ???? from exporter version
-
-
- int result = av_write_frame(container, NULL); // flush
- result = av_write_trailer(container);
- //QMutexLocker lock(&decoder::mutex);
- mutex.lock();
- avio_close(container->pb);
- mutex.unlock();
-
- //added 0706
- video_st=nullptr;
- audio_st=nullptr;
- //
-
- for (int i = 0; i < container->nb_streams; ++i) {
- av_freep(container->streams[i]); //CRASHING HERE ON STREAM 1, OUTPUT IS VALID BUT AUDIO INAUDIBLE - 060613
- }
- av_free(container);
- container = nullptr;
- //QMutexLocker lock(&decoder::mutex);
- mutex.lock();
- avcodec_close(aCtx);
- avcodec_close(pCtx);
- mutex.unlock();
- av_free(pCtx);
- pCtx = NULL;
- av_free(aCtx);
- aCtx=nullptr;
- av_free(picture_yuv->data[0]);
- av_free(picture_yuv);
- picture_yuv = NULL;
- av_free(picture_rgb->data[0]);
- av_free(picture_rgb);
- picture_rgb = NULL;
-
-}
-*/
bool libav::exporter::setup(int w,int h, int bitRate, int frameRate, std::string container){
maybeInitFFMpegLib();
@@ -1386,45 +510,7 @@ void libav::exporter::open_video(AVFormatContext *oc, AVCodec *codec, AVStream *
void libav::exporter::write_audio_frame(AVFormatContext *oc, AVStream *st,AVPacket *pkt)
{
- /*
- AVCodecContext *c;
- AVPacket pkt = { 0 }; // data and size must be 0;
- AVFrame *frame = avcodec_alloc_frame();
- int got_packet, ret;
-
- av_init_packet(&pkt);
- c = st->codec;
-
- //get_audio_frame(samples, audio_input_frame_size, c->channels);
- frame->nb_samples = audio_input_frame_size;
- uint8_t *sampleptr;
- int bufsize=audio_input_frame_size * av_get_bytes_per_sample(c->sample_fmt) *c->channels;
- if (samples) {
- sampleptr=(uint8_t*)samples;
- }
- else {
- sampleptr=new uint8_t[bufsize];
- memset(sampleptr,0,bufsize);
- }
- avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
- sampleptr,
- audio_input_frame_size *
- av_get_bytes_per_sample(c->sample_fmt) *
- c->channels, 1);
-
- ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
- if (!samples) {
- free(sampleptr);
- }
- if (ret < 0) {
- //fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
- exit(1);
- }
-
- if (!got_packet)
- return;
- */
-
+
pkt->stream_index = st->index;
// Write the compressed frame to the media file. //
@@ -1452,40 +538,6 @@ void libav::exporter::open_video(AVFormatContext *oc, AVCodec *codec, AVStream *
AVCodecContext *c = st->codec;
-/*
- if (frame_count >= STREAM_NB_FRAMES) {
- // No more frames to compress. The codec has a latency of a few
- // * frames if using B-frames, so we get the last frames by
- // * passing the same picture again. //
- } else {
- if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
- // as we only generate a YUV420P picture, we must convert it
- // * to the codec pixel format if needed //
- if (!sws_ctx) {
- sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
- c->width, c->height, c->pix_fmt,
- sws_flags, NULL, NULL, NULL);
- if (!sws_ctx) {
- //fprintf(stderr,
- // "Could not initialize the conversion context\n");
- exit(1);
- }
- }
- fill_yuv_image(&src_picture, frame_count, c->width, c->height);
- sws_scale(sws_ctx,
- (const uint8_t * const *)src_picture.data, src_picture.linesize,
- 0, c->height, dst_picture.data, dst_picture.linesize);
- } else {
- fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
- }
- }
-*/
- //always convert RGB to YUV
- //should be context allocated once per render instead of per frame??
- //
- //
- //sws_get_context was here
-
avpicture_fill(&src_picture, pixels, PIX_FMT_RGB24, c->width,c->height);
//avpicture_fill(&dst_picture, outPixels, PIX_FMT_YUV420P, c->width,c->height);
@@ -1506,13 +558,7 @@ void libav::exporter::open_video(AVFormatContext *oc, AVCodec *codec, AVStream *
} else {
AVPacket pkt = { 0 };
int got_packet;
- //av_init_packet(&pkt); ///removed 101013 NOT NECESSARY
-
- // encode the image //
-
- // 2nd time you render it crashes right after here
-
- // where the hell is frame being allocated? is the problem caused by it being freed? (see removeal of avframe_free in cleanup)
+
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
if (ret < 0) {
//fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
@@ -1563,220 +609,4 @@ void libav::exporter::open_video(AVFormatContext *oc, AVCodec *codec, AVStream *
av_free(outPixels); //SIGSEV here???
mutex.unlock();
- }
-
-bool libav::audioloader::setup(const std::string &filename){
-
- maybeInitFFMpegLib();
-
- frame = avcodec_alloc_frame();
- if (!frame)
- {
- std::cout << "Error allocating the frame" << std::endl;
- return false;
- }
-
- formatContext = NULL;
- mutex.lock();
- if (avformat_open_input(&formatContext, filename.c_str(), NULL, NULL) != 0)
- {
- av_free(frame);
- std::cout << "Error opening the file" << std::endl;
- mutex.unlock();
- return false;
- }
- mutex.unlock();
-
- if (avformat_find_stream_info(formatContext, NULL) < 0)
- {
- mutex.lock();
- av_free(frame);
- avformat_close_input(&formatContext);
- mutex.unlock();
- std::cout << "Error finding the stream info" << std::endl;
- return false;
- }
-
- //use the first audio stream found
-
- audioStream = NULL;
- for (unsigned int i = 0; i < formatContext->nb_streams; ++i)
- {
- if (formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
- {
- audioStream = formatContext->streams[i];
- break;
- }
- }
-
- if (audioStream == NULL)
- {
- mutex.lock();
- av_free(frame);
- avformat_close_input(&formatContext);
- mutex.unlock();
- std::cout << "Could not find any audio stream in the file" << std::endl;
- return false;
- }
-
- codecContext = audioStream->codec;
-
- codecContext->codec = avcodec_find_decoder(codecContext->codec_id);
- mutex.lock();
- if (codecContext->codec == NULL)
- {
-
- av_free(frame);
- avformat_close_input(&formatContext);
- mutex.unlock();
- std::cout << "Couldn't find a proper decoder" << std::endl;
- return false;
- }
- else if (avcodec_open2(codecContext, codecContext->codec, NULL) != 0)
- {
- av_free(frame);
- avformat_close_input(&formatContext);
- mutex.unlock();
- std::cout << "Couldn't open the context with the decoder" << std::endl;
- return false;
- }
- mutex.unlock();
-
- av_dump_format(formatContext, 0, 0, false); //avformat.h line 1256
- int samples = ((formatContext->duration + 5000)*codecContext->sample_rate)/AV_TIME_BASE;
-
- std::cout << "This stream has " << codecContext->channels << " channels, a sample rate of " << codecContext->sample_rate << "Hz and "<<samples <<" samples" << std::endl;
- std::cout << "The data is in format " <<codecContext->sample_fmt<< " (aka "<< av_get_sample_fmt_name(codecContext->sample_fmt) << ") "<<std::endl;
-
- isPlanar=(av_sample_fmt_is_planar(codecContext->sample_fmt)==1);
-
- if(isPlanar) { cerr<<"found planar audio"<<endl; }
-
-
- av_init_packet(&packet);
- //sample_processed=0;
- ready=true;
- return true;
- }
-
- AVFrame* libav::audioloader::get_frame() {
-
- if (!ready) return nullptr;
-
- int frameFinished = 0;
- while (!frameFinished) {
- int ret=av_read_frame(formatContext, &packet);
- if (ret<0) {
- std::cerr << "finished with code "<<ret <<(ret==AVERROR_EOF?" ,EOF":"")<<std::endl;
- ready=false;
- return nullptr;
- }
- if (packet.stream_index == audioStream->index)
- {
- //int bytes =
- avcodec_decode_audio4(codecContext, frame, &frameFinished, &packet);
-
- // Some frames rely on multiple packets, so we have to make sure the frame is finished before
- // we can use it
- }
- // You *must* call av_free_packet() after each call to av_read_frame() or else you'll leak memory
- av_free_packet(&packet);
- }
- return frame;
-
- }
-uint16_t* libav::audioloader::get_samples(int num){ //presumes 16bpc here and outputs interleaved samples
- //std::cerr << "request "<<num<<" samples: "<<(ready?"ready":"not ready")<<std::endl;
- //if(!ready) return nullptr;
- //shuffle down samples
-
- if (sample_start>0){
- for (int i=0;i<sample_end-sample_start;i++){
- for (int j=0;j<channels;j++) {
- buffer[(i*channels)+j]=buffer[((sample_start+i)*channels)+j];
- }
- }
- sample_start=sample_end-sample_start;
- }
-
- sample_end=sample_start;
- while (sample_end<num) {
- frame=get_frame();
- if (frame) {
- channels=av_frame_get_channels(frame); //will always reach here 1st
- if (((sample_end+std::max(num,frame->nb_samples))*channels)>buffer.size()){
- int m=buffer.size();
- int s=((sample_end+std::max(num,frame->nb_samples))*channels);
- buffer.reserve(s);
- std::cerr << "audioloader reserved buffer to " << s << std::endl;
- for (int i=m;i<s;i++) buffer.push_back(0);
- }
- for (int i=0;i<frame->nb_samples;i++) {
- for (int j=0;j<channels;j++) {
- //int frame->format
- //format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames, enum AVSampleFormat for audio)
- //int ff=frame->format;
- //uint64_t frame->channel_layout
- //Channel layout of the audio data.
- //uint64_t fcl=frame->channel_layout;
- //int frame->nb_extended_buf
- //Number of elements in extended_buf.
- //int fnb=frame->nb_extended_buf;
- //int frame->decode_error_flags
- //decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there were errors during the decoding.
- //int fde=frame->decode_error_flags;
-
-
- //uint16_t s=((uint16_t*) frame->buf[j]->data)[i];
- uint16_t s;
- if (isPlanar) {
- s=((uint16_t*) frame->buf[j]->data)[i];
- }else {
- s=((uint16_t*) frame->buf[0]->data)[j*channels+i];
- }
-
- //where is audio grunge coming from? signed/ unsigned? doesn't seem to be byte order..
- // add +1 to data subscript with no effect
-
-
- //which? must be determined by format or layout of the channels
- //ALSO some kind of HEINOUS memory leak??
- buffer[((sample_end+i)*frame->channels)+j]=s;
- //buffer[(j*frame->channels)+(sample_end+i)]= ((uint16_t*) frame->buf[j]->data)[i]; ??planar?? nope
- }
- }
- sample_end+=frame->nb_samples;
- }
- else {
- for (int i=sample_end;i<num;i++){
- for (int j=0;j<channels;j++) {
- buffer[(channels*i)+j]=0;
- }
- }
- sample_end=num;
- }
- //std::cerr<<"filling buffer to "<<((sample_end+frame->nb_samples)*frame->channels)<<std::endl;
-
-
- //avcodec_free_frame(&frame);
- }
- if (sample_end>num) {
- sample_start=num;
- }
- else {
- sample_start=0;
- }
- return (uint16_t*)(&buffer[0]);
-}
-
-bool libav::audioloader::close() {
- mutex.lock();
- av_free(frame);
- avcodec_close(codecContext);
- avformat_close_input(&formatContext);
- mutex.unlock();
- ready=false;
- sample_start=0;
- sample_end=0;
- return true;
-}
+ }
\ No newline at end of file
diff --git a/rotord/src/libavwrapper.h b/rotord/src/libavwrapper.h
index 4b24b02..77832a7 100644
--- a/rotord/src/libavwrapper.h
+++ b/rotord/src/libavwrapper.h
@@ -1,40 +1,6 @@
#ifndef libavwrapper_H
#define libavwrapper_H
-/*
- * libavwrapper.h
- * May 2012 Christopher Bruns
- * The libavwrapper class is a C++ wrapper around the poorly documented
- * libavcodec movie API used by ffmpeg. I made extensive use of Nathan
- * Clack's implemention in the whisk project.
- *
- * The libavwrapper.h and libavwrapper.cpp files depend only on the libavcodec
- * and allied sets of libraries. To compartmentalize and reduce dependencies
- * I placed the Vaa3d specific use of this class into a separate set of
- * source files: loadV3dFFMpeg.h/cpp
- */
-
-////////////////////////
-//now that we have guards
-//instead of crashing instantly when the 2nd thread tries to encode a frame, we get an error
-
- //*** Error in `./rotord': corrupted double-linked list: 0x00007f3c31b1b630 ***
-
- //or
-
- //*** Error in `./rotord': double free or corruption (out): 0x00007f3bf8210080 ***
- ///////////////////////
-
-
-//http://blog.tomaka17.com/2012/03/libavcodeclibavformat-tutorial/
-//great to use c++11 features
-
-
-//http://dranger.com/ffmpeg/
-//the mnost up to date tutorial?
-
-//https://github.com/lbrandy/ffmpeg-fas/tree/master
-//try this!
#ifndef UINT64_C
#define UINT64_C(c) (c ## ULL)
@@ -47,21 +13,9 @@ extern "C" {
#include <libavformat/avformat.h>
#include <libavutil/pixfmt.h>
#include <libavutil/opt.h>
-//#include <libavutil/imgutils.h>
-//#include <libavutil/samplefmt.h>
-
-#include <libswscale/swscale.h> //?
+#include <libswscale/swscale.h>
}
-/*
-#include <QFile>
-#include <QNetworkAccessManager>
-#include <QMutex>
-#include <QUrl>
-#include <QBuffer>
-*/
-
-
#include <string>
#include <stdexcept>
#include <iostream>
@@ -72,90 +26,12 @@ extern "C" {
#include <ffms.h>
-
namespace libav {
-
-
-
static bool b_is_one_time_inited=false;
- // Some libavcodec calls are not reentrant
-
+ // Some libavcodec calls are not reentrant
void maybeInitFFMpegLib();
-
static int sws_flags = SWS_BICUBIC;
-// Translated to C++ by Christopher Bruns May 2012
-// from ffmeg_adapt.c in whisk package by Nathan Clack, Mark Bolstadt, Michael Meeuwisse
- class decoder
- {
- public:
- enum Channel {
- RED = 0,
- GRAY = 0,
- GREEN = 1,
- BLUE = 2,
- ALPHA = 3
- };
-
-
- decoder(PixelFormat pixelFormat=PIX_FMT_RGB24);
- //decoder(QUrl url, PixelFormat pixelFormat=PIX_FMT_RGB24);
- void cleanup();
- virtual ~decoder();
- //bool open(QUrl url, enum PixelFormat formatParam = PIX_FMT_RGB24);
- //bool open(QIODevice& fileStream, QString& fileName, enum PixelFormat formatParam = PIX_FMT_RGB24);
- bool reinit_buffers_and_scaler();
- bool init_buffers_and_scaler();
- uint8_t getPixelIntensity(int x, int y, Channel c = GRAY) const;
- bool fetchFrame(int targetFrameIndex = 0);
- bool fetchFrame(int w,int h,int targetFrameIndex = 0);
- int getNumberOfFrames() const;
- int getWidth() const;
- int getHeight() const;
- float getFrameRate() const{return framerate;};
- int getNumberOfChannels() const;
- bool readNextFrame(int targetFrameIndex = 0);
- bool readNextFrameWithPacket(int targetFrameIndex, AVPacket& packet, AVFrame* pYuv);
- int seekToFrame(int targetFrameIndex = 0);
- int seekToFrameNew(int targetFrameIndex = 0);
-
- // make certain members public, for use by Fast3DTexture class
- AVFrame *pFrameRGB;
- AVFrame *pRaw;
- AVFormatContext *container;
- AVCodecContext *pCtx;
- int videoStream;
- int previousFrameIndex;
- bool isOpen;
-
-
- bool open(std::string& fileName, enum PixelFormat formatParam = PIX_FMT_RGB24);
- bool open(char* fileName, enum PixelFormat formatParam = PIX_FMT_RGB24);
-
- protected:
-
-
- void initialize();
-
- bool openUsingInitializedContainer(enum PixelFormat formatParam = PIX_FMT_RGB24 );
- static bool avtry(int result, const std::string& msg);
-
- AVCodec *pCodec;
- uint8_t *buffer,
- *blank;
- //struct
- SwsContext *Sctx;
- int width, height;
- PixelFormat format;
- size_t numBytes;
- int numFrames;
- int sc; // number of color channels
- float framerate;
- //NB representing framerate as a float implies that
- //ABOVE ~93 HOURS AT 25FPS the calculations will be inaccurate
-
- };
-
class video_decoder
{
public:
@@ -283,40 +159,6 @@ namespace libav {
bool loaded;
};
-/*
- // TODO - finish refactoring based on
- // http://svn.gnumonks.org/trunk/21c3-video/ffmpeg/ffmpeg-0.4.9-pre1/output_example.c
- class encoder
- {
- public:
- //typedef encoder::Channel Channel;
-
- encoder(const char * file_name, int width, int height, float _framerate=25.0f, enum AVCodecID codec_id = CODEC_ID_H264);
- virtual ~encoder();
- void setPixelIntensity(int x, int y, int c, uint8_t value);
- void write_frame(float seconds,uint8_t *rgbdata);
- void write_frame(float seconds,uint16_t *audiodata);
- int get_audio_framesize(){ return audio_input_frame_size; }
- float get_audio_step(){return audiostep;};
-
- protected:
- AVFormatContext *container;
- AVCodecContext *pCtx;
- AVFrame *picture_yuv;
- AVFrame *picture_rgb;
- AVFrame *audio_frame;
- float timebase;
- struct SwsContext *Sctx;
-
- AVStream *audio_st;
- AVStream *video_st;
-
- AVCodecContext *aCtx;
- int audio_input_frame_size;
- float audiostep;
- };
-*/
-
class exporter {
public:
exporter(){
@@ -385,31 +227,6 @@ namespace libav {
};
- class audioloader{
- public:
- audioloader(){ready=false;sample_start=0;sample_end=0;};
- bool setup(const std::string &filename);
- AVFrame* get_frame();
- uint16_t* get_samples(int num);
- bool close();
- bool ready;
-
- AVCodecContext* codecContext;
- AVFormatContext* formatContext;
- int channels; //necessary to handle final packet -- unititialised after load/ problem?
- private:
- std::vector<uint16_t> buffer;
- AVFrame* frame;
-
- AVStream* audioStream;
-
- AVPacket packet;
- int sample_end;
- int sample_start;
- bool isPlanar;
-
- };
-
}
diff --git a/rotord/src/rotor.cpp b/rotord/src/rotor.cpp
index d26cdec..d75b9ed 100644
--- a/rotord/src/rotor.cpp
+++ b/rotord/src/rotor.cpp
@@ -79,60 +79,3 @@ float Parameter::get(const Time_spec& time){ //gets input and updates variable
}
return value;
}
-
-bool _Video_loader::load(const string &_filename){
- Logger& logger = Logger::get("Rotor");
- if (isLoaded) {
- player.cleanup(); ///should be in decoder class?
- isLoaded=false;
- }
- Poco::Path path;
- string uri="file://"+_filename;
- isLoaded=player.open(uri);
- if (isLoaded){
- logger.information("Video_loader loaded "+_filename+": "\
- +toString(player.getNumberOfFrames())+" frames, "\
- +toString(player.getFrameRate())+" fps, "\
- +toString(player.getWidth())+"x"+toString(player.getHeight())\
- +", channels:"+toString(player.getNumberOfChannels()));
- return true;
- }
-
- logger.error("Video_loader failed to load "+_filename);
-
- return false;
-}
-Image* _Video_loader::output(const Frame_spec &frame){
-
- if (isLoaded){
- //this approach is running into the inability to seek when requesting playback speed > 1.
- //need to cache frames so as to avoid asking for a frame other than the next one.
- //need an algorithm to find the previous keyframe and seek forward
-
- float clipframerate=(parameters["framerate"]->value==0.0f?player.getFrameRate():parameters["framerate"]->value);
-
- float clipspeed=(clipframerate/frame.framerate)*parameters["speed"]->value;
-
- int wanted;
- if (attributes["mode"]->intVal==VIDEOFRAMES_frame) {
- wanted=(((int) ((frame.time*frame.framerate)+0.5))%max(1,player.getNumberOfFrames()-1))+1; //+1 is necessary because 1st frame in a video is number 1?
- }
- if (attributes["mode"]->intVal==VIDEOFRAMES_blend) {
- wanted=(((int) ((frame.time*frame.framerate*clipspeed)+0.5))%max(1,player.getNumberOfFrames()-1))+1; //+1 is necessary because 1st frame in a video is number 1?
- }
-
- if (wanted!=lastframe){
- if (!player.fetchFrame(frame.w,frame.h,wanted)) { //seek fail
- Logger& logger = Logger::get("Rotor");
- logger.error("Video_loader failed to seek frame "+toString(wanted)+" of "+attributes["filename"]->value);
-
- if (image.w>0) return &image; //just return the previous frame if possible
- else return nullptr;
- }
- image.setup_fromRGB(frame.w,frame.h,player.pFrameRGB->data[0],player.pFrameRGB->linesize[0]-(frame.w*3));
- lastframe=wanted;
- }
- return &image;
- }
- return nullptr;
-};
diff --git a/rotord/src/rotor.h b/rotord/src/rotor.h
index 447ba51..d7480c2 100644
--- a/rotord/src/rotor.h
+++ b/rotord/src/rotor.h
@@ -878,33 +878,6 @@ namespace Rotor {
};
#define VIDEOFRAMES_frame 1
#define VIDEOFRAMES_blend 2
- class _Video_loader: public Image_node {
- public:
- _Video_loader(){
- create_parameter("speed","number","video playback speed","Speed",1.0f,0.0f,0.0f);
- create_parameter("framerate","number","framerate override","Frame rate",0.0f,0.0f,0.0f);
- create_attribute("filename","name of video file to load","File name","");
- create_attribute("mode","frame mode","Mode","frame",{"frame","blend"});
- title="Video loader";
- description="Loads a video file";
- };
- _Video_loader(map<string,string> &settings): _Video_loader() {
- base_settings(settings);
- isLoaded=false;
- if (attributes["filename"]->value!="") {
- load(find_setting(settings,"media_path","")+attributes["filename"]->value);
- }
- lastframe=0;
- };
- ~_Video_loader(){};
- bool load(const string &filename);
- Image *output(const Frame_spec &frame);
- _Video_loader* clone(map<string,string> &_settings) { return new _Video_loader(_settings);};
- bool isLoaded;
- private:
- libav::decoder player;
- int lastframe;
- };
class Video_loader: public Image_node {
public:
Video_loader(){