-rwxr-xr-x [-rw-r--r--]   rotord/01.xml        2
-rw-r--r--                rotord/avCodec.cpp   574
-rw-r--r--                rotord/avCodec.h     35
-rwxr-xr-x [-rw-r--r--]   rotord/rotor.cpp     57
-rwxr-xr-x                rotord/rotor.h       68
-rwxr-xr-x                rotord/rotord.h      1
6 files changed, 55 insertions, 682 deletions
diff --git a/rotord/01.xml b/rotord/01.xml
index e351184..f3abb80 100644..100755
--- a/rotord/01.xml
+++ b/rotord/01.xml
@@ -7,7 +7,7 @@
<node ID="03" type="divide" amount="2.0" output="signal">signal divide
<signal_input from="01">signal to divide</signal_input>
</node>
- <node ID="04" type="==" output="signal">outputs 0 except when signal first passes a new integer: then 1
+ <node ID="04" type="bang" output="signal">outputs a single 1 every time signal enters a new number
<signal_input from="03">signal to analyse</signal_input>
</node>
<node ID="05" type="signal_output">outputs data when changed
diff --git a/rotord/avCodec.cpp b/rotord/avCodec.cpp
deleted file mode 100644
index 36b92a6..0000000
--- a/rotord/avCodec.cpp
+++ /dev/null
@@ -1,574 +0,0 @@
-#include "avCodec.h"
-
-#define INBUF_SIZE 4096
-#define AUDIO_INBUF_SIZE 20480
-#define AUDIO_REFILL_THRESH 4096
-
-/* check that a given sample format is supported by the encoder */
-static int avCodec::check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
-{
- const enum AVSampleFormat *p = codec->sample_fmts;
-
- while (*p != AV_SAMPLE_FMT_NONE) {
- if (*p == sample_fmt)
- return 1;
- p++;
- }
- return 0;
-}
-
-/* just pick the highest supported samplerate */
-static int avCodec::select_sample_rate(AVCodec *codec)
-{
- const int *p;
- int best_samplerate = 0;
-
- if (!codec->supported_samplerates)
- return 44100;
-
- p = codec->supported_samplerates;
- while (*p) {
- best_samplerate = FFMAX(*p, best_samplerate);
- p++;
- }
- return best_samplerate;
-}
-
-/* select layout with the highest channel count */
-static int avCodec::select_channel_layout(AVCodec *codec)
-{
- const uint64_t *p;
- uint64_t best_ch_layout = 0;
- int best_nb_channells = 0;
-
- if (!codec->channel_layouts)
- return AV_CH_LAYOUT_STEREO;
-
- p = codec->channel_layouts;
- while (*p) {
- int nb_channels = av_get_channel_layout_nb_channels(*p);
-
- if (nb_channels > best_nb_channells) {
- best_ch_layout = *p;
- best_nb_channells = nb_channels;
- }
- p++;
- }
- return best_ch_layout;
-}
-
-/*
- * Audio encoding example
- */
-static void avCodec::audio_encode_example(const char *filename)
-{
- AVCodec *codec;
- AVCodecContext *c= NULL;
- AVFrame *frame;
- AVPacket pkt;
- int i, j, k, ret, got_output;
- int buffer_size;
- FILE *f;
- uint16_t *samples;
- float t, tincr;
-
- printf("Encode audio file %s\n", filename);
-
- /* find the MP2 encoder */
- codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
- if (!codec) {
- fprintf(stderr, "Codec not found\n");
- exit(1);
- }
-
- c = avcodec_alloc_context3(codec);
- if (!c) {
- fprintf(stderr, "Could not allocate audio codec context\n");
- exit(1);
- }
-
- /* put sample parameters */
- c->bit_rate = 64000;
-
- /* check that the encoder supports s16 pcm input */
- c->sample_fmt = AV_SAMPLE_FMT_S16;
- if (!check_sample_fmt(codec, c->sample_fmt)) {
- fprintf(stderr, "Encoder does not support sample format %s",
- av_get_sample_fmt_name(c->sample_fmt));
- exit(1);
- }
-
- /* select other audio parameters supported by the encoder */
- c->sample_rate = select_sample_rate(codec);
- c->channel_layout = select_channel_layout(codec);
- c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
-
- /* open it */
- if (avcodec_open2(c, codec, NULL) < 0) {
- fprintf(stderr, "Could not open codec\n");
- exit(1);
- }
-
- f = fopen(filename, "wb");
- if (!f) {
- fprintf(stderr, "Could not open %s\n", filename);
- exit(1);
- }
-
- /* frame containing input raw audio */
- frame = avcodec_alloc_frame();
- if (!frame) {
- fprintf(stderr, "Could not allocate audio frame\n");
- exit(1);
- }
-
- frame->nb_samples = c->frame_size;
- frame->format = c->sample_fmt;
- frame->channel_layout = c->channel_layout;
-
- /* the codec gives us the frame size, in samples,
- * we calculate the size of the samples buffer in bytes */
- buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
- c->sample_fmt, 0);
- samples = (uint16_t*)av_malloc(buffer_size);
- if (!samples) {
- fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
- buffer_size);
- exit(1);
- }
- /* setup the data pointers in the AVFrame */
- ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
- (const uint8_t*)samples, buffer_size, 0);
- if (ret < 0) {
- fprintf(stderr, "Could not setup audio frame\n");
- exit(1);
- }
-
- /* encode a single tone sound */
- t = 0;
- tincr = 2 * M_PI * 440.0 / c->sample_rate;
- for(i=0;i<200;i++) {
- av_init_packet(&pkt);
- pkt.data = NULL; // packet data will be allocated by the encoder
- pkt.size = 0;
-
- for (j = 0; j < c->frame_size; j++) {
- samples[2*j] = (int)(sin(t) * 10000);
-
- for (k = 1; k < c->channels; k++)
- samples[2*j + k] = samples[2*j];
- t += tincr;
- }
- /* encode the samples */
- ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
- if (ret < 0) {
- fprintf(stderr, "Error encoding audio frame\n");
- exit(1);
- }
- if (got_output) {
- fwrite(pkt.data, 1, pkt.size, f);
- av_free_packet(&pkt);
- }
- }
-
- /* get the delayed frames */
- for (got_output = 1; got_output; i++) {
- ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
- if (ret < 0) {
- fprintf(stderr, "Error encoding frame\n");
- exit(1);
- }
-
- if (got_output) {
- fwrite(pkt.data, 1, pkt.size, f);
- av_free_packet(&pkt);
- }
- }
- fclose(f);
-
- av_freep(&samples);
- avcodec_free_frame(&frame);
- avcodec_close(c);
- av_free(c);
-}
-
-/*
- * Audio decoding.
- */
-static void avCodec::audio_decode_example(const char *outfilename, const char *filename)
-{
- AVCodec *codec;
- AVCodecContext *c= NULL;
- int len;
- FILE *f, *outfile;
- uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
- AVPacket avpkt;
- AVFrame *decoded_frame = NULL;
-
- av_init_packet(&avpkt);
-
- printf("Decode audio file %s to %s\n", filename, outfilename);
-
- /* find the mpeg audio decoder */
- codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
- if (!codec) {
- fprintf(stderr, "Codec not found\n");
- exit(1);
- }
-
- c = avcodec_alloc_context3(codec);
- if (!c) {
- fprintf(stderr, "Could not allocate audio codec context\n");
- exit(1);
- }
-
- /* open it */
- if (avcodec_open2(c, codec, NULL) < 0) {
- fprintf(stderr, "Could not open codec\n");
- exit(1);
- }
-
- f = fopen(filename, "rb");
- if (!f) {
- fprintf(stderr, "Could not open %s\n", filename);
- exit(1);
- }
- outfile = fopen(outfilename, "wb");
- if (!outfile) {
- av_free(c);
- exit(1);
- }
-
- /* decode until eof */
- avpkt.data = inbuf;
- avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);
-
- while (avpkt.size > 0) {
- int got_frame = 0;
-
- if (!decoded_frame) {
- if (!(decoded_frame = avcodec_alloc_frame())) {
- fprintf(stderr, "Could not allocate audio frame\n");
- exit(1);
- }
- } else
- avcodec_get_frame_defaults(decoded_frame);
-
- len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
- if (len < 0) {
- fprintf(stderr, "Error while decoding\n");
- exit(1);
- }
- if (got_frame) {
- /* if a frame has been decoded, output it */
- int data_size = av_samples_get_buffer_size(NULL, c->channels,
- decoded_frame->nb_samples,
- c->sample_fmt, 1);
- fwrite(decoded_frame->data[0], 1, data_size, outfile);
- }
- avpkt.size -= len;
- avpkt.data += len;
- avpkt.dts =
- avpkt.pts = AV_NOPTS_VALUE;
- if (avpkt.size < AUDIO_REFILL_THRESH) {
- /* Refill the input buffer, to avoid trying to decode
- * incomplete frames. Instead of this, one could also use
- * a parser, or use a proper container format through
- * libavformat. */
- memmove(inbuf, avpkt.data, avpkt.size);
- avpkt.data = inbuf;
- len = fread(avpkt.data + avpkt.size, 1,
- AUDIO_INBUF_SIZE - avpkt.size, f);
- if (len > 0)
- avpkt.size += len;
- }
- }
-
- fclose(outfile);
- fclose(f);
-
- avcodec_close(c);
- av_free(c);
- avcodec_free_frame(&decoded_frame);
-}
-
-/*
- * Video encoding example
- */
-static void avCodec::video_encode_example(const char *filename, int codec_id)
-{
- AVCodec *codec;
- AVCodecContext *c= NULL;
- int i, ret, x, y, got_output;
- FILE *f;
- AVFrame *frame;
- AVPacket pkt;
- uint8_t endcode[] = { 0, 0, 1, 0xb7 };
-
- printf("Encode video file %s\n", filename);
-
- /* find the mpeg1 video encoder */
- codec =avcodec_find_encoder((AVCodecID)codec_id);
- if (!codec) {
- fprintf(stderr, "Codec not found\n");
- exit(1);
- }
-
- c = avcodec_alloc_context3(codec);
- if (!c) {
- fprintf(stderr, "Could not allocate video codec context\n");
- exit(1);
- }
-
- /* put sample parameters */
- c->bit_rate = 400000;
- /* resolution must be a multiple of two */
- c->width = 352;
- c->height = 288;
- /* frames per second */
- c->time_base= (AVRational){1,25};
- c->gop_size = 10; /* emit one intra frame every ten frames */
- c->max_b_frames=1;
- c->pix_fmt = AV_PIX_FMT_YUV420P;
-
- if(codec_id == AV_CODEC_ID_H264)
- av_opt_set(c->priv_data, "preset", "slow", 0);
-
- /* open it */
- if (avcodec_open2(c, codec, NULL) < 0) {
- fprintf(stderr, "Could not open codec\n");
- exit(1);
- }
-
- f = fopen(filename, "wb");
- if (!f) {
- fprintf(stderr, "Could not open %s\n", filename);
- exit(1);
- }
-
- frame = avcodec_alloc_frame();
- if (!frame) {
- fprintf(stderr, "Could not allocate video frame\n");
- exit(1);
- }
- frame->format = c->pix_fmt;
- frame->width = c->width;
- frame->height = c->height;
-
- /* the image can be allocated by any means and av_image_alloc() is
- * just the most convenient way if av_malloc() is to be used */
- ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
- c->pix_fmt, 32);
- if (ret < 0) {
- fprintf(stderr, "Could not allocate raw picture buffer\n");
- exit(1);
- }
-
- /* encode 1 second of video */
- for(i=0;i<25;i++) {
- av_init_packet(&pkt);
- pkt.data = NULL; // packet data will be allocated by the encoder
- pkt.size = 0;
-
- fflush(stdout);
- /* prepare a dummy image */
- /* Y */
- for(y=0;y<c->height;y++) {
- for(x=0;x<c->width;x++) {
- frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
- }
- }
-
- /* Cb and Cr */
- for(y=0;y<c->height/2;y++) {
- for(x=0;x<c->width/2;x++) {
- frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
- frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
- }
- }
-
- frame->pts = i;
-
- /* encode the image */
- ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
- if (ret < 0) {
- fprintf(stderr, "Error encoding frame\n");
- exit(1);
- }
-
- if (got_output) {
- printf("Write frame %3d (size=%5d)\n", i, pkt.size);
- fwrite(pkt.data, 1, pkt.size, f);
- av_free_packet(&pkt);
- }
- }
-
- /* get the delayed frames */
- for (got_output = 1; got_output; i++) {
- fflush(stdout);
-
- ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
- if (ret < 0) {
- fprintf(stderr, "Error encoding frame\n");
- exit(1);
- }
-
- if (got_output) {
- printf("Write frame %3d (size=%5d)\n", i, pkt.size);
- fwrite(pkt.data, 1, pkt.size, f);
- av_free_packet(&pkt);
- }
- }
-
- /* add sequence end code to have a real mpeg file */
- fwrite(endcode, 1, sizeof(endcode), f);
- fclose(f);
-
- avcodec_close(c);
- av_free(c);
- av_freep(&frame->data[0]);
- avcodec_free_frame(&frame);
- printf("\n");
-}
-
-/*
- * Video decoding example
- */
-
-static void avCodec::pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
- char *filename)
-{
- FILE *f;
- int i;
-
- f=fopen(filename,"w");
- fprintf(f,"P5\n%d %d\n%d\n",xsize,ysize,255);
- for(i=0;i<ysize;i++)
- fwrite(buf + i * wrap,1,xsize,f);
- fclose(f);
-}
-
-static int avCodec::decode_write_frame(const char *outfilename, AVCodecContext *avctx,
- AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
-{
- int len, got_frame;
- char buf[1024];
-
- len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
- if (len < 0) {
- fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
- return len;
- }
- if (got_frame) {
- printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
- fflush(stdout);
-
- /* the picture is allocated by the decoder, no need to free it */
- snprintf(buf, sizeof(buf), outfilename, *frame_count);
- pgm_save(frame->data[0], frame->linesize[0],
- avctx->width, avctx->height, buf);
- (*frame_count)++;
- }
- if (pkt->data) {
- pkt->size -= len;
- pkt->data += len;
- }
- return 0;
-}
-
-static void avCodec::video_decode_example(const char *outfilename, const char *filename)
-{
- AVCodec *codec;
- AVCodecContext *c= NULL;
- int frame_count;
- FILE *f;
- AVFrame *frame;
- uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
- AVPacket avpkt;
-
- av_init_packet(&avpkt);
-
- /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
- memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
-
- printf("Decode video file %s to %s\n", filename, outfilename);
-
- /* find the mpeg1 video decoder */
- codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
- if (!codec) {
- fprintf(stderr, "Codec not found\n");
- exit(1);
- }
-
- c = avcodec_alloc_context3(codec);
- if (!c) {
- fprintf(stderr, "Could not allocate video codec context\n");
- exit(1);
- }
-
- if(codec->capabilities&CODEC_CAP_TRUNCATED)
- c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
-
- /* For some codecs, such as msmpeg4 and mpeg4, width and height
- MUST be initialized there because this information is not
- available in the bitstream. */
-
- /* open it */
- if (avcodec_open2(c, codec, NULL) < 0) {
- fprintf(stderr, "Could not open codec\n");
- exit(1);
- }
-
- f = fopen(filename, "rb");
- if (!f) {
- fprintf(stderr, "Could not open %s\n", filename);
- exit(1);
- }
-
- frame = avcodec_alloc_frame();
- if (!frame) {
- fprintf(stderr, "Could not allocate video frame\n");
- exit(1);
- }
-
- frame_count = 0;
- for(;;) {
- avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
- if (avpkt.size == 0)
- break;
-
- /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
- and this is the only method to use them because you cannot
- know the compressed data size before analysing it.
-
- BUT some other codecs (msmpeg4, mpeg4) are inherently frame
- based, so you must call them with all the data for one
- frame exactly. You must also initialize 'width' and
- 'height' before initializing them. */
-
- /* NOTE2: some codecs allow the raw parameters (frame size,
- sample rate) to be changed at any frame. We handle this, so
- you should also take care of it */
-
- /* here, we use a stream based decoder (mpeg1video), so we
- feed decoder and see if it could decode a frame */
- avpkt.data = inbuf;
- while (avpkt.size > 0)
- if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
- exit(1);
- }
-
- /* some codecs, such as MPEG, transmit the I and P frame with a
- latency of one frame. You must do the following to have a
- chance to get the last frame of the video */
- avpkt.data = NULL;
- avpkt.size = 0;
- decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);
-
- fclose(f);
-
- avcodec_close(c);
- av_free(c);
- avcodec_free_frame(&frame);
- printf("\n");
-} \ No newline at end of file
diff --git a/rotord/avCodec.h b/rotord/avCodec.h
deleted file mode 100644
index 23f45f8..0000000
--- a/rotord/avCodec.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// libav includes
-// from /usr/include
-
-extern "C" {
- #ifndef __STDC_CONSTANT_MACROS
- # define __STDC_CONSTANT_MACROS
- #endif
-
- #ifndef UINT64_C
- #define UINT64_C(c) (c ## ULL)
- #endif
-
- #include <math.h>
-
- #include "libavcodec/avcodec.h"
-
- #include "libavutil/opt.h"
- #include "libavutil/channel_layout.h"
- #include "libavutil/common.h"
- #include "libavutil/imgutils.h"
- #include "libavutil/mathematics.h"
- #include "libavutil/samplefmt.h"
-}
-
-namespace avCodec {
- static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt);
- static void audio_decode_example(const char *outfilename, const char *filename);
- static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,char *filename);
- static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,AVFrame *frame, int *frame_count, AVPacket *pkt, int last);
- static int select_channel_layout(AVCodec *codec);
- static void video_encode_example(const char *filename, int codec_id);
- static void audio_encode_example(const char *filename);
- static int select_sample_rate(AVCodec *codec);
- static void video_decode_example(const char *outfilename, const char *filename);
-} \ No newline at end of file
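The two files deleted above are, in essence, the decoding_encoding example that ships with libav/FFmpeg, and they rely on calls that have since been deprecated (avcodec_alloc_frame, avcodec_encode_audio2/avcodec_encode_video2, av_free_packet). Purely as a reference point — this is not part of the commit — the removed encode loops map onto the newer send/receive API roughly as follows (codec setup and most error handling omitted; all names here are illustrative):

extern "C" {
#include <libavcodec/avcodec.h>
}
#include <cstdio>

// Sketch: the removed avcodec_encode_video2()/avcodec_encode_audio2() loops
// expressed with the send/receive API (FFmpeg >= 3.1). Not part of this commit.
static int encode_and_write(AVCodecContext *ctx, const AVFrame *frame,
                            AVPacket *pkt, FILE *out)
{
    // Passing frame == nullptr flushes the encoder, replacing the removed
    // "get the delayed frames" loops.
    int ret = avcodec_send_frame(ctx, frame);
    if (ret < 0)
        return ret;
    while ((ret = avcodec_receive_packet(ctx, pkt)) == 0) {
        fwrite(pkt->data, 1, pkt->size, out);
        av_packet_unref(pkt);   // replaces av_free_packet()
    }
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}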
diff --git a/rotord/rotor.cpp b/rotord/rotor.cpp
index f9c39b3..5736c2f 100644..100755
--- a/rotord/rotor.cpp
+++ b/rotord/rotor.cpp
@@ -19,27 +19,31 @@ void Render_context::runTask() {
work_queue.pop_front();
}
mutex.unlock();
- switch(cmd) {
- case ANALYSE_AUDIO:
- state=ANALYSING_AUDIO;
- vector<Base_audio_processor*> processors;
- processors.push_back(audio_thumb);
- vector<Node*> analysers=graph.find_nodes("audio_analysis");
- for (auto a: analysers) {
- processors.push_back(a);
- }
- if (load_audio(audio_filename,processors)) {
- state=AUDIO_READY;
- }
- else {
- //an error occurred: TODO have to clean up allocated data. autoptr?
- state=IDLE;
- }
- break;
- case RENDER:
- state=RENDERING;
- ///
- break;
+ if(cmd==ANALYSE_AUDIO) {
+ state=ANALYSING_AUDIO;
+ vector<Base_audio_processor*> processors;
+ processors.push_back(audio_thumb);
+ vector<Node*> analysers=graph.find_nodes("audio_analysis");
+ for (auto a: analysers) {
+ processors.push_back(a);
+ }
+ if (load_audio(audio_filename,processors)) {
+ state=AUDIO_READY;
+ }
+ else {
+ //an error occurred: TODO have to clean up allocated data. autoptr?
+ state=IDLE;
+ }
+ }
+ if(cmd==RENDER) {
+ state=RENDERING;
+ if(graph.video_render(output_filename,audio_filename,output_framerate)){
+ state=RENDER_READY;
+ }
+ else {
+ //an error occurred: TODO have to clean up allocated data. autoptr?
+ state=IDLE;
+ }
}
sleep(100);
}
@@ -186,7 +190,7 @@ Command_response Render_context::session_command(const std::vector<std::string>&
framerate=ofToFloat(command[3]);
}
string signal_xml;
- if (graph.signal_render(framerate,signal_xml)){
+ if (graph.signal_render(signal_xml,framerate)){
response.status=HTTPResponse::HTTP_OK;
response.description=signal_xml;
}
@@ -250,7 +254,10 @@ Command_response Render_context::session_command(const std::vector<std::string>&
if (command[0]=="PUT") {
if (command.size()>2) {
if (state==IDLE) {
- output_filename=command[3]; //for now, store session variables in memory
+ output_filename=command[3];
+ if (command.size()>3) {
+ output_framerate=ofToFloat(command[4]);
+ }
add_queue(RENDER);
response.status=HTTPResponse::HTTP_OK;
response.description="<status context='"+command[1]+"'>Starting render: "+command[3]+"</status>\n";
@@ -497,7 +504,7 @@ Node_factory::Node_factory(){
//for now, statically load prototype map in constructor
add_type("audio_analysis",new Audio_analysis());
add_type("divide",new Signal_divide());
- add_type("==",new Is_new_integer());
+ add_type("bang",new Is_new_integer());
add_type("signal_output",new Signal_output());
add_type("video_output",new Video_output());
}
@@ -597,6 +604,6 @@ void Audio_analysis::print_features(){
cerr<<i.second<<" "<<i.first<<endl;
}
}
-bool Video_output::render(const float duration, const float framerate,string &filename){
+bool Video_output::render(const float duration, const float framerate,const string &output_filename,const string &audio_filename){
//render out the network
} \ No newline at end of file
diff --git a/rotord/rotor.h b/rotord/rotor.h
index 699aa8a..b0bfad2 100755
--- a/rotord/rotor.h
+++ b/rotord/rotor.h
@@ -92,7 +92,7 @@ extern "C" {
#include "vampHost.h"
#include "xmlIO.h"
-#include "avCodec.h"
+//#include "avCodec.h"
namespace Rotor {
#define IDLE 0
@@ -183,7 +183,7 @@ namespace Rotor {
class Node{
public:
virtual Node* clone(map<string,string> &_settings)=0;
- UUID uid; //every usable node has a UUID
+ UUID uid; //every usable node has a UUID
int id;
vector<Signal_input*> inputs; //simple node can have signal inputs, output depends on node type
void create_signal_input(const string &description) {inputs.push_back(new Signal_input(description));};
@@ -205,28 +205,17 @@ namespace Rotor {
class Signal_node: public Node{
public:
virtual float get_output(const Time_spec &time) { return 0.0f; };
-
- /*{ //default is to pass through first input, if disconnected returns 0
- cerr << "getting output for " << type << "," << ID << endl;
- if (inputs.size()) {
- if (inputs[0]->connection) return ((Signal_node*)(inputs[0]->connection))->get_output(time);
- }
- return 0.0f;
- }
- */
};
class Image_node: public Node{
public:
- vector<Image_input> image_inputs; //image node also has image inputs and outputs
- Image* get_output(const Time_spec &time){ //sample implementation
-
- //do something with the inputs
-
- //and then
+ vector<Image_input> image_inputs; //image node also has image inputs and outputs
+ Image* get_output(const Time_spec &time){ //sample implementation
+ //do something with the inputs
+ //and then
return ((Image_node*)image_inputs[0].connection)->get_output(time);
}
void get_preview(const Time_spec &time);
- Image* image; //this can be privately allocated or just passed on as the node see fit
+ Image* image; //this can be privately allocated or just passed on as the node see fit
private:
float image_time;
};
@@ -273,7 +262,6 @@ namespace Rotor {
vampHost::Analyser analyser;
};
class Signal_divide: public Signal_node {
- //divides incoming signal by a fixed amount
public:
Signal_divide(){};
Signal_divide(map<string,string> &settings) {
@@ -290,10 +278,6 @@ namespace Rotor {
float divide_amount;
};
class Is_new_integer: public Signal_node {
- //outputs a 1 every time a signal passes a new integer, otherwise a 0.
- //this requires knowing what the framerate is? how to do this?
- //for now, assume 25
- //what to cache? for now, don't cache
public:
Is_new_integer(){};
Is_new_integer(map<string,string> &settings) {
@@ -333,7 +317,7 @@ namespace Rotor {
base_settings(settings);
};
Video_output* clone(map<string,string> &_settings) { return new Video_output(_settings);};
- bool render(const float duration, const float framerate,string &filename);
+ bool render(const float duration, const float framerate,const string &output_filename,const string &audio_filename);
};
//-------------------------------------------------------------------
class Node_factory{
@@ -355,9 +339,9 @@ namespace Rotor {
};
class Graph{
public:
- Graph(){framerate=25.0f;duration=10.0f;loaded = false;};
+ Graph(){duration=10.0f;loaded = false;};
Graph(const string& _uid,const string& _desc){init(_uid,_desc);};
- void init(const string& _uid,const string& _desc){ uid=_uid;description=_desc;framerate=25.0f;duration=10.0f;};
+ void init(const string& _uid,const string& _desc){ uid=_uid;description=_desc;duration=10.0f;};
string uid; //every version of a graph has a UUID, no particular need to actually read its data(?)
//?? is it faster than using strings??
string description;
@@ -375,41 +359,31 @@ namespace Rotor {
}
return nullptr; //can be tested against
};
- bool signal_render(const float _fr,string &signal_xml) {
- if (_fr>.001) framerate=_fr;
+ bool signal_render(string &signal_xml,const float framerate) {
if (find_node("signal_output")) {
Signal_output *signal_output=dynamic_cast<Signal_output*>(find_node("signal_output"));
return signal_output->render(duration,framerate,signal_xml);
}
else return false;
}
- bool video_render(const float _fr,string &filename) {
- //things to manage:
- //audio location is known by render context
- //video_output can generate frames
- //where do these come together?
- if (_fr>.001) framerate=_fr;
+ bool video_render(const string &output_filename,const string &audio_filename,const float framerate) {
if (find_node("video_output")) {
Video_output *video_output=dynamic_cast<Video_output*>(find_node("video_output"));
- return video_output->render(duration,framerate,filename,);
+ return video_output->render(duration,framerate,output_filename,audio_filename);
}
else return false;
}
int load(Poco::UUID uid);
bool load(string &graph_filename);
- UUID save(); //save to DB, returns UUID of saved graph
+ UUID save(); //save to DB, returns UUID of saved graph
bool loaded;
float duration;
const string toString();
private:
Node_factory factory;
- float framerate;
xmlIO xml;
};
class Audio_thumbnailer: public Base_audio_processor {
- //how to deal with the fact that frames don't correspond with pixels?
- //buffer the data somehow
- //draw pixels based on rms value
public:
Audio_thumbnailer(){
height=32;
@@ -428,24 +402,24 @@ namespace Rotor {
uint8_t *data;
int height,width,samples_per_column;
int column,out_sample,sample,samples;
- //for drawing graph
int offset;
double scale,accum;
};
class Render_context: public Poco::Task { //Poco task object
- //manages a 'patchbay'
- //high level interfaces for the wizard
- //and low level interface onto the graph
+ //manages a 'patchbay'
+ //high level interfaces for the wizard
+ //and low level interface onto the graph
public:
Render_context(const std::string& name): Task(name) {
audio_thumb=new Audio_thumbnailer();
state=IDLE;
+ output_framerate=25.0f;
};
void runTask();
void add_queue(int item);
Command_response session_command(const std::vector<std::string>& command);
Render_status get_status();
- void cancel(); //interrupt locking process
+ void cancel(); //interrupt locking process
int make_preview(int nodeID, float time); //starts a frame preview - returns status code - how to retrieve?
bool load_audio(const string &filename,vector<Base_audio_processor*> processors);
Render_requirements get_requirements();
@@ -454,14 +428,16 @@ namespace Rotor {
private:
int state;
double progress; //for a locking process: audio analysis or rendering
- //thread only does one thing at once
+ //thread only does one thing at once
std::deque<int> work_queue;
Poco::Mutex mutex; //lock for access from parent thread
std::string audio_filename;
+ std::string output_filename;
Audio_thumbnailer *audio_thumb;
vampHost::QMAnalyser audio_analyser;
Graph graph;
Node_factory factory;
+ float output_framerate;
};
}
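The output_filename and output_framerate members added above feed the new RENDER branch of Render_context::runTask() in rotor.cpp, which drains the work_queue under the Poco::Mutex declared in the same hunk. A compact sketch of that queue handoff, written with a scoped lock instead of the explicit lock()/unlock() calls used in runTask() (class and method names are illustrative only, not the project's):

#include <deque>
#include <Poco/Mutex.h>

// Sketch of the work-queue handoff between the HTTP thread (add) and the
// render task thread (take). Illustrative only; not the project's class.
class Work_queue_sketch {
public:
    void add(int item) {
        Poco::Mutex::ScopedLock lock(mutex);   // parent thread queues a command
        queue.push_back(item);
    }
    bool take(int &item) {
        Poco::Mutex::ScopedLock lock(mutex);   // task thread pops one command at a time
        if (queue.empty())
            return false;
        item = queue.front();
        queue.pop_front();
        return true;
    }
private:
    std::deque<int> queue;
    Poco::Mutex mutex;
};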
diff --git a/rotord/rotord.h b/rotord/rotord.h
index 0eb7c76..7c0dbc3 100755
--- a/rotord/rotord.h
+++ b/rotord/rotord.h
@@ -47,7 +47,6 @@ using Poco::Util::HelpFormatter;
using Poco::Net::HTTPStreamFactory;
-//#include "avCodec.h"
#include "rotor.h"
class RotorRequestHandler: public HTTPRequestHandler