Diffstat (limited to 'rotord/rotor.cpp')
-rwxr-xr-x    rotord/rotor.cpp    142
1 file changed, 135 insertions, 7 deletions
diff --git a/rotord/rotor.cpp b/rotord/rotor.cpp
index 5736c2f..2da6a8b 100755
--- a/rotord/rotor.cpp
+++ b/rotord/rotor.cpp
@@ -25,7 +25,7 @@ void Render_context::runTask() {
     processors.push_back(audio_thumb);
     vector<Node*> analysers=graph.find_nodes("audio_analysis");
     for (auto a: analysers) {
-        processors.push_back(a);
+        processors.push_back(dynamic_cast<Base_audio_processor*>(a));
     }
     if (load_audio(audio_filename,processors)) {
         state=AUDIO_READY;
@@ -308,7 +308,7 @@ bool Render_context::load_audio(const string &filename,vector<Base_audio_process
     if (avformat_find_stream_info(formatContext, NULL) < 0)
     {
         av_free(frame);
-        av_close_input_file(formatContext);
+        avformat_close_input(&formatContext);
         std::cout << "Error finding the stream info" << std::endl;
         return false;
     }
@@ -326,7 +326,7 @@ bool Render_context::load_audio(const string &filename,vector<Base_audio_process
     if (audioStream == NULL)
     {
         av_free(frame);
-        av_close_input_file(formatContext);
+        avformat_close_input(&formatContext);
         std::cout << "Could not find any audio stream in the file" << std::endl;
         return false;
     }
@@ -337,14 +337,14 @@ bool Render_context::load_audio(const string &filename,vector<Base_audio_process
     if (codecContext->codec == NULL)
     {
         av_free(frame);
-        av_close_input_file(formatContext);
+        avformat_close_input(&formatContext);
         std::cout << "Couldn't find a proper decoder" << std::endl;
         return false;
     }
     else if (avcodec_open2(codecContext, codecContext->codec, NULL) != 0)
     {
         av_free(frame);
-        av_close_input_file(formatContext);
+        avformat_close_input(&formatContext);
         std::cout << "Couldn't open the context with the decoder" << std::endl;
         return false;
     }
@@ -378,7 +378,8 @@ bool Render_context::load_audio(const string &filename,vector<Base_audio_process
         {
             // Try to decode the packet into a frame
             int frameFinished = 0;
-            int bytes = avcodec_decode_audio4(codecContext, frame, &frameFinished, &packet);
+            //int bytes =
+            avcodec_decode_audio4(codecContext, frame, &frameFinished, &packet);
 
             // Some frames rely on multiple packets, so we have to make sure the frame is finished before
             // we can use it
@@ -438,7 +439,7 @@ bool Render_context::load_audio(const string &filename,vector<Base_audio_process
 
     av_free(frame);
     avcodec_close(codecContext);
-    av_close_input_file(formatContext);
+    avformat_close_input(&formatContext);
 
     return true;
 }
@@ -606,4 +607,131 @@ void Audio_analysis::print_features(){
 }
 bool Video_output::render(const float duration, const float framerate,const string &output_filename,const string &audio_filename){
     //render out the network
+    /*
+    //testing signal routes
+    cerr << "Rotor: Signal_output rendering " << duration << " seconds at " << framerate << " frames per second" << endl;
+    float step=1.0f/framerate;
+    float v=0.0f;
+    for (float f=0.0f;f<duration;f+=step) {
+        float u=get_output(Time_spec(f,framerate));
+        if (!fequal(u,v)) {
+            xml_out+=("<signal time='"+ofToString(f)+"'>"+ofToString(u)+"</signal>\n");
+            v=u;
+        }
+    }
+    return true;
+    */
+    //set up output context
+    //then iterate through frames
+    //querying graph at each frame
+
+    av_register_all();
+
+    AVCodec *codec;
+    AVCodecContext *c= NULL;
+    int i, out_size, size, x, y, outbuf_size;
+    FILE *f;
+    AVFrame *picture;
+    uint8_t *outbuf, *picture_buf;
+
+    cerr << "Rotor: rendering " << output_filename << " , " << duration << " seconds at " << framerate << " frames per second" << endl;
+
+    /* find the mpeg1 video encoder */
+    codec = avcodec_find_encoder(AV_CODEC_ID_H264);
+    if (!codec) {
+        cerr<< "codec not found" << endl;
+        return false;
+    }
+
+    c= avcodec_alloc_context3(codec);
+    picture= avcodec_alloc_frame();
+
+    /* put sample parameters */
+    c->bit_rate = 400000;
+    /* resolution must be a multiple of two */
+    c->width = 640;
+    c->height = 250;
+    /* frames per second */
+    c->time_base= (AVRational){1,25};
+    c->gop_size = 10; /* emit one intra frame every ten frames */
+    c->max_b_frames=1;
+    c->pix_fmt = PIX_FMT_YUV420P; //AV_PIX_FMT_RGB24
+
+    AVDictionary *options;
+
+    /* open it */
+    if (avcodec_open2(c, codec, &options) < 0) {
+        cerr << "could not open codec" << endl;
+        return false;
+    }
+
+    f = fopen(output_filename.c_str(), "wb");
+    if (!f) {
+        cerr << "could not open "<< output_filename<<endl;
+        return false;
+    }
+
+    /* alloc image and output buffer */
+    outbuf_size = 100000;
+    outbuf = malloc(outbuf_size);
+    size = c->width * c->height;
+    picture_buf = malloc((size * 3) / 2); /* size for YUV 420 */
+
+    picture->data[0] = picture_buf;
+    picture->data[1] = picture->data[0] + size;
+    picture->data[2] = picture->data[1] + size / 4;
+    picture->linesize[0] = c->width;
+    picture->linesize[1] = c->width / 2;
+    picture->linesize[2] = c->width / 2;
+
+    /* encode 1 second of video */
+    for(i=0;i<25;i++) {
+        fflush(stdout);
+        /* prepare a dummy image */
+        /* Y */
+        for(y=0;y<c->height;y++) {
+            for(x=0;x<c->width;x++) {
+                picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
+            }
+        }
+
+        /* Cb and Cr */
+        for(y=0;y<c->height/2;y++) {
+            for(x=0;x<c->width/2;x++) {
+                picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
+                picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
+            }
+        }
+
+        /* encode the image */
+        out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
+        printf("encoding frame %3d (size=%5d)\n", i, out_size);
+        fwrite(outbuf, 1, out_size, f);
+    }
+
+    /* get the delayed frames */
+    for(; out_size; i++) {
+        fflush(stdout);
+
+        out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
+        printf("write frame %3d (size=%5d)\n", i, out_size);
+        fwrite(outbuf, 1, out_size, f);
+    }
+
+    /* add sequence end code to have a real mpeg file */
+    outbuf[0] = 0x00;
+    outbuf[1] = 0x00;
+    outbuf[2] = 0x01;
+    outbuf[3] = 0xb7;
+    fwrite(outbuf, 1, 4, f);
+    fclose(f);
+    free(picture_buf);
+    free(outbuf);
+
+    avcodec_close(c);
+    av_free(c);
+    av_free(picture);
+    printf("\n");
+
+    return true;
 }
\ No newline at end of file
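The recurring edit in load_audio() swaps av_close_input_file(formatContext), which was removed from libavformat, for avformat_close_input(&formatContext): the newer call takes the address of the context pointer, frees the demuxer state, and sets the pointer to NULL. The following is a minimal sketch of that open/close pairing only, with a hypothetical open_audio() helper standing in for Render_context::load_audio() and the stream/decoder handling elided.

// Sketch only, not part of the commit. open_audio() is a hypothetical
// stand-in; the error path mirrors the first load_audio() hunk above.
extern "C" {
#include <libavformat/avformat.h>
}
#include <iostream>

static bool open_audio(const char *filename) {
    AVFormatContext *formatContext = NULL;        // must start NULL or come from avformat_alloc_context()
    if (avformat_open_input(&formatContext, filename, NULL, NULL) != 0)
        return false;
    if (avformat_find_stream_info(formatContext, NULL) < 0) {
        avformat_close_input(&formatContext);     // replaces av_close_input_file(formatContext)
        std::cout << "Error finding the stream info" << std::endl;
        return false;
    }
    // ... locate the audio stream, open the decoder, read packets ...
    avformat_close_input(&formatContext);         // formatContext is NULL afterwards
    return true;
}

Because avformat_close_input() clears the caller's pointer itself, a repeated close is a harmless no-op rather than the dangling-pointer hazard that av_close_input_file() left behind.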

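The new Video_output::render() body follows the classic FFmpeg video-encode example and therefore relies on calls that were later deprecated and removed (avcodec_alloc_frame(), avcodec_encode_video(), the unprefixed PIX_FMT_YUV420P), plus bare malloc() assignments that need casts to compile as C++ and an AVDictionary *options handed to avcodec_open2() without being initialised to NULL. Purely as a hedged sketch of where that loop lands on current FFmpeg, and not something this commit does, here is the same 25-frame dummy H.264 encode written against the send/receive API; encode_dummy_frames() and out_file are illustrative names, with out_file standing in for the fopen()'d output_filename.

// Sketch only, assuming a modern libavcodec with the send/receive API.
extern "C" {
#include <libavcodec/avcodec.h>
}
#include <cstdio>

static bool encode_dummy_frames(FILE *out_file) {
    const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!codec) return false;

    AVCodecContext *c = avcodec_alloc_context3(codec);
    c->bit_rate = 400000;
    c->width = 640;                        // resolution must still be a multiple of two
    c->height = 250;
    c->time_base = AVRational{1, 25};
    c->framerate = AVRational{25, 1};
    c->gop_size = 10;
    c->max_b_frames = 1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;       // PIX_FMT_YUV420P lost its unprefixed name
    if (avcodec_open2(c, codec, NULL) < 0) return false;

    AVFrame *frame = av_frame_alloc();     // replaces avcodec_alloc_frame()
    frame->format = c->pix_fmt;
    frame->width = c->width;
    frame->height = c->height;
    av_frame_get_buffer(frame, 0);         // allocates data[] and linesize[] for us

    AVPacket *pkt = av_packet_alloc();
    for (int i = 0; i <= 25; i++) {        // one extra pass sends NULL to drain delayed packets
        AVFrame *src = (i < 25) ? frame : NULL;
        if (src) {
            av_frame_make_writable(frame);
            for (int y = 0; y < c->height; y++)          // Y plane, same gradient as the diff
                for (int x = 0; x < c->width; x++)
                    frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
            for (int y = 0; y < c->height / 2; y++)      // Cb and Cr planes
                for (int x = 0; x < c->width / 2; x++) {
                    frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
                    frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
                }
            frame->pts = i;
        }
        if (avcodec_send_frame(c, src) < 0)              // NULL frame enters draining mode
            break;
        while (avcodec_receive_packet(c, pkt) == 0) {    // replaces avcodec_encode_video()
            fwrite(pkt->data, 1, pkt->size, out_file);
            av_packet_unref(pkt);
        }
    }

    av_packet_free(&pkt);
    av_frame_free(&frame);
    avcodec_free_context(&c);              // replaces avcodec_close(c); av_free(c);
    return true;
}

With this shape the encoder owns its output buffers, so the fixed 100000-byte outbuf, the hand-built picture planes, and the manual MPEG sequence end code from the committed version all disappear.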