| author | Tim Redfern <tim@herge.(none)> | 2013-04-12 17:23:15 +0100 |
|---|---|---|
| committer | Tim Redfern <tim@herge.(none)> | 2013-04-12 17:23:15 +0100 |
| commit | 4c99697c528e11a4195b572bf9f72f80c2fe3ea6 (patch) | |
| tree | 2bde485776fe5717e5de647094ed7ea18130b49d /rotord | |
| parent | 31d5bb487a6a245c80fb2154a8eca99c9ff4e6e6 (diff) | |
adding container
Diffstat (limited to 'rotord')
| -rwxr-xr-x | rotord/rotor.cpp | 153 |
1 files changed, 136 insertions, 17 deletions
diff --git a/rotord/rotor.cpp b/rotord/rotor.cpp
index e0c91dd..ee170f0 100755
--- a/rotord/rotor.cpp
+++ b/rotord/rotor.cpp
@@ -605,8 +605,6 @@ void Audio_analysis::print_features(){
         cerr<<i.second<<" "<<i.first<<endl;
     }
 }
 
-bool Video_output::render(const float duration, const float framerate,const string &output_filename,const string &audio_filename){
-    //render out the network
 /*  //testing signal routes
     cerr << "Rotor: Signal_output rendering " << duration << " seconds at " << framerate << " frames per second" << endl;
@@ -621,6 +619,11 @@ bool Video_output::render(const float duration, const float framerate,const stri
     }
     return true;
 */
+
+/*
+bool Video_output::render(const float duration, const float framerate,const string &output_filename,const string &audio_filename){
+    //render out the network
+
     //set up output context
     //then iterate through frames
     //querying graph at each frame
@@ -636,7 +639,6 @@ bool Video_output::render(const float duration, const float framerate,const stri
     cerr << "Rotor: rendering " << output_filename << " , " << duration << " seconds at " << framerate << " frames per second" << endl;
 
-    /* find the mpeg1 video encoder */
     codec = avcodec_find_encoder(AV_CODEC_ID_H264);
     if (!codec) {
         cerr<< "codec not found" << endl;
@@ -646,21 +648,21 @@ bool Video_output::render(const float duration, const float framerate,const stri
     c= avcodec_alloc_context3(codec);
     picture= avcodec_alloc_frame();
 
-    /* put sample parameters */
+    // put sample parameters /
     c->bit_rate = 400000;
-    /* resolution must be a multiple of two */
+    // resolution must be a multiple of two /
     c->width = 640;
     c->height = 480;
-    /* frames per second */
+    // frames per second /
     c->time_base= (AVRational){1,25};
-    c->gop_size = 10; /* emit one intra frame every ten frames */
+    c->gop_size = 10; // emit one intra frame every ten frames /
     c->max_b_frames=1;
     c->pix_fmt = PIX_FMT_YUV420P; //AV_PIX_FMT_RGB24
 
     AVDictionary *options; //= NULL; causes a forward declaration error!?
     options=NULL;
-    /* open it */
+    // open it /
     if (avcodec_open2(c, codec, &options) < 0) {
         cerr << "could not open codec" << endl;
         return false;
@@ -672,11 +674,11 @@ bool Video_output::render(const float duration, const float framerate,const stri
         return false;
     }
 
-    /* alloc image and output buffer */
+    // alloc image and output buffer/
     outbuf_size = 100000;
     outbuf = malloc(outbuf_size);
     size = c->width * c->height;
-    picture_buf = malloc((size * 3) / 2); /* size for YUV 420 */
+    picture_buf = malloc((size * 3) / 2); // size for YUV 420 /
     picture->data[0] = picture_buf;
     picture->data[1] = picture->data[0] + size;
@@ -685,18 +687,18 @@ bool Video_output::render(const float duration, const float framerate,const stri
     picture->linesize[1] = c->width / 2;
     picture->linesize[2] = c->width / 2;
 
-    /* encode 1 second of video */
+    // encode 1 second of video /
     for(i=0;i<250;i++) {
         fflush(stdout);
-        /* prepare a dummy image */
-        /* Y */
+        // prepare a dummy image /
+        // Y /
         for(y=0;y<c->height;y++) {
             for(x=0;x<c->width;x++) {
                 picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
             }
         }
-        /* Cb and Cr */
+        // Cb and Cr /
         for(y=0;y<c->height/2;y++) {
             for(x=0;x<c->width/2;x++) {
                 picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
@@ -704,13 +706,13 @@ bool Video_output::render(const float duration, const float framerate,const stri
             }
         }
-        /* encode the image */
+        // encode the image /
         out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
         printf("encoding frame %3d (size=%5d)\n", i, out_size);
         fwrite(outbuf, 1, out_size, f);
     }
 
-    /* get the delayed frames */
+    // get the delayed frames /
     for(; out_size; i++) {
         fflush(stdout);
@@ -719,7 +721,7 @@ bool Video_output::render(const float duration, const float framerate,const stri
         fwrite(outbuf, 1, out_size, f);
     }
 
-    /* add sequence end code to have a real mpeg file */
+    // add sequence end code to have a real mpeg file /
     outbuf[0] = 0x00;
     outbuf[1] = 0x00;
     outbuf[2] = 0x01;
@@ -737,3 +739,120 @@ bool Video_output::render(const float duration, const float framerate,const stri
     return true;
 }
+*/
+bool Video_output::render(const float duration, const float framerate,const string &output_filename,const string &audio_filename){
+
+    AVOutputFormat *fmt;
+    AVFormatContext *oc;
+    AVStream *audio_st, *video_st;
+    double audio_pts, video_pts;
+    int i;
+
+    /* Initialize libavcodec, and register all codecs and formats. */
+    av_register_all();
+    //think about this: when to register and unregister?
+
+
+    /* Autodetect the output format from the name. default is MPEG. */
+    fmt = av_guess_format(NULL, output_filename.c_str(), NULL);
+    if (!fmt) {
+        printf("Could not deduce output format from file extension: using MPEG.\n");
+        fmt = av_guess_format("mpeg", NULL, NULL);
+    }
+    if (!fmt) {
+        cerr << "Rotor: could not find suitable output format" << endl;
+        return false;
+    }
+
+    /* Allocate the output media context. */
+    oc = avformat_alloc_context();
+    if (!oc) {
+        cerr <<"Rotor: memory error"<< endl;
+        return false;
+    }
+    oc->oformat = fmt;
+    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);
+
+    /* Add the audio and video streams using the default format codecs
+     * and initialize the codecs. */
+    video_st = NULL;
+    audio_st = NULL;
+    if (fmt->video_codec != AV_CODEC_ID_NONE) {
+        video_st = add_video_stream(oc, fmt->video_codec);
+    }
+    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
+        audio_st = add_audio_stream(oc, fmt->audio_codec);
+    }
+
+    /* Now that all the parameters are set, we can open the audio and
+     * video codecs and allocate the necessary encode buffers. */
+    if (video_st)
+        open_video(oc, video_st);
+    if (audio_st)
+        open_audio(oc, audio_st);
+
+    av_dump_format(oc, 0, filename, 1);
+
+    /* open the output file, if needed */
+    if (!(fmt->flags & AVFMT_NOFILE)) {
+        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
+            fprintf(stderr, "Could not open '%s'\n", filename);
+            return 1;
+        }
+    }
+
+    /* Write the stream header, if any. */
+    avformat_write_header(oc, NULL);
+
+    for (;;) {
+        /* Compute current audio and video time. */
+        if (audio_st)
+            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
+        else
+            audio_pts = 0.0;
+
+        if (video_st)
+            video_pts = (double)video_st->pts.val * video_st->time_base.num /
+                        video_st->time_base.den;
+        else
+            video_pts = 0.0;
+
+        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
+            (!video_st || video_pts >= STREAM_DURATION))
+            break;
+
+        /* write interleaved audio and video frames */
+        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
+            write_audio_frame(oc, audio_st);
+        } else {
+            write_video_frame(oc, video_st);
+        }
+    }
+
+    /* Write the trailer, if any. The trailer must be written before you
+     * close the CodecContexts open when you wrote the header; otherwise
+     * av_write_trailer() may try to use memory that was freed on
+     * av_codec_close(). */
+    av_write_trailer(oc);
+
+    /* Close each codec. */
+    if (video_st)
+        close_video(oc, video_st);
+    if (audio_st)
+        close_audio(oc, audio_st);
+
+    /* Free the streams. */
+    for (i = 0; i < oc->nb_streams; i++) {
+        av_freep(&oc->streams[i]->codec);
+        av_freep(&oc->streams[i]);
+    }
+
+    if (!(fmt->flags & AVFMT_NOFILE))
+        /* Close the output file. */
+        avio_close(oc->pb);
+
+    /* free the stream */
+    av_free(oc);
+
+    return 0;
+}
\ No newline at end of file
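
The new `Video_output::render()` follows the shape of FFmpeg's muxing example: guess the container format, allocate an `AVFormatContext`, add and open one stream per codec, write the header, interleave audio and video frames until the target duration, then write the trailer and tear everything down. The helpers it calls (`add_video_stream`, `add_audio_stream`, `open_video`, `open_audio`, `write_video_frame`, `write_audio_frame`, `close_video`, `close_audio`) are not part of this diff. The sketch below is a minimal, hypothetical version of the two video-side helpers against the 2013-era libav* API used here (`avformat_new_stream`, per-stream `st->codec`, `PIX_FMT_YUV420P`); the fixed 400 kbit/s, 640x480, 1/25 parameters simply mirror the old inline setup being commented out above. It is an assumption about how these helpers might be filled in, not the committed implementation.

```cpp
// Hypothetical sketch only: plausible add_video_stream()/open_video() helpers
// for the 2013-era FFmpeg API targeted by this commit. Not the committed code.
#include <iostream>
extern "C" {
#include <libavformat/avformat.h>
}

static AVStream *add_video_stream(AVFormatContext *oc, enum AVCodecID codec_id)
{
    AVCodec *codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        std::cerr << "Rotor: video encoder not found" << std::endl;
        return NULL;
    }
    AVStream *st = avformat_new_stream(oc, codec);
    if (!st)
        return NULL;

    AVCodecContext *c = st->codec;   // pre-3.x API: codec context lives on the stream
    c->codec_id      = codec_id;
    c->bit_rate      = 400000;       // values mirror the old inline setup
    c->width         = 640;          // resolution must be a multiple of two
    c->height        = 480;
    c->time_base.num = 1;            // frames per second: 1/25
    c->time_base.den = 25;
    c->gop_size      = 10;
    c->pix_fmt       = PIX_FMT_YUV420P;
    // Some containers (e.g. MP4) require global stream headers.
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    return st;
}

static bool open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c = st->codec;
    AVCodec *codec = avcodec_find_encoder(c->codec_id);
    if (!codec || avcodec_open2(c, codec, NULL) < 0) {
        std::cerr << "Rotor: could not open video codec" << std::endl;
        return false;
    }
    return true;   // frame/buffer allocation for encoding would also go here
}
```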
