author    Tim Redfern <tim@eclectronics.org>    2013-05-30 17:19:36 +0100
committer Tim Redfern <tim@eclectronics.org>    2013-05-30 17:19:36 +0100
commit    e7c0b4a70990a1293056de92b555743e378b06eb (patch)
tree      30730e09de90b835c7dcebd44bad6f5899a7bb60 /rotord/libavexporter.cpp
parent    f648d137d381d778f1ae25a25a2cc7257128123b (diff)
thread safety issues
Diffstat (limited to 'rotord/libavexporter.cpp')
-rw-r--r--  rotord/libavexporter.cpp  | 170
1 file changed, 0 insertions, 170 deletions
diff --git a/rotord/libavexporter.cpp b/rotord/libavexporter.cpp
deleted file mode 100644
index 7b6d1cf..0000000
--- a/rotord/libavexporter.cpp
+++ /dev/null
@@ -1,170 +0,0 @@
-#include "libavexporter.h"
-
-
-bool libav::Exporter::setup(int w,int h, int bitRate, int frameRate, std::string container){
- // Initialize libavcodec, and register all codecs and formats. //
- av_register_all();
-
-
- this->w=w;
- this->h=h;
- this->bitRate=bitRate;
- this->frameRate=frameRate;
- this->container=container;
-
- return true;
-}
-bool libav::Exporter::record(std::string filename){
-
- // allocate the output media context //
- avformat_alloc_output_context2(&oc, NULL, NULL, filename.c_str());
- if (!oc) {
- printf("Could not deduce output format from file extension: using MPEG.\n");
- avformat_alloc_output_context2(&oc, NULL, "mpeg", filename.c_str());
- }
- if (!oc) {
- return false;
- }
- fmt = oc->oformat;
-
- // Add the audio and video streams using the default format codecs
- // * and initialize the codecs. //
- video_st = NULL;
- audio_st = NULL;
-
- if (fmt->video_codec != AV_CODEC_ID_NONE) {
- video_st = add_stream(oc, &video_codec, fmt->video_codec);
- }
- if (fmt->audio_codec != AV_CODEC_ID_NONE) {
- audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
- }
-
- //set initial video params
- video_st->codec->width=w;
- video_st->codec->height=h;
- video_st->codec->time_base.num = 1;//codecCtx->ticks_per_frame;
- video_st->codec->time_base.den = frameRate;
- video_st->time_base = video_st->codec->time_base;
- //audioStream->time_base = codecCtx->time_base; //???has the capability of crashing
-
- video_st->codec->gop_size = 10; /* emit one intra frame every ten frames */
- video_st->codec->pix_fmt = PIX_FMT_YUV420P;
-
- // Now that all the parameters are set, we can open the audio and
- // * video codecs and allocate the necessary encode buffers. //
- if (video_st)
- open_video(oc, video_codec, video_st);
- if (audio_st) {
- audioframesize=open_audio(oc, audio_codec, audio_st);
- audiostep=((float)audioframesize)/(audio_st->codec->sample_rate);
- std::cerr << "opened audio codec with "<<audioframesize<<" frame size and "<<audiostep<<" seconds per frame"<<std::endl;
- }
-
-
- av_dump_format(oc, 0, filename.c_str(), 1);
-
- // open the output file, if needed //
- if (!(fmt->flags & AVFMT_NOFILE)) {
- int ret = avio_open(&oc->pb, filename.c_str(), AVIO_FLAG_WRITE);
- if (ret < 0) {
- std::cerr <<"Could not open " << filename.c_str() << std::endl;
- return false;
- }
- }
-
- // Write the stream header, if any. //
- int ret = avformat_write_header(oc, NULL);
- if (ret < 0) {
- //std::cerr <<"Error occurred when opening output file:" << av_err2str(ret) << std::endl;
- return false;
- }
-
- if (frame)
- frame->pts = 0;
-
- outputframe=0;
-
- return true;
-}
-bool libav::Exporter::encodeFrame(unsigned char *pixels,uint16_t *samples){
- // Compute current audio and video time. //
- if (audio_st)
- audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
- else
- audio_pts = 0.0;
-
- if (video_st)
- video_pts = (double)video_st->pts.val * video_st->time_base.num /
- video_st->time_base.den;
- else
- video_pts = 0.0;
-
- // write interleaved audio and video frames //
- if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
- write_audio_frame(oc, audio_st, samples);
- } else {
- write_video_frame(oc, video_st, pixels);
-
- frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
- }
-
- //std::cerr << "encoded frame " << outputframe << std::endl;
- outputframe++;
-
- return true;
-}
-bool libav::Exporter::encodeFrame(unsigned char *pixels,AVPacket *audio){
- // Compute current audio and video time. //
- if (audio_st)
- audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
- else
- audio_pts = 0.0;
-
- if (video_st)
- video_pts = (double)video_st->pts.val * video_st->time_base.num /
- video_st->time_base.den;
- else
- video_pts = 0.0;
-
- // write interleaved audio and video frames //
- if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
- write_audio_frame(oc, audio_st, audio);
- } else {
- write_video_frame(oc, video_st, pixels);
-
- frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
- }
-
- //std::cerr << "encoded frame " << outputframe << std::endl;
- outputframe++;
-
- return true;
-}
-bool libav::Exporter::encodeFrame(unsigned char *pixels){
- video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
- write_video_frame(oc, video_st, pixels);
- frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
- outputframe++;
- return true;
-}
-bool libav::Exporter::encodeFrame(uint16_t *samples){
- audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
- write_audio_frame(oc, audio_st, samples);
- return true;
-}
-void libav::Exporter::finishRecord(){
-
- av_write_trailer(oc);
- // Close each codec. //
- if (video_st)
- close_video(oc, video_st);
- if (audio_st)
- close_audio(oc, audio_st);
-
- if (!(fmt->flags & AVFMT_NOFILE))
- // Close the output file. //
- avio_close(oc->pb);
-
- // free the stream //
- avformat_free_context(oc);
-}
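
Note: the removed exporter was driven entirely through the small public interface visible above (setup / record / the encodeFrame overloads / finishRecord); all libav bookkeeping, including the per-frame pts advance via av_rescale_q and the audio/video interleaving decision, happened inside those methods. The following is a minimal caller sketch, assuming the libav::Exporter declaration in libavexporter.h still matches the deleted definitions; the file name, bit rate, buffer sizes and pixel layout are illustrative assumptions, not values taken from the project.

// Hypothetical driver for the removed libav::Exporter.
// Assumes libavexporter.h declares the same method signatures as the code deleted above.
#include "libavexporter.h"
#include <vector>
#include <cstdint>

int main() {
    libav::Exporter exporter;

    // Width, height, bit rate, frame rate and container, as taken by setup().
    if (!exporter.setup(640, 480, 400000, 25, "mp4"))
        return 1;

    // record() allocates the output context, opens the codecs and writes the header.
    if (!exporter.record("out.mp4"))
        return 1;

    // Placeholder buffers; a real caller would fill these each frame.
    // The pixel layout and the audio frame size depend on the codecs opened in record().
    std::vector<unsigned char> pixels(640 * 480 * 3, 0);
    std::vector<uint16_t> samples(1152 * 2, 0);

    for (int i = 0; i < 250; ++i) {
        // encodeFrame() compares the streams' current pts values and writes
        // whichever of the audio or video frame is due next.
        exporter.encodeFrame(pixels.data(), samples.data());
    }

    // finishRecord() writes the trailer, closes the codecs and the output file.
    exporter.finishRecord();
    return 0;
}

Because the pts arithmetic lives inside the exporter, the caller only supplies raw pixel and sample buffers; it never touches the AVFormatContext or the streams directly.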