summaryrefslogtreecommitdiff
path: root/rotord
diff options
context:
space:
mode:
authorTim Redfern <tim@herge.(none)>2013-04-19 18:57:08 +0100
committerTim Redfern <tim@herge.(none)>2013-04-19 18:57:08 +0100
commit47b733bbc93e565d840596d4fbc2d5d5f3d18dd2 (patch)
treeb3b4195c877335d4164bb33d41b6bf5c0c4f4243 /rotord
parent8f75260c27d739aad3b019d29ba4178ba8ce55b5 (diff)
avcodec confusion
Diffstat (limited to 'rotord')
-rw-r--r--rotord/libavexporter.cpp124
-rw-r--r--rotord/libavexporter.h554
-rwxr-xr-xrotord/ofxMovieExporter.cpp50
-rwxr-xr-xrotord/ofxMovieExporter.h9
-rwxr-xr-xrotord/rotor.cpp2
-rwxr-xr-xrotord/rotor.h15
-rw-r--r--rotord/rotord.cbp2
7 files changed, 722 insertions, 34 deletions
diff --git a/rotord/libavexporter.cpp b/rotord/libavexporter.cpp
new file mode 100644
index 0000000..a46de5a
--- /dev/null
+++ b/rotord/libavexporter.cpp
@@ -0,0 +1,124 @@
+#include "libavexporter.h"
+
+bool libav::exporter::setup(int w,int h, int bitRate, int frameRate, std::string container){
+ // Initialize libavcodec, and register all codecs and formats. //
+ av_register_all();
+
+ this->w=w;
+ this->h=h;
+ this->bitRate=bitRate;
+ this->frameRate=frameRate;
+ this->container=container;
+
+ return true;
+}
+bool libav::exporter::record(std::string filename){
+
+ // allocate the output media context //
+ avformat_alloc_output_context2(&oc, NULL, NULL, filename.c_str());
+ if (!oc) {
+ printf("Could not deduce output format from file extension: using MPEG.\n");
+ avformat_alloc_output_context2(&oc, NULL, "mpeg", filename.c_str());
+ }
+ if (!oc) {
+ return false;
+ }
+ fmt = oc->oformat;
+
+ // Add the audio and video streams using the default format codecs
+ // * and initialize the codecs. //
+ video_st = NULL;
+ audio_st = NULL;
+
+ if (fmt->video_codec != AV_CODEC_ID_NONE) {
+ video_st = add_stream(oc, &video_codec, fmt->video_codec);
+ }
+ if (fmt->audio_codec != AV_CODEC_ID_NONE) {
+ audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
+ }
+
+ //set initial video params
+ video_st->codec->width=w;
+ video_st->codec->height=h;
+ video_st->codec->time_base.num = 1;//codecCtx->ticks_per_frame;
+ video_st->codec->time_base.den = frameRate;
+ video_st->time_base = video_st->codec->time_base;
+ //audioStream->time_base = codecCtx->time_base; //???has the capability of crashing
+
+ video_st->codec->gop_size = 10; /* emit one intra frame every ten frames */
+ video_st->codec->pix_fmt = PIX_FMT_YUV420P;
+
+ // Now that all the parameters are set, we can open the audio and
+ // * video codecs and allocate the necessary encode buffers. //
+ if (video_st)
+ open_video(oc, video_codec, video_st);
+ if (audio_st)
+ open_audio(oc, audio_codec, audio_st);
+
+ av_dump_format(oc, 0, filename.c_str(), 1);
+
+ // open the output file, if needed //
+ if (!(fmt->flags & AVFMT_NOFILE)) {
+ int ret = avio_open(&oc->pb, filename.c_str(), AVIO_FLAG_WRITE);
+ if (ret < 0) {
+ std::cerr <<"Could not open " << filename.c_str() << std::endl;
+ return false;
+ }
+ }
+
+ // Write the stream header, if any. //
+ int ret = avformat_write_header(oc, NULL);
+ if (ret < 0) {
+ //std::cerr <<"Error occurred when opening output file:" << av_err2str(ret) << std::endl;
+ return false;
+ }
+
+ frame=0;
+ return true;
+}
+bool libav::exporter::encodeFrame(unsigned char *pixels){
+ // Compute current audio and video time. //
+ if (audio_st)
+ audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
+ else
+ audio_pts = 0.0;
+
+ if (video_st)
+ video_pts = (double)video_st->pts.val * video_st->time_base.num /
+ video_st->time_base.den;
+ else
+ video_pts = 0.0;
+
+ uint16_t *samples = av_malloc(audio_input_frame_size *
+ av_get_bytes_per_sample(audio_st->codec->sample_fmt) *
+ audio_st->codec->channels); //dummy audio
+
+ // write interleaved audio and video frames //
+ if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
+ write_audio_frame(oc, audio_st, samples);
+ } else {
+ write_video_frame(oc, video_st, pixels);
+ frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
+ }
+
+ av_free(samples);
+
+ std::cerr << "encoded frame " << outputframe << std::endl;
+ outputframe++;
+
+ return true;
+}
+void libav::exporter::finishRecord(){
+ // Close each codec. //
+ if (video_st)
+ close_video(oc, video_st);
+ if (audio_st)
+ close_audio(oc, audio_st);
+
+ if (!(fmt->flags & AVFMT_NOFILE))
+ // Close the output file. //
+ avio_close(oc->pb);
+
+ // free the stream //
+ avformat_free_context(oc);
+} \ No newline at end of file
diff --git a/rotord/libavexporter.h b/rotord/libavexporter.h
new file mode 100644
index 0000000..27bbbe5
--- /dev/null
+++ b/rotord/libavexporter.h
@@ -0,0 +1,554 @@
+/*
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+*/
+
+/*
+ * @file
+ * libavformat API example.
+ *
+ * Output a media file in any supported libavformat format.
+ * The default codecs are used.
+ * @example doc/examples/muxing.c
+*/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string>
+#include <math.h>
+#include <iostream>
+
+extern "C" {
+ #include <libavutil/mathematics.h>
+ #include <libavformat/avformat.h>
+ #include <libswscale/swscale.h>
+}
+
+// 5 seconds stream duration //
+#define STREAM_DURATION 200.0
+#define STREAM_FRAME_RATE 25 // 25 images/s //
+#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
+#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P // default pix_fmt //
+
+namespace libav {
+
+
// Scaler quality used for every RGB->YUV conversion in this module.
static int sws_flags = SWS_BICUBIC;

//************************************************************//
// audio output //

// Sine-sweep oscillator state shared by get_audio_frame() across calls.
static float t, tincr, tincr2;
// Samples per encoded audio frame; set by open_audio() from the codec.
static int audio_input_frame_size;
+
+ // Add an output stream. //
// Add an output stream to 'oc' for the given codec id: find the encoder
// (returned via *codec), create the stream and fill its codec context
// with example defaults.  The caller overrides width/height/time_base
// afterwards (see exporter::record()).  exit(1)s on failure.
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
                            enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    // find the encoder //
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        //fprintf(stderr, "Could not find encoder for '%s'\n",
        //        avcodec_get_name(codec_id));
        exit(1);
    }

    st = avformat_new_stream(oc, *codec);
    if (!st) {
        //fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    st->id = oc->nb_streams-1;
    c = st->codec;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        // Example audio parameters: 64 kb/s stereo S16 at 44.1 kHz.
        st->id = 1;
        c->sample_fmt = AV_SAMPLE_FMT_S16;
        c->bit_rate = 64000;
        c->sample_rate = 44100;
        c->channels = 2;
        c->channel_layout=AV_CH_LAYOUT_STEREO;
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;

        // Example defaults; exporter::record() overwrites bit rate,
        // size and time base before the codec is opened.
        c->bit_rate = 400000;
        // Resolution must be a multiple of two. //
        c->width = 352;
        c->height = 288;
        // timebase: This is the fundamental unit of time (in seconds) in terms
        // of which frame timestamps are represented. For fixed-fps content,
        // timebase should be 1/framerate and timestamp increments should be
        // identical to 1. //
        c->time_base.den = STREAM_FRAME_RATE;
        c->time_base.num = 1;
        c->gop_size = 12; // emit one intra frame every twelve frames at most //
        c->pix_fmt = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            // just for testing, we also add B frames //
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            // Needed to avoid using macroblocks in which some coeffs overflow.
            // This does not happen with normal video, it just happens here as
            // the motion of the chroma plane does not match the luma plane. //
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    // Some formats want stream headers to be separate. //
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}
+
+ //************************************************************//
+ // audio output //
+
+
// Open the audio encoder for stream 'st', initialize the sine-sweep
// generator state and determine the per-frame sample count.  exit(1)s
// on failure, matching the rest of this file's error handling.
static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    AVCodecContext *c;
    int ret;

    c = st->codec;

    // open it //
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        //fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    // init signal generator //
    t = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    // increment frequency by 110 Hz per second //
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    // Variable-frame-size codecs accept any count; otherwise the codec
    // dictates exactly how many samples each frame must carry.
    if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
        audio_input_frame_size = 10000;
    else
        audio_input_frame_size = c->frame_size;

    // Sample buffer allocation was moved to the caller
    // (exporter::encodeFrame), which passes samples in per call.
    /*
    samples = av_malloc(audio_input_frame_size *
                        av_get_bytes_per_sample(c->sample_fmt) *
                        c->channels);
    if (!samples) {
        //fprintf(stderr, "Could not allocate audio samples buffer\n");
        exit(1);
    }
    */
}
+
+ // Prepare a 16 bit dummy audio frame of 'frame_size' samples and
+ // * 'nb_channels' channels. //
+ static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
+ {
+ int j, i, v;
+ int16_t *q;
+
+ q = samples;
+ for (j = 0; j < frame_size; j++) {
+ v = (int)(sin(t) * 10000);
+ for (i = 0; i < nb_channels; i++)
+ *q++ = v;
+ t += tincr;
+ tincr += tincr2;
+ }
+ }
+
+ static void write_audio_frame(AVFormatContext *oc, AVStream *st,uint16_t *samples)
+ {
+ AVCodecContext *c;
+ AVPacket pkt = { 0 }; // data and size must be 0;
+ AVFrame *frame = avcodec_alloc_frame();
+ int got_packet, ret;
+
+ av_init_packet(&pkt);
+ c = st->codec;
+
+ //get_audio_frame(samples, audio_input_frame_size, c->channels);
+ frame->nb_samples = audio_input_frame_size;
+ avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
+ (uint8_t *)samples,
+ audio_input_frame_size *
+ av_get_bytes_per_sample(c->sample_fmt) *
+ c->channels, 1);
+
+ ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
+ if (ret < 0) {
+ //fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
+ exit(1);
+ }
+
+ if (!got_packet)
+ return;
+
+ pkt.stream_index = st->index;
+
+ // Write the compressed frame to the media file. //
+ ret = av_interleaved_write_frame(oc, &pkt);
+ if (ret != 0) {
+ //fprintf(stderr, "Error while writing audio frame: %s\n",
+ // av_err2str(ret));
+ exit(1);
+ }
+ avcodec_free_frame(&frame);
+ }
+
+ static void close_audio(AVFormatContext *oc, AVStream *st)
+ {
+ avcodec_close(st->codec);
+
+
+ }
+
+ //************************************************************//
+ // video output //
+
// Shared video-output state (this module supports one export at a time).
static AVFrame *frame;                      // reusable encode frame; open_video() aliases its planes to dst_picture
static AVPicture src_picture, dst_picture;  // RGB source wrapper / YUV destination planes
static int frame_count;                     // number of video frames written so far
static uint8_t *outPixels;                  // scratch YUV420P buffer used by write_video_frame()
+
// Open the video encoder for stream 'st' and allocate the reusable
// encode frame, the YUV destination planes and the conversion scratch
// buffer.  exit(1)s on failure, matching the rest of this file.
static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    int ret;
    AVCodecContext *c = st->codec;

    // open the codec //
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        //fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    // allocate and init a re-usable frame //
    frame = avcodec_alloc_frame();
    if (!frame) {
        //fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    // Allocate the encoded raw picture. //
    ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
    if (ret < 0) {
        //fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
        exit(1);
    }

    // If the output format is not YUV420P, then a temporary YUV420P
    // picture is needed too. It is then converted to the required
    // output format. //
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
        if (ret < 0) {
            //fprintf(stderr, "Could not allocate temporary picture: %s\n",
            //        av_err2str(ret));
            exit(1);
        }
    }

    // copy data and linesize picture pointers to frame //
    // (the encoder reads its input through 'frame', which from here on
    // aliases dst_picture's planes)
    *((AVPicture *)frame) = dst_picture;

    // Scratch buffer sized for one YUV420P frame, used as a conversion
    // target by write_video_frame().
    outPixels = (uint8_t*)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, st->codec->width,st->codec->height));
}
+
+ // Prepare a dummy image. //
+ static void fill_yuv_image(AVPicture *pict, int frame_index,
+ int width, int height)
+ {
+ int x, y, i;
+
+ i = frame_index;
+
+ // Y //
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
+
+ // Cb and Cr //
+ for (y = 0; y < height / 2; y++) {
+ for (x = 0; x < width / 2; x++) {
+ pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
+ pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
+ }
+ }
+ }
+
+ static void write_video_frame(AVFormatContext *oc, AVStream *st, uint8_t *pixels)
+ {
+ int ret;
+ static struct SwsContext *sws_ctx;
+ AVCodecContext *c = st->codec;
+
+/*
+ if (frame_count >= STREAM_NB_FRAMES) {
+ // No more frames to compress. The codec has a latency of a few
+ // * frames if using B-frames, so we get the last frames by
+ // * passing the same picture again. //
+ } else {
+ if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
+ // as we only generate a YUV420P picture, we must convert it
+ // * to the codec pixel format if needed //
+ if (!sws_ctx) {
+ sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
+ c->width, c->height, c->pix_fmt,
+ sws_flags, NULL, NULL, NULL);
+ if (!sws_ctx) {
+ //fprintf(stderr,
+ // "Could not initialize the conversion context\n");
+ exit(1);
+ }
+ }
+ fill_yuv_image(&src_picture, frame_count, c->width, c->height);
+ sws_scale(sws_ctx,
+ (const uint8_t * const *)src_picture.data, src_picture.linesize,
+ 0, c->height, dst_picture.data, dst_picture.linesize);
+ } else {
+ fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
+ }
+ }
+*/
+ //always convert RGB to YUV
+ sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_RGB24,
+ c->width, c->height, AV_PIX_FMT_YUV420P,
+ sws_flags, NULL, NULL, NULL);
+
+ avpicture_fill(&src_picture, pixels, PIX_FMT_RGB24, c->width,c->height);
+ avpicture_fill(&dst_picture, outPixels, PIX_FMT_YUV420P, c->width,c->height);
+
+ sws_scale(sws_ctx, src_picture.data, src_picture.linesize, 0, c->height, dst_picture.data, dst_picture.linesize);
+
+ if (oc->oformat->flags & AVFMT_RAWPICTURE) {
+ // Raw video case - directly store the picture in the packet //
+ AVPacket pkt;
+ av_init_packet(&pkt);
+
+ pkt.flags |= AV_PKT_FLAG_KEY;
+ pkt.stream_index = st->index;
+ pkt.data = dst_picture.data[0];
+ pkt.size = sizeof(AVPicture);
+
+ ret = av_interleaved_write_frame(oc, &pkt);
+ } else {
+ AVPacket pkt = { 0 };
+ int got_packet;
+ av_init_packet(&pkt);
+
+ // encode the image //
+ ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
+ if (ret < 0) {
+ //fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
+ exit(1);
+ }
+ // If size is zero, it means the image was buffered. //
+
+ if (!ret && got_packet && pkt.size) {
+ pkt.stream_index = st->index;
+
+ // Write the compressed frame to the media file. //
+ ret = av_interleaved_write_frame(oc, &pkt);
+ } else {
+ ret = 0;
+ }
+ }
+ if (ret != 0) {
+ //fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
+ exit(1);
+ }
+ frame_count++;
+ }
+
+ static void close_video(AVFormatContext *oc, AVStream *st)
+ {
+ avcodec_close(st->codec);
+ av_free(src_picture.data[0]);
+ av_free(dst_picture.data[0]);
+ av_free(frame);
+ av_free(outPixels);
+ }
+
// Minimal movie exporter wrapped around the libav muxing example:
// setup() stores the desired output parameters, record() opens the file
// and writes the header, encodeFrame() muxes one RGB frame (with
// interleaved dummy audio), finishRecord() closes everything.
class exporter {
public:
    bool setup(int w,int h, int bitRate, int frameRate, std::string container);
    bool record(std::string filename);
    bool encodeFrame(unsigned char *pixels);
    void finishRecord();
private:
    AVOutputFormat *fmt;                // chosen container format (owned by oc)
    AVFormatContext *oc;                // muxer context for the open output file
    AVStream *audio_st, *video_st;      // output streams (owned by oc; may be NULL)
    AVCodec *audio_codec, *video_codec; // encoders picked by add_stream()
    double audio_pts, video_pts;        // current stream times, used for interleaving

    int w;                              // output width in pixels
    int h;                              // output height in pixels
    int bitRate;                        // requested video bit rate
    int frameRate;                      // output frames per second
    std::string container;              // container name hint — NOTE(review): stored but
                                        // apparently unused; format is guessed from the
                                        // filename extension in record(). Confirm.

    int outputframe;                    // frames encoded so far — presumably reset when
                                        // recording starts; verify against record()
};
+
+ //************************************************************//
+ // media file output //
+
+/*
+ int main(int argc, char **argv)
+ {
+ const char *filename;
+ AVOutputFormat *fmt;
+ AVFormatContext *oc;
+ AVStream *audio_st, *video_st;
+ AVCodec *audio_codec, *video_codec;
+ double audio_pts, video_pts;
+ int ret;
+
+ // Initialize libavcodec, and register all codecs and formats. //
+ av_register_all();
+
+ if (argc != 2) {
+ printf("usage: %s output_file\n"
+ "API example program to output a media file with libavformat.\n"
+ "This program generates a synthetic audio and video stream, encodes and\n"
+ "muxes them into a file named output_file.\n"
+ "The output format is automatically guessed according to the file extension.\n"
+ "Raw images can also be output by using '%%d' in the filename.\n"
+ "\n", argv[0]);
+ return 1;
+ }
+
+ filename = argv[1];
+
+ // allocate the output media context //
+ avformat_alloc_output_context2(&oc, NULL, NULL, filename);
+ if (!oc) {
+ printf("Could not deduce output format from file extension: using MPEG.\n");
+ avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
+ }
+ if (!oc) {
+ return 1;
+ }
+ fmt = oc->oformat;
+
+ // Add the audio and video streams using the default format codecs
+ // * and initialize the codecs. //
+ video_st = NULL;
+ audio_st = NULL;
+
+ if (fmt->video_codec != AV_CODEC_ID_NONE) {
+ video_st = add_stream(oc, &video_codec, fmt->video_codec);
+ }
+ if (fmt->audio_codec != AV_CODEC_ID_NONE) {
+ audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
+ }
+
+ // Now that all the parameters are set, we can open the audio and
+ * video codecs and allocate the necessary encode buffers. //
+ if (video_st)
+ open_video(oc, video_codec, video_st);
+ if (audio_st)
+ open_audio(oc, audio_codec, audio_st);
+
+ av_dump_format(oc, 0, filename, 1);
+
+ // open the output file, if needed //
+ if (!(fmt->flags & AVFMT_NOFILE)) {
+ ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
+ if (ret < 0) {
+ fprintf(stderr, "Could not open '%s': %s\n", filename,
+ av_err2str(ret));
+ return 1;
+ }
+ }
+
+ // Write the stream header, if any. //
+ ret = avformat_write_header(oc, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "Error occurred when opening output file: %s\n",
+ av_err2str(ret));
+ return 1;
+ }
+
+ if (frame)
+ frame->pts = 0;
+ for (;;) {
+ // Compute current audio and video time. //
+ if (audio_st)
+ audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
+ else
+ audio_pts = 0.0;
+
+ if (video_st)
+ video_pts = (double)video_st->pts.val * video_st->time_base.num /
+ video_st->time_base.den;
+ else
+ video_pts = 0.0;
+
+ if ((!audio_st || audio_pts >= STREAM_DURATION) &&
+ (!video_st || video_pts >= STREAM_DURATION))
+ break;
+
+ // write interleaved audio and video frames //
+ if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
+ write_audio_frame(oc, audio_st);
+ } else {
+ write_video_frame(oc, video_st);
+ frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
+ }
+ }
+
+ // Write the trailer, if any. The trailer must be written before you
+ * close the CodecContexts open when you wrote the header; otherwise
+ * av_write_trailer() may try to use memory that was freed on
+ * av_codec_close(). //
+ av_write_trailer(oc);
+
+ // Close each codec. //
+ if (video_st)
+ close_video(oc, video_st);
+ if (audio_st)
+ close_audio(oc, audio_st);
+
+ if (!(fmt->flags & AVFMT_NOFILE))
+ // Close the output file. //
+ avio_close(oc->pb);
+
+ // free the stream //
+ avformat_free_context(oc);
+
+ return 0;
+ }
+ */
+}
diff --git a/rotord/ofxMovieExporter.cpp b/rotord/ofxMovieExporter.cpp
index 815daa1..15abfe2 100755
--- a/rotord/ofxMovieExporter.cpp
+++ b/rotord/ofxMovieExporter.cpp
@@ -132,7 +132,8 @@
avformat_write_header(formatCtx,&options);
lastFrameTime = 0;
- frameNum = 0;
+ aframeNum = 0;
+ vframeNum = 0;
recording = true;
return true;
@@ -217,7 +218,7 @@
av_write_frame(formatCtx, &pkt);
}
- frameNum++;
+ vframeNum++;
}
bool ofxMovieExporter::encodeFrame(unsigned char *pixels)
@@ -234,46 +235,55 @@
sws_scale(convertCtx, inFrame->data, inFrame->linesize, 0, inH, outFrame->data, outFrame->linesize);
+ AVPacket pkt;
int outSize = avcodec_encode_video(codecCtx, encodedBuf, ENCODED_FRAME_BUFFER_SIZE, outFrame);
if (outSize > 0)
{
- AVPacket pkt;
+
av_init_packet(&pkt);
- pkt.pts = (int64_t)frameNum*(frameInterval*(((float)videoStream->time_base.den)/videoStream->time_base.num));//ofGetFrameNum();//codecCtx->coded_frame->pts;
+ pkt.pts = (int64_t)vframeNum*(frameInterval*(((float)videoStream->time_base.den)/videoStream->time_base.num));//ofGetFrameNum();//codecCtx->coded_frame->pts;
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.dts = pkt.pts;
pkt.stream_index = videoStream->index;
pkt.data = encodedBuf;
pkt.size = outSize;
av_interleaved_write_frame(formatCtx, &pkt);
+
+ vframeNum++;
}
+
//is it as simple as writing an audio packet for every video packet?
// avcodec_encode_audio2(AVCodecContext *avctx,AVPacket *avpkt,const AVFrame *frame,int *got_packet_ptr);
AVPacket apkt;
av_init_packet(&apkt);
- apkt.pts = (int64_t)frameNum*(frameInterval*(((float)videoStream->time_base.den)/videoStream->time_base.num));//ofGetFrameNum();//codecCtx->coded_frame->pts;
- apkt.flags |= AV_PKT_FLAG_KEY;
- apkt.dts = apkt.pts;
- apkt.stream_index = audioStream->index;
- //apkt.data = encodedBuf;
- apkt.size = outSize;
+ apkt.pts = (int64_t)aframeNum*(aframeInterval*(((float)videoStream->time_base.den)/videoStream->time_base.num));//ofGetFrameNum();//codecCtx->coded_frame->pts;
- AVFrame* afrm=avcodec_alloc_frame();
- afrm->nb_samples=44100/25;
- afrm->format=AV_SAMPLE_FMT_S16;
- uint8_t *d=new uint8_t[afrm->nb_samples*2*2];
- afrm->data[0]=d;
+ while(apkt.pts<pkt.pts) {
+ apkt.flags |= AV_PKT_FLAG_KEY;
+ apkt.dts = apkt.pts;
+ apkt.stream_index = audioStream->index;
+ //apkt.data = encodedBuf;
+ apkt.size = outSize;
- int gpp;
+ AVFrame* afrm=avcodec_alloc_frame();
+ afrm->nb_samples=44100/25;
+ afrm->format=AV_SAMPLE_FMT_S16;
+ uint8_t *d=new uint8_t[afrm->nb_samples*2*2];
+ afrm->data[0]=d;
- //avcodec_fill_audio_frame(afrm, 2, AV_SAMPLE_FMT_S16,(uint8_t *)d,(44100/25) * 2 * 2,1);
+ int gpp;
- int audioOutSize = avcodec_encode_audio2(acodecCtx,&apkt,afrm,&gpp);
+ //avcodec_fill_audio_frame(afrm, 2, AV_SAMPLE_FMT_S16,(uint8_t *)d,(44100/25) * 2 * 2,1);
- av_interleaved_write_frame(formatCtx, &apkt);
+ int audioOutSize = avcodec_encode_audio2(acodecCtx,&apkt,afrm,&gpp);
+
+ av_interleaved_write_frame(formatCtx, &apkt);
+
+ aframeNum++;
+ apkt.pts = (int64_t)aframeNum*(aframeInterval*(((float)videoStream->time_base.den)/videoStream->time_base.num));//ofGetFrameNum();//codecCtx->coded_frame->pts;
+ }
- frameNum++;
return true;
}
diff --git a/rotord/ofxMovieExporter.h b/rotord/ofxMovieExporter.h
index 4682107..5433812 100755
--- a/rotord/ofxMovieExporter.h
+++ b/rotord/ofxMovieExporter.h
@@ -115,7 +115,9 @@ class ofxMovieExporter
// codecId = CODEC_ID_MPEG2VIDEO, container = "mov"
bool setup(int outW = OUT_W, int outH = OUT_H, int bitRate = BIT_RATE, int frameRate = FRAME_RATE, AVCodecID codecId = CODEC_ID, std::string container = CONTAINER);
bool record(std::string filePrefix=FILENAME_PREFIX, std::string folderPath="");
+ bool encodeFrame(unsigned char *pixels);
void finishRecord();
+
void stop();
bool isRecording() const;
@@ -133,7 +135,6 @@ class ofxMovieExporter
inline int getRecordingWidth() {return outW;}
inline int getRecordingHeight() {return outH;}
- bool encodeFrame(unsigned char *pixels);
private:
//#ifdef _THREAD_CAPTURE
@@ -158,8 +159,9 @@ class ofxMovieExporter
int frameRate;
int bitRate;
float frameInterval;
+ float aframeInterval;
float lastFrameTime;
- int frameNum;
+
std::string outFileName;
AVOutputFormat* outputFormat;
@@ -190,6 +192,7 @@ class ofxMovieExporter
bool usePixelSource;
unsigned char* pixelSource;
- int frame_ticks;
+ int aframeNum;
+ int vframeNum;
};
diff --git a/rotord/rotor.cpp b/rotord/rotor.cpp
index 975ec85..4c1146e 100755
--- a/rotord/rotor.cpp
+++ b/rotord/rotor.cpp
@@ -771,7 +771,7 @@ bool Video_output::render(const float duration, const float framerate,const stri
std::string container ="mov";
- if (exporter->setup(outW,outH,bitRate,frameRate,codecId,container)) {
+ if (exporter->setup(outW,outH,bitRate,frameRate,container)) { //codecId,
if (exporter->record(output_filename)) {
cerr << "Rotor: Video_output rendering " << duration << " seconds at " << framerate << " fps" << endl;
diff --git a/rotord/rotor.h b/rotord/rotor.h
index bedf936..d808dd8 100755
--- a/rotord/rotor.h
+++ b/rotord/rotor.h
@@ -97,7 +97,7 @@ extern "C" {
#include "vampHost.h"
#include "xmlIO.h"
-//#include "avCodec.h"
+#include "libavexporter.h"
namespace Rotor {
#define IDLE 0
@@ -268,13 +268,6 @@ namespace Rotor {
vector<Image_input*> image_inputs; //image node also has image inputs and outputs
void create_image_input(const string &description) {image_inputs.push_back(new Image_input(description));};
virtual Image *get_output(const Frame_spec &frame)=0;
-
- /*{ //sample implementation
- //do something with the inputs
- //and then
- return ((Image_node*)(image_inputs[0]->connection))->get_output(frame);
- }
- */
Image *get_preview(const Frame_spec &frame);
Image *image; //this can be privately allocated or just passed on as the node see fit
private:
@@ -406,8 +399,9 @@ namespace Rotor {
Video_output(){};
Video_output(map<string,string> &settings) {
base_settings(settings);
- exporter=new ofxMovieExporter();
+ exporter=new libav::exporter();
};
+ ~Video_output(){ delete exporter; };
Image *get_output(const Frame_spec &frame){
if (image_inputs[0]->connection) {
return ((Image_node*)(image_inputs[0]->connection))->get_output(frame);
@@ -418,7 +412,8 @@ namespace Rotor {
bool render(const float duration, const float framerate,const string &output_filename,const string &audio_filename);
private:
- ofxMovieExporter *exporter;
+ //ofxMovieExporter *exporter;
+ libav::exporter *exporter;
};
//-------------------------------------------------------------------
class Node_factory{
diff --git a/rotord/rotord.cbp b/rotord/rotord.cbp
index 566a10c..76c5b1e 100644
--- a/rotord/rotord.cbp
+++ b/rotord/rotord.cbp
@@ -50,6 +50,8 @@
</Compiler>
<Unit filename="Makefile" />
<Unit filename="avCodec.h" />
+ <Unit filename="libavexporter.cpp" />
+ <Unit filename="libavexporter.h" />
<Unit filename="ofUtils.cpp" />
<Unit filename="ofUtils.h" />
<Unit filename="ofxMovieExporter.cpp" />