Diffstat (limited to 'NT/src/libavwrapper.h')
-rw-r--r--    NT/src/libavwrapper.h    239
1 file changed, 239 insertions(+), 0 deletions(-)
diff --git a/NT/src/libavwrapper.h b/NT/src/libavwrapper.h
new file mode 100644
index 0000000..162a77e
--- /dev/null
+++ b/NT/src/libavwrapper.h
@@ -0,0 +1,239 @@
+#ifndef libavwrapper_H
+#define libavwrapper_H
+
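+// Older libav headers rely on UINT64_C, which some C++ compilers only expose
+// from <stdint.h> when __STDC_CONSTANT_MACROS is defined; provide a fallback
+// so the FFmpeg includes below compile.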
+#ifndef UINT64_C
+#define UINT64_C(c) (c ## ULL)
+#endif
+
+#include "Poco/Mutex.h"
+#include "Poco/ScopedLock.h"
+#include "Poco/StringTokenizer.h"
+#include "Poco/File.h"
+
+extern Poco::Mutex mutex; // application-wide mutex
+
+extern "C" {
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libavutil/pixfmt.h>
+#include <libavutil/opt.h>
+#include <libswscale/swscale.h>
+}
+
+#include <string>
+#include <stdexcept>
+#include <iostream>
+#include <fstream>
+#include <cmath>
+#include <vector>
+
+#include <ffms.h>
+
+
+namespace libav {
+    // Some libavcodec/libavformat setup calls are not reentrant, so all
+    // one-time global initialisation goes through this helper.
+    void maybeInitFFMpegLib();
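+
+    // A minimal sketch of what the .cpp side is expected to do (assumption;
+    // the actual implementation lives in the source file):
+    //
+    //     void libav::maybeInitFFMpegLib(){
+    //         Poco::ScopedLock<Poco::Mutex> lock(mutex); // serialise one-time setup
+    //         static bool done = false;
+    //         if (!done) { av_register_all(); done = true; }
+    //     }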
+
+    class video_decoder
+    {
+    public:
+        video_decoder(){
+            maybeInitFFMpegLib();
+            pixfmts[0] = FFMS_GetPixFmt("rgb24"); // output is always converted to RGB24
+            pixfmts[1] = -1;
+            h = 0;
+            w = 0;
+            source = NULL;
+            props = NULL;
+            frame = NULL;
+            loaded = false;
+            err.Buffer = errmsg;
+            err.BufferSize = sizeof(errmsg);
+            err.ErrorType = FFMS_ERROR_SUCCESS;
+            err.SubType = FFMS_ERROR_SUCCESS;
+        }
+        ~video_decoder(){
+            cleanup();
+        }
+        void cleanup();
+        bool open(const std::string& filename);
+        double get_framerate(){
+            if (loaded) return ((double)props->FPSNumerator)/((double)props->FPSDenominator);
+            else return -1.0;
+        }
+        int get_number_frames(){
+            if (loaded) return props->NumFrames;
+            else return -1;
+        }
+        int get_number_channels(){
+            return 3; // RGB24 output, so always three channels
+        }
+        int get_width(){
+            return w;
+        }
+        int get_height(){
+            return h;
+        }
+        bool fetch_frame(int width, int height, int wanted){
+            if (FFMS_SetOutputFormatV2(source, pixfmts, width, height, FFMS_RESIZER_BICUBIC, &err)) {
+                std::cerr << "ffmpegsource: " << err.Buffer << std::endl;
+                return false;
+            }
+            frame = FFMS_GetFrame(source, wanted % props->NumFrames, &err);
+            if (frame == NULL) {
+                std::cerr << "ffmpegsource: " << err.Buffer << std::endl;
+                return false;
+            }
+            return true;
+        }
+
+        FFMS_VideoSource *source;
+        const FFMS_VideoProperties *props;
+        const FFMS_Frame *frame;
+        FFMS_ErrorInfo err;
+        char errmsg[1024];
+        int pixfmts[2];
+        bool loaded;
+        int h, w;
+    };
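+
+    // Rough usage sketch (illustrative only; "clip.mp4" is a hypothetical file
+    // name, and it assumes open() indexes the file and fills w/h/source/props):
+    //
+    //     libav::video_decoder dec;
+    //     if (dec.open("clip.mp4") && dec.fetch_frame(dec.get_width(), dec.get_height(), 0)) {
+    //         const FFMS_Frame *f = dec.frame;     // RGB24 pixels
+    //         const uint8_t *row0 = f->Data[0];    // first scanline
+    //         int stride = f->Linesize[0];         // bytes per scanline
+    //         // copy get_width()*3 bytes per row, advancing by stride
+    //     }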
+
+    class audio_decoder
+    {
+    public:
+        audio_decoder(){
+            maybeInitFFMpegLib();
+            source = nullptr;
+            props = nullptr;
+            frame = nullptr;
+            loaded = false;
+            err.Buffer = errmsg;
+            err.BufferSize = sizeof(errmsg);
+            err.ErrorType = FFMS_ERROR_SUCCESS;
+            err.SubType = FFMS_ERROR_SUCCESS;
+        }
+        ~audio_decoder(){
+            cleanup();
+        }
+        void cleanup();
+        bool open(const std::string& filename);
+        int get_format(){
+            if (props) return props->SampleFormat;
+            else return 0;
+        }
+        int get_sample_rate(){
+            if (props) return props->SampleRate;
+            else return 0;
+        }
+        int get_bit_depth(){
+            if (props) return props->BitsPerSample;
+            else return 0;
+        }
+        int get_number_channels(){
+            if (props) return props->Channels;
+            else return 0;
+        }
+        int64_t get_number_samples(){ // NumSamples is 64-bit, so do not truncate it
+            if (props) return props->NumSamples;
+            else return 0;
+        }
+        int64_t get_channel_layout(){
+            if (props) return props->ChannelLayout;
+            else return 0;
+        }
+        double get_duration(){
+            if (props) return ((double)props->NumSamples)/props->SampleRate;
+            else return 0;
+        }
+        bool get_samples(void *buf, int64_t start, int64_t count){
+            if (source) {
+                if (FFMS_GetAudio(source, buf, start, count, &err)) {
+                    std::cerr << "ffmpegsource: " << err.Buffer << std::endl;
+                    return false;
+                }
+                return true;
+            }
+            return false;
+        }
+
+        FFMS_AudioSource *source;
+        const FFMS_AudioProperties *props;
+        FFMS_Frame *frame;
+        FFMS_ErrorInfo err;
+        char errmsg[1024];
+        bool loaded;
+    };
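+
+    // Rough usage sketch (illustrative only; "track.wav" and the sample count
+    // are hypothetical, and it assumes open() fills source/props):
+    //
+    //     libav::audio_decoder dec;
+    //     if (dec.open("track.wav")) {
+    //         int64_t n = 4096; // sample frames to read (clamp to get_number_samples())
+    //         // one decoded sample frame occupies (bit depth / 8) * channels bytes
+    //         std::vector<uint8_t> buf(n * (dec.get_bit_depth() / 8) * dec.get_number_channels());
+    //         if (dec.get_samples(buf.data(), 0, n)) { /* interleaved PCM now in buf */ }
+    //     }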
+
+    class exporter {
+    public:
+        exporter(){
+            fmt = NULL; oc = NULL;
+            audio_st = video_st = NULL;
+            audio_codec = video_codec = NULL;
+            frame = NULL; outPixels = NULL;
+            sws_ctx = NULL;
+            fragmentation = false;
+        }
+        virtual ~exporter(){
+            if (NULL != sws_ctx) {
+                sws_freeContext(sws_ctx);
+                sws_ctx = NULL;
+            }
+        }
+        bool setup(int w, int h, int bitRate, int frameRate, std::string container, bool _fragmentation);
+        bool record(std::string filename);
+        bool encodeFrame(unsigned char *pixels, uint16_t *samples);
+        bool encodeFrame(unsigned char *pixels, AVPacket *audiopkt); // is it possible to just copy the packets?
+        bool encodeFrame(unsigned char *pixels, bool keyframe=false);
+        bool encodeFrame(uint16_t *samples);
+        void finishRecord();
+        int get_audio_framesize(){ return audioframesize; }
+        double get_audio_step(){ return audiostep; }
+
+        AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id);
+        bool open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st);
+        int open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st);
+
+        void write_audio_frame(AVFormatContext *oc, AVStream *st, uint16_t *samples);
+        void write_audio_frame(AVFormatContext *oc, AVStream *st, AVPacket *pkt);
+        void close_audio(AVFormatContext *oc, AVStream *st);
+
+        void write_video_frame(AVFormatContext *oc, AVStream *st, uint8_t *pixels, bool keyframe=false);
+        void close_video(AVFormatContext *oc, AVStream *st);
+
+    private:
+        AVOutputFormat *fmt;
+        AVFormatContext *oc;
+        AVStream *audio_st, *video_st;
+        AVCodec *audio_codec, *video_codec;
+        double audio_pts, video_pts;
+
+        struct SwsContext *sws_ctx;
+
+        int audioframesize;
+        double audiostep;
+        int w;
+        int h;
+        int bitRate;
+        int frameRate;
+        std::string container;
+
+        int outputframe;
+
+        // video output //
+        AVFrame *frame;
+        AVPicture src_picture, dst_picture;
+        int frame_count;
+        uint8_t *outPixels;
+
+        bool fragmentation;
+
+        //************************************************************//
+        // audio output //
+        double t, tincr, tincr2;
+        int audio_input_frame_size;
+    };
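+
+    // Rough usage sketch (illustrative only; "out.mp4", the setup() argument
+    // values, and rgbPixels/pcmSamples are placeholders, and the call order is
+    // inferred from the API above):
+    //
+    //     libav::exporter exp;
+    //     if (exp.setup(640, 480, 2000000, 25, "mp4", false) && exp.record("out.mp4")) {
+    //         // per output frame: RGB24 pixels plus get_audio_framesize() 16-bit samples
+    //         exp.encodeFrame(rgbPixels, pcmSamples);
+    //         exp.finishRecord();
+    //     }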
+
+} // namespace libav
+
+#endif // libavwrapper_H