| | | |
|---|---|---|
| author | Tim Redfern <tim@herge.(none)> | 2013-04-12 17:23:15 +0100 |
| committer | Tim Redfern <tim@herge.(none)> | 2013-04-12 17:23:15 +0100 |
| commit | 4c99697c528e11a4195b572bf9f72f80c2fe3ea6 (patch) | |
| tree | 2bde485776fe5717e5de647094ed7ea18130b49d /testint.c | |
| parent | 31d5bb487a6a245c80fb2154a8eca99c9ff4e6e6 (diff) | |
adding container
Diffstat (limited to 'testint.c')
| -rw-r--r-- | testint.c | 199 |
1 file changed, 199 insertions, 0 deletions
diff --git a/testint.c b/testint.c
new file mode 100644
index 0000000..6521ba3
--- /dev/null
+++ b/testint.c
@@ -0,0 +1,199 @@

extern "C" {
    #include "libavcodec/avcodec.h"
    #include "libavformat/avformat.h"
    #include "libavutil/opt.h"
    #include "libavutil/channel_layout.h"
    #include "libavutil/common.h"
    #include "libavutil/imgutils.h"
    #include "libavutil/mathematics.h"
    #include "libavutil/samplefmt.h"

    #include <libavutil/imgutils.h>
    #include <libavutil/samplefmt.h>
    //#include <libavutil/timestamp.h>
}

#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>

#include <iostream>
#include <fstream>
#include <string>


int main(int argc, char** argv)
{
    av_register_all();

    if (argc < 2) {
        std::cerr << "use: test audiofile" << std::endl;
        return 1;
    }

    // the audio file to decode is given on the command line
    std::string filename = argv[1];

    AVFrame* frame = avcodec_alloc_frame();
    if (!frame)
    {
        std::cout << "Error allocating the frame" << std::endl;
        return 1;
    }

    AVFormatContext* formatContext = NULL;
    if (avformat_open_input(&formatContext, filename.c_str(), NULL, NULL) != 0)
    {
        av_free(frame);
        std::cout << "Error opening the file" << std::endl;
        return 1;
    }

    if (avformat_find_stream_info(formatContext, NULL) < 0)
    {
        av_free(frame);
        av_close_input_file(formatContext);
        std::cout << "Error finding the stream info" << std::endl;
        return 1;
    }

    AVStream* audioStream = NULL;
    // Find the audio stream (some container files can have multiple streams in them)
    for (unsigned int i = 0; i < formatContext->nb_streams; ++i)
    {
        if (formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            audioStream = formatContext->streams[i];
            break;
        }
    }

    if (audioStream == NULL)
    {
        av_free(frame);
        av_close_input_file(formatContext);
        std::cout << "Could not find any audio stream in the file" << std::endl;
        return 1;
    }

    AVCodecContext* codecContext = audioStream->codec;

    codecContext->codec = avcodec_find_decoder(codecContext->codec_id);
    if (codecContext->codec == NULL)
    {
        av_free(frame);
        av_close_input_file(formatContext);
        std::cout << "Couldn't find a proper decoder" << std::endl;
        return 1;
    }
    else if (avcodec_open2(codecContext, codecContext->codec, NULL) != 0)
    {
        av_free(frame);
        av_close_input_file(formatContext);
        std::cout << "Couldn't open the context with the decoder" << std::endl;
        return 1;
    }

    /*

    //
    // why is the file truncated???
    //if(codecContext->codec->capabilities & CODEC_CAP_TRUNCATED) codecContext->codec->capabilities|=CODEC_FLAG_TRUNCATED;
    //
    //

    av_dump_format(formatContext, 0, 0, false); //avformat.h line 1256
    int samples = ((formatContext->duration + 5000)*codecContext->sample_rate)/AV_TIME_BASE;

    std::cout << "This stream has " << codecContext->channels << " channels, a sample rate of " << codecContext->sample_rate << "Hz and " << samples << " samples" << std::endl;
    std::cout << "The data is in format " << codecContext->sample_fmt << " (aka " << av_get_sample_fmt_name(codecContext->sample_fmt) << ")" << std::endl;

    //we can now tell the processors the format
    //we can work out the number of samples at this point

    for (auto p: processors) {
        p->init(codecContext->channels,16,samples);
    }

    AVPacket packet;
    av_init_packet(&packet);
    int sample_processed=0;

    bool diag=true;

    // Read the packets in a loop
    while (true)
    //while(sample_processed<samples)
    {
        int ret=av_read_frame(formatContext, &packet);
        if (ret<0) {
            cerr << "finished with code " << ret << (ret==AVERROR_EOF ? ", EOF" : "") << endl;
            break;
        }
        //av_read_frame(formatContext, &packet); //hangs once the packets have been read
        if (packet.stream_index == audioStream->index)
        {
            // Try to decode the packet into a frame
            int frameFinished = 0;
            int bytes = avcodec_decode_audio4(codecContext, frame, &frameFinished, &packet);

            // Some frames rely on multiple packets, so we have to make sure the frame is finished before
            // we can use it
            if (frameFinished)
            {
                // frame now has usable audio data in it. How it's stored in the frame depends on the format of
                // the audio. If it's packed audio, all the data will be in frame->data[0]. If it's in planar format,
                // the data will be in frame->data and possibly frame->extended_data. Look at frame->data, frame->nb_samples,
                // frame->linesize, and other related fields in the FFmpeg docs. I don't know how you're actually using
                // the audio data, so I won't add any junk here that might confuse you. Typically, if I want to find
                // documentation on an FFmpeg structure or function, I just type "<name> doxygen" into google (like
                // "AVFrame doxygen" for AVFrame's docs)

                //av_get_channel_layout_string (char *buf, int buf_size, int nb_channels, uint64_t channel_layout)

                if (diag) {
                    cerr << "first frame: " << bytes << ", " << frame->nb_samples << " samples in " << av_get_sample_fmt_name(frame->format) << " format with channel layout " << frame->channel_layout << std::endl;
                    diag=false;
                }

                //std::cout << "Got a frame: bytes " << bytes << ", " << frame->nb_samples << " samples" << std::endl;
                //now we can pass the data to the processor(s)
                for (auto p: processors) {
                    sample_processed=p->process_frame(frame->data[0],frame->nb_samples);
                }

                mutex.lock();
                progress=((double)sample_processed)/samples;
                mutex.unlock();
            }
        }
        // You *must* call av_free_packet() after each call to av_read_frame() or else you'll leak memory
        av_free_packet(&packet); //crashes here!!! SIGSEGV in _int_free (av=0xb4600010, p=0xb46025c8, have_lock=0) at malloc.c:4085
    }

    // Some codecs will cause frames to be buffered up in the decoding process. If the CODEC_CAP_DELAY flag
    // is set, there can be buffered up frames that need to be flushed, so we'll do that
    if (codecContext->codec->capabilities & CODEC_CAP_DELAY)
    {
        av_init_packet(&packet);
        // Decode all the remaining frames in the buffer, until the end is reached
        int frameFinished = 0;
        int bytes = avcodec_decode_audio4(codecContext, frame, &frameFinished, &packet);
        while (bytes >= 0 && frameFinished)
        {
            for (auto p: processors) {
                p->process_frame(frame->data[0],frame->nb_samples);
            }
            mutex.lock();
            progress=((double)sample_processed)/samples;
            mutex.unlock();
        }
    }

    cerr << "finished processed: " << sample_processed << " samples of " << samples << ", " << ((double)sample_processed*100)/samples << "%" << std::endl;
    */

    // Clean up!
    av_free(frame);
    avcodec_close(codecContext);
    av_close_input_file(formatContext);

    return 0;
}
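
The long comment inside the commented-out decode loop describes where avcodec_decode_audio4() leaves the samples: interleaved in frame->data[0] for packed formats, one plane per channel for planar formats. The sketch below is not part of the commit; it is a minimal illustration of that layout under the same FFmpeg-era API used in testint.c, with a hypothetical handle_sample() standing in for whatever the processors would do with the data.

```cpp
// Illustrative sketch only (not from the commit): walking the samples of a decoded
// AVFrame for both packed and planar sample formats. handle_sample() is a
// hypothetical placeholder for the per-sample processing.
extern "C" {
    #include "libavcodec/avcodec.h"
    #include "libavutil/samplefmt.h"
}
#include <cstdint>

static void read_frame_samples(const AVCodecContext* codecContext, const AVFrame* frame)
{
    const AVSampleFormat fmt = (AVSampleFormat)frame->format;
    const int bytesPerSample = av_get_bytes_per_sample(fmt);
    const int channels = codecContext->channels;

    if (av_sample_fmt_is_planar(fmt)) {
        // Planar: one plane per channel; frame->extended_data[ch] holds
        // frame->nb_samples samples belonging to channel ch.
        for (int ch = 0; ch < channels; ++ch) {
            const uint8_t* plane = frame->extended_data[ch];
            for (int i = 0; i < frame->nb_samples; ++i) {
                const uint8_t* sample = plane + i * bytesPerSample;
                // handle_sample(ch, sample, bytesPerSample);   // hypothetical
                (void)sample;
            }
        }
    } else {
        // Packed: all channels interleaved in frame->data[0]
        // (sample 0 of every channel, then sample 1, and so on).
        const uint8_t* data = frame->data[0];
        for (int i = 0; i < frame->nb_samples; ++i) {
            for (int ch = 0; ch < channels; ++ch) {
                const uint8_t* sample = data + (i * channels + ch) * bytesPerSample;
                // handle_sample(ch, sample, bytesPerSample);   // hypothetical
                (void)sample;
            }
        }
    }
}
```

For planar formats, frame->data only carries the first few channel pointers, which is why extended_data is the safer accessor when the channel count can be large.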

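The flush block in the commented-out section calls avcodec_decode_audio4() once and then loops on that single result, so it would either stop after one buffered frame or spin on the same frame forever; it also reuses the last read packet rather than an empty one. A draining loop normally feeds the decoder an empty packet on every iteration until no more frames come out. A minimal sketch of that shape, not from the commit, reusing the codecContext/frame names from testint.c:

```cpp
// Illustrative sketch only (not from the commit): draining frames buffered by a
// decoder that sets CODEC_CAP_DELAY, using the same avcodec_decode_audio4() API.
// An empty packet (data = NULL, size = 0) tells the decoder to flush.
extern "C" {
    #include "libavcodec/avcodec.h"
}

static void drain_decoder(AVCodecContext* codecContext, AVFrame* frame)
{
    if (!(codecContext->codec->capabilities & CODEC_CAP_DELAY))
        return;

    AVPacket flushPacket;
    av_init_packet(&flushPacket);
    flushPacket.data = NULL;
    flushPacket.size = 0;

    for (;;) {
        int frameFinished = 0;
        int bytes = avcodec_decode_audio4(codecContext, frame, &frameFinished, &flushPacket);
        if (bytes < 0 || !frameFinished)
            break;                  // error, or no more buffered frames
        // frame now holds a buffered frame; hand it to the same per-frame
        // processing used in the main read loop.
    }
}
```

Usage would be a single call after the read loop, e.g. drain_decoder(codecContext, frame), before the clean-up section.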