// NOTE: this uses deprecated FFmpeg APIs and appears to be obsolete.
bool Render_context::load_audio(string &filename){
    // Load audio into memory and create a thumbnail.
    av_register_all();
    avcodec_register_all();

    AVFormatContext *container = avformat_alloc_context();
    if(avformat_open_input(&container, filename.c_str(), NULL, NULL) < 0){
        cerr << "Could not open file" << endl;
        return false;
    }
    // Declared in avformat.h, defined in utils.c in ffmpeg/libavformat:
    // int avformat_open_input(AVFormatContext **ps, const char *filename,
    //                         AVInputFormat *fmt, AVDictionary **options)
    // TODO: pass an input format or options dictionary here?

    if(av_find_stream_info(container) < 0){
        cerr << "Could not find file info" << endl;
        return false;
    }
    // Added to try to locate missing audio streams:
    //container->flags |= AVFMT_FLAG_NONBLOCK;

    av_dump_format(container, 0, filename.c_str(), false);
    cerr << "found " << container->nb_streams << " streams" << endl;

    // From avformat.h, AVFormatContext::streams:
    //   A list of all streams in the file. New streams are created with
    //   avformat_new_stream().
    //   decoding: streams are created by libavformat in avformat_open_input().
    //   If AVFMTCTX_NOHEADER is set in ctx_flags, then new streams may also
    //   appear in av_read_frame().
    //   encoding: streams are created by the user before avformat_write_header().

    // Find the first audio stream.
    int stream_id = -1;
    for(unsigned int i = 0; i < container->nb_streams; i++){
        cerr << "stream " << i << ": codec type "
             << container->streams[i]->codec->codec_type << endl;
        if(container->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO){
            stream_id = i;
            break;
        }
    }
    if(stream_id == -1){
        cerr << "Could not find audio stream" << endl;
        return false;
    }

    AVDictionary *metadata = container->metadata; // currently unused
    AVCodecContext *ctx = container->streams[stream_id]->codec;
    AVCodec *codec = avcodec_find_decoder(ctx->codec_id);
    if(codec == NULL){
        cerr << "Cannot find codec!" << endl;
        return false;
    }
    if(avcodec_open(ctx, codec) < 0){
        cerr << "Could not open codec" << endl;
        return false;
    }

    AVSampleFormat sfmt = ctx->sample_fmt;
    if(sfmt == AV_SAMPLE_FMT_U8){
        printf("U8\n");        //sformat.bits = 8;
    }else if(sfmt == AV_SAMPLE_FMT_S16){
        printf("S16\n");       //sformat.bits = 16;
    }else if(sfmt == AV_SAMPLE_FMT_S32){
        printf("S32\n");       //sformat.bits = 32;
    }
    // sformat.channels = ctx->channels;
    // sformat.rate = ctx->sample_rate;
    // sformat.byte_format = AO_FMT_NATIVE;
    // sformat.matrix = 0;

    AVPacket packet;
    av_init_packet(&packet);
    AVFrame *frame = avcodec_alloc_frame();
    const int buffer_size = AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE;
    uint8_t buffer[buffer_size];
    packet.data = buffer;        // av_read_frame replaces these fields with its own buffer
    packet.size = buffer_size;

    int frameFinished = 0;
    while(av_read_frame(container, &packet) >= 0){
        if(packet.stream_index == stream_id){
            int len = avcodec_decode_audio4(ctx, frame, &frameFinished, &packet);
            if(len >= 0 && frameFinished){
                //printf("Finished reading frame len: %d, nb_samples: %d, buffer_size: %d, line size: %d\n",
                //       len, frame->nb_samples, buffer_size, frame->linesize[0]);
                //ao_play(adevice, (char*)frame->extended_data[0], frame->linesize[0]);
                //DO SOMETHING WITH THE DATA HERE
            }else{
                //printf("Not finished\n");
            }
        }else{
            cerr << "Skipping non-audio packet (possibly video)" << endl;
        }
        av_free_packet(&packet); // av_read_frame allocates the packet data
    }

    av_close_input_file(container);
    return true;
}
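
// The function above relies on FFmpeg calls that are deprecated or removed in
// current releases (av_register_all, av_find_stream_info, avcodec_open,
// avcodec_alloc_frame, avcodec_decode_audio4, av_close_input_file). The sketch
// below shows roughly how the same open / find-audio-stream / decode-all-frames
// flow looks with the send/receive API. It is a minimal illustration, not part
// of this code base: the name decode_audio_sketch and the handle_samples
// callback are assumptions, and error handling is reduced to early returns.
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}
#include <functional>
#include <string>

static bool decode_audio_sketch(const std::string &filename,
                                const std::function<void(const AVFrame*)> &handle_samples){
    AVFormatContext *fmt = NULL;
    if(avformat_open_input(&fmt, filename.c_str(), NULL, NULL) < 0)
        return false;
    if(avformat_find_stream_info(fmt, NULL) < 0){
        avformat_close_input(&fmt);
        return false;
    }

    // Let libavformat pick the best audio stream instead of scanning manually.
    int stream_id = av_find_best_stream(fmt, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
    if(stream_id < 0){
        avformat_close_input(&fmt);
        return false;
    }

    const AVCodec *codec = avcodec_find_decoder(fmt->streams[stream_id]->codecpar->codec_id);
    if(!codec){
        avformat_close_input(&fmt);
        return false;
    }
    AVCodecContext *ctx = avcodec_alloc_context3(codec);
    avcodec_parameters_to_context(ctx, fmt->streams[stream_id]->codecpar);
    if(avcodec_open2(ctx, codec, NULL) < 0){
        avcodec_free_context(&ctx);
        avformat_close_input(&fmt);
        return false;
    }

    AVPacket *pkt = av_packet_alloc();
    AVFrame *frame = av_frame_alloc();
    while(av_read_frame(fmt, pkt) >= 0){
        if(pkt->stream_index == stream_id && avcodec_send_packet(ctx, pkt) == 0){
            // One packet can produce zero or more decoded frames.
            while(avcodec_receive_frame(ctx, frame) == 0)
                handle_samples(frame);
        }
        av_packet_unref(pkt);
    }
    // Flush the decoder to drain any buffered frames.
    avcodec_send_packet(ctx, NULL);
    while(avcodec_receive_frame(ctx, frame) == 0)
        handle_samples(frame);

    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&ctx);
    avformat_close_input(&fmt);
    return true;
}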