#include "nodes_audio_analysis.h" namespace Rotor{ bool Audio_thumbnailer::init(int _channels,int _bits,int _samples,int _rate) { //base_audio_processor::init(_channels,_bits,_samples); channels=_channels; bits=_bits; samples=_samples; samples_per_column=samples/width; offset=0x1<<(bits-1); //signed audio scale=1.0/offset; out_sample=0; //sample in whole track sample=0; samples=0; accum=0.0; return true; } int Audio_thumbnailer::process_frame(uint8_t *_data,int samples_in_frame){ //begin by processing remaining samples //samples per column could be larger than a frame! (probably is) //but all we are doing is averaging int bytes=(bits>>3); int stride=channels*bytes; int in_sample=0; while (in_sample0) vdata+=","; vdata+=toString(sample); } XML.addValue("data",vdata); } bool Vamp_node::init(int _channels,int _bits,int _samples, int _rate) { //need these to make sense of data channels=_channels; bits=_bits; samples=_samples; features.clear(); return analyser.init(soname,id,_channels,_bits,_samples,_rate,outputNo,params); //attempt to load vamp plugin and prepare to receive frames of data //should the audio analysis contain a vamphost or should it inherit? //maybe neater to contain it in terms of headers etc } int Vamp_node::process_frame(uint8_t *data,int samples_in_frame) { analyser.process_frame(data,samples_in_frame); return 1; } void Vamp_node::cleanup() { analyser.cleanup(); features=analyser.features; } string Vamp_node::get_features(){ string data; for (auto i: features) { data=data+" ["+toString(i.second.number)+":"+toString(i.first); if (i.second.values.size()) { data+=" ("; bool first=true; for (auto j: i.second.values) { if (first){ first=false; } else data+=","; data=data+toString(j); } data+=") "; } data+="]"; } return data; } bool sortsegments(std::pair i,std::pair j){ return (i.second > > i,std::pair > > j){ return (i.first i,pair j){ return (i.first > similarities; //what do we want to know about these similarities? // how many are there? map.size() // how many members are in each one? map[item].size() // which members are they? 
//NOTE: the opening of this function was lost in extraction; it is the
//aggregation pass of the node that owns the "analysers" and "parameters"
//maps, so the signature below is assumed
void Audio_grouper::cleanup() {
    map<int,vector<uint32_t> > similarities;
    //what do we want to know about these similarities?
    // how many are there? map.size()
    // how many members are in each one? map[item].size()
    // which members are they? for (auto m: map[item])
    uint32_t i=0;
    for (auto f:analysers["segmenter"].features) {
        if (f.second.values.size()) {
            int group=f.second.values[0];
            if (similarities.find(group)==similarities.end()){
                similarities[group]={};
            }
            similarities[group].push_back(i);
        }
        i++;
    }
    for (auto s:similarities) {
        string list="";
        for (int j=0;j<(int)s.second.size();j++){
            if (j>0) list+=",";
            list +=toString(s.second[j]);
        }
        cerr<<"group "<<s.first<<": "<<list<<endl;
    }
    if (analysers["segmenter"].features.size()<2) return; //need at least two boundaries
    //integrate tempo and intensity over each segment
    double max_tempo=0, min_tempo=DBL_MAX;
    double max_intensity=0, min_intensity=DBL_MAX;
    vector<double> tempos;
    vector<double> intensities;
    vector<double> times;
    i=0;
    auto g=++analysers["segmenter"].features.begin();
    for (auto f=analysers["segmenter"].features.begin();g!=analysers["segmenter"].features.end();f++,g++,i++){
        times.push_back(f->first);
        //integrate tempo and intensity algorithmically
        double tempo=0;
        if (analysers["tempo"].features.size()) {
            double pt=f->first;
            double pv=analysers["tempo"].get_value(f->first);
            for (auto u=analysers["tempo"].features.upper_bound(f->first);u!=analysers["tempo"].features.upper_bound(g->first);u++){
                tempo +=(u->first-pt)*(u->second.values[0]+pv)*0.5f; //area of the slice
                pt=u->first;
                pv=u->second.values[0];
            }
            tempo +=(g->first-pt)*(analysers["tempo"].get_value(g->first)+pv)*0.5f; //area of the last slice
            tempo /=g->first-f->first; //average value
        }
        if (tempo>max_tempo) max_tempo=tempo;
        if (tempo<min_tempo) min_tempo=tempo;
        tempos.push_back(tempo);
        //same again for intensity
        double intensity=0;
        if (analysers["intensity"].features.size()) {
            double pt=f->first;
            double pv=analysers["intensity"].get_value(f->first);
            for (auto u=analysers["intensity"].features.upper_bound(f->first);u!=analysers["intensity"].features.upper_bound(g->first);u++){
                intensity +=(u->first-pt)*(u->second.values[0]+pv)*0.5f; //area of the slice
                pt=u->first;
                pv=u->second.values[0];
            }
            intensity +=(g->first-pt)*(analysers["intensity"].get_value(g->first)+pv)*0.5f; //area of the last slice
            intensity /=g->first-f->first; //average value
        }
        if (intensity>max_intensity) max_intensity=intensity;
        if (intensity<min_intensity) min_intensity=intensity;
        intensities.push_back(intensity);
        cerr<<"segment "<<f->first<<" to "<<g->first<<" average tempo: "<<tempo<<" average intensity: "<<intensity<<" total: "<<(tempo*parameters["tempo_weight"]->value)+(intensity*parameters["intensity_weight"]->value)<<endl;
    }
    //combine into a weighted total per segment
    vector<pair<uint32_t,double> > totals;
    vector<double> totalsmap;
    for (i=0;i<tempos.size();i++){
        totals.push_back(make_pair(i,(tempos[i]*parameters["tempo_weight"]->value)+(intensities[i]*parameters["intensity_weight"]->value)));
        totalsmap.push_back((tempos[i]*parameters["tempo_weight"]->value)+(intensities[i]*parameters["intensity_weight"]->value));
    }
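/*
 illustrative sketch (not in the original source): the integration above is
 the trapezoid rule divided by the segment length, i.e. an area-weighted
 average of a sampled feature track over [t0,t1]. With a plain
 map<double,double> standing in for a feature track:

   static double trapezoid_average(const std::map<double,double> &track,
                                   double t0,double t1,
                                   double v0,double v1){ //values at t0 and t1
       double area=0.0, pt=t0, pv=v0;
       for (auto u=track.upper_bound(t0);u!=track.upper_bound(t1);u++){
           area+=(u->first-pt)*(u->second+pv)*0.5; //slice area
           pt=u->first; pv=u->second;
       }
       area+=(t1-pt)*(v1+pv)*0.5; //last slice
       return area/(t1-t0);
   }

 v0 and v1 play the role of get_value(f->first) and get_value(g->first) above.
*/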
    /*
    //sort and convert to features
    std::sort(totals.begin(),totals.end(),sortsegments);
    for (i=0;i<totals.size();i++){
        ... [lost in extraction]
    }
    //redistribute the sorted segments into a smaller number of buckets
    vector<double> bucketoffsets;
    for (auto t:totals) bucketoffsets.push_back(0.0);
    if (parameters["levels"]->value>0.0&&parameters["levels"]->value<totals.size()) {
        int numbertoredistribute=totals.size()-(int)(parameters["levels"]->value);
        double numberperbin=((double)numbertoredistribute/totals.size());
        double toadd=0.5f;
        int added=0;
        for (int j=0;j<totals.size();j++){
            ... [lost in extraction]
        }
        if (added>0) {
            cerr<<"reducing number of levels by "<<added<<endl;
        }
    }
    */
    /*
    >> start grouping by similarity
    if there are more similarity groups than wantedgroups, start by grouping similarities
    otherwise take biggest similarity groups and split them by intensity
    if there are still too many groups, merge closest smallest groups
    finally sort by intensity to map output
    need to retrieve total intensity by segment
    */
    //each group is (group_intensity, list of (segment_intensity, segment index))
    vector<pair<double, vector<pair<double,int> > > > seggrps;
    for (i=0;i<totalsmap.size();i++){
        vector<pair<double,int> > data;
        data.push_back(make_pair(totalsmap[i],i));
        seggrps.push_back(make_pair(totalsmap[i],data));
    }
    //fold similar segments into the group of their first member; erase the
    //emptied groups only after all merges so the stored indices stay valid
    //(the original erased inside the loop, which shifts later indices)
    vector<int> emptied;
    for (auto s:similarities){
        if (s.second.size()>1){
            for (int j=(int)s.second.size()-1;j>0;j--){
                seggrps[s.second[0]].second.push_back(make_pair(totalsmap[s.second[j]],s.second[j]));
                emptied.push_back(s.second[j]);
            }
            //keep a running average// should be by area?
            //seggrps[s.second[0]].first+=(totalsmap[s.second[j]]*(1.0/max(1,(int)seggrps[s.second[0]].second.size()-1)));
            //double div=seggrps[s.second[0]].second.size()==1?1.0:(((double)seggrps[s.second[0]].second.size()-1)/(double)seggrps[s.second[0]].second.size()); //neat! this gives 1,1/2,2/3,3/4..
            //seggrps[s.second[0]].first*=div;
            //easier is to recompute the mean outright
            double avg=0.0;
            for (auto p:seggrps[s.second[0]].second) avg+=p.first;
            avg/=seggrps[s.second[0]].second.size();
            seggrps[s.second[0]].first=avg;
        }
    }
    std::sort(emptied.begin(),emptied.end());
    for (int j=(int)emptied.size()-1;j>=0;j--) seggrps.erase(seggrps.begin()+emptied[j]);
    cerr<<"similarities assigned, "<<(totalsmap.size()-seggrps.size())<<" segments merged"<<endl;
    if (((int)parameters["levels"]->value)>0) {
        if ((int)seggrps.size()>(int)parameters["levels"]->value){
            while ((int)seggrps.size()>(int)parameters["levels"]->value){
                //reduce similarity groups
                //decide the best 2 to merge
                //NOTE: the merge itself was lost in extraction; a plausible
                //reconstruction: merge the neighbouring pair (in segment order)
                //whose average intensities are closest
                vector<double> diffs;
                for (int j=0;j<(int)seggrps.size()-1;j++)
                    diffs.push_back(fabs(seggrps[j].first-seggrps[j+1].first));
                int closest=0;
                for (int j=1;j<(int)diffs.size();j++)
                    if (diffs[j]<diffs[closest]) closest=j;
                for (auto p:seggrps[closest+1].second)
                    seggrps[closest].second.push_back(p);
                seggrps.erase(seggrps.begin()+closest+1);
                double avg=0.0;
                for (auto p:seggrps[closest].second) avg+=p.first;
                avg/=seggrps[closest].second.size();
                seggrps[closest].first=avg;
            }
            cerr<<(int)parameters["levels"]->value<<" levels requested, "<<(int)totalsmap.size()<<" original segments"<<endl;
        }
        if ((int)seggrps.size()<min((int)parameters["levels"]->value,(int)totalsmap.size())){
            while ((int)seggrps.size()<min((int)parameters["levels"]->value,(int)totalsmap.size())) {
                //split groups
                //calculate standard deviation of intensity variation
                //NOTE: the SD computation was lost in extraction; reconstruction
                vector<double> devs;
                for (int j=0;j<(int)seggrps.size();j++){
                    double mean=0.0;
                    for (auto p:seggrps[j].second) mean+=p.first;
                    mean/=seggrps[j].second.size();
                    double var=0.0;
                    for (auto p:seggrps[j].second) var+=(p.first-mean)*(p.first-mean);
                    devs.push_back(sqrt(var/seggrps[j].second.size()));
                }
                //sanity check: if there are any groups that can be split they will have larger SD than singleton groups
                //(guard added: never pick a singleton, and stop if nothing is splittable)
                int largest=-1;
                for (int j=0;j<(int)seggrps.size();j++)
                    if (seggrps[j].second.size()>1&&(largest<0||devs[j]>devs[largest])) largest=j;
                if (largest<0) break;
                //sort members of the group
                std::sort(seggrps[largest].second.begin(),seggrps[largest].second.end(),sortgroupmembers);
                //create a new group from the more intense half
                //(the original started at size(), one past the end; start at
                //size()-1 and fix the midpoint before erasing shrinks the group)
                std::pair<double, vector<pair<double,int> > > newgroup;
                int half=(int)seggrps[largest].second.size()/2;
                for (int j=(int)seggrps[largest].second.size()-1;j>=half;j--) {
                    newgroup.second.push_back(seggrps[largest].second[j]);
                    seggrps[largest].second.erase(seggrps[largest].second.begin()+j);
                }
                //refresh averages for the 2 groups
                double avg=0.0;
                for (auto p:seggrps[largest].second) avg+=p.first;
                avg/=seggrps[largest].second.size();
                seggrps[largest].first=avg;
                avg=0.0;
                for (auto p:newgroup.second) avg+=p.first;
                avg/=newgroup.second.size();
                newgroup.first=avg;
                //add the new group
                seggrps.push_back(newgroup);
            }
            cerr<<"similarity groups split, "<<seggrps.size()<<" groups"<<endl;
        }
    }
    //sort groups by intensity so the quietest group maps to output value 0
    std::sort(seggrps.begin(),seggrps.end(),sortgroups);
    vector<double> outputvalues;
    for (int j=0;j<(int)seggrps.size();j++){
        string list="";
        for (int k=0;k<(int)seggrps[j].second.size();k++){
            if (k>0) list+=",";
            list +=toString(seggrps[j].second[k].second);
        }
        cerr<<"output value: "<<j<<" contains segments "<<list<<endl;
    }
    //NOTE: the remainder of the function was lost in extraction; presumably
    //the group index (or a normalised intensity) is written back onto the
    //node's output features here
}

} //namespace Rotor
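/*
 worked example (hypothetical numbers, not from the original source): six
 segments with weighted totals {0.2,0.8,0.3,0.7,0.2,0.9}, similarity groups
 {0,4} and {1,5}, and parameters["levels"]->value = 3.

   - similarity merge: groups in segment order become
     (0,4)=0.2, (1,5)=0.85, (2)=0.3, (3)=0.7 -> 4 groups, 2 segments merged
   - 4 > 3 levels, so the neighbouring pair with the closest averages,
     (2)=0.3 and (3)=0.7 (diff 0.4), is merged -> (2,3)=0.5, 3 groups
   - sort ascending by average intensity -> output value 0 = {0,4},
     1 = {2,3}, 2 = {1,5}
*/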