path: root/rotord/src/nodes_audio_analysis.cpp
Diffstat (limited to 'rotord/src/nodes_audio_analysis.cpp')
-rw-r--r--  rotord/src/nodes_audio_analysis.cpp  |  72
1 file changed, 2 insertions, 70 deletions
diff --git a/rotord/src/nodes_audio_analysis.cpp b/rotord/src/nodes_audio_analysis.cpp
index f077d41..2a39d8c 100644
--- a/rotord/src/nodes_audio_analysis.cpp
+++ b/rotord/src/nodes_audio_analysis.cpp
@@ -122,7 +122,7 @@ namespace Rotor{
//how to group with similarity?
//segments come with similarity groups
- // 1 - are the wanted segments less than discovered?
+ // 1 - are the wanted git checksegments less than discovered?
// N - do nothing
// 2 - get intensity and tempo averages
// 2 - count the groups
@@ -218,7 +218,7 @@ namespace Rotor{
totalsmap.push_back((tempos[i]*parameters["tempo_weight"]->value)+(intensities[i]*parameters["intensity_weight"]->value));
}
- /*
+
//sort and convert to features
std::sort(totals.begin(),totals.end(),sortsegments);
for (i=0;i<totals.size();i++) {
@@ -252,75 +252,7 @@ namespace Rotor{
f.values.push_back((double)i-bucketoffsets[i]);
features[times[totals[i].first]]=f;
}
- */
- /*
-sort intensity totals
-find out how many segments will share levels apart from similarity levels
-start with a structure:
-map<inputnum,vector<pair<tempo,inputnum>>
-start grouping by similarity
-if there are more similarity groups than wantedgroups, start by grouping similarities
-otherwise take biggest similarity groups and split them by intensity
-if there are still too many groups, merge closest smallest groups
-finally sort by intensity to map output
-
-need to retrieve total intensity by segment
- */
- // segment group_intensity seg_intense segment
- vector<pair<double,vector<pair<double,int> > > > seggrps;
- for (i=0;i<totalsmap.size();i++){
- vector<pair<double,int> > data;
- data.push_back(make_pair(totalsmap[i],i));
- seggrps.push_back(make_pair(totalsmap[i],data));
- }
- for (auto s:similarities){
- if (s.second.size()>1){
- for (int j=s.second.size()-1;j>0;j--){
- seggrps[s.second[0]].second.push_back(make_pair(totalsmap[s.second[j]],s.second[j]));
- //keep running average// should be by area?
- seggrps[s.second[0]].first+=(totalsmap[s.second[j]]*(1.0/max(1,(int)seggrps[s.second[0]].second.size()-1)));
- double div=seggrps[s.second[0]].second.size()==1?1.0:((double)seggrps[s.second[0]].second.size()-1/(double)seggrps[s.second[0]].second.size());
- //neat! this gives 1,1/2,2/3,3/4..
- seggrps[s.second[0]].first*=div;
- seggrps.erase(seggrps.begin()+s.second[j]);
- }
- }
- }
- cerr<<"similarities assigned, "<<(totalsmap.size()-seggrps.size())<<" segments merged"<<endl;
- //sort the contents by intensity
- std::sort(seggrps.begin(),seggrps.end(),sortseggrps);
- //possible mergers will be with groups with adjacent intensity
- while (seggrps.size()>(int)parameters["levels"]->value){
- //reduce similarity groups
- //decide the best 2 to merge
- vector<double> diffs;
- for (int j=0;j<seggrps.size()-1;j++) diffs.push_back(seggrps[j+1].first-seggrps[j].first);
- int smallest=0;
- for (int j=1;j<diffs.size();j++) if (diffs[i]<diffs[smallest]) smallest=i;
- for (int j=0;j<seggrps[smallest].second.size();j++) {
- seggrps[smallest+1].second.push_back(seggrps[smallest].second[j]);
- }
- seggrps.erase(seggrps.begin()+smallest);
- }
- cerr<<"intensities merged, "<<seggrps.size()<<" levels remain"<<endl;
- while (seggrps.size()<(int)parameters["levels"]->value) {
- //split groups
- }
-
- map<int,int> outputvalues;
- for (int j=0;j<seggrps.size();j++){
- for (int k=0;k<seggrps[j].second.size();k++){
- outputvalues[seggrps[j].second[k].second]=j;
- }
- }
-
-
- for (i=0;i<totals.size();i++){
- vampHost::feature f;
- f.values.push_back(outputvalues[i]);
- features[times[totals[i].first]]=f;
- }
}
}
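
The code removed in the last hunk sketched a level-assignment pass: each segment gets a weighted intensity/tempo score (totalsmap), similar segments are pooled into groups with a running mean, the groups are sorted by mean score, and the two adjacent groups with the smallest gap are merged until only parameters["levels"]->value groups remain; every segment then receives its group index as an output value. Below is a minimal, self-contained C++ sketch of that idea, not the Rotor/vampHost API; the Group struct and group_segments function are illustrative names. The merge step recomputes a size-weighted mean instead of the running-average trick in the removed code, and the smallest-gap search uses j consistently, whereas the removed line "for (int j=1;j<diffs.size();j++) if (diffs[i]<diffs[smallest]) smallest=i;" indexed with i by mistake.

// Minimal sketch (illustrative names, not the Rotor/vampHost API) of the removed
// grouping pass: pool similar segments, sort groups by mean score, merge the
// closest adjacent pair until only `levels` groups remain, then map each segment
// to its group index.
#include <algorithm>
#include <cstddef>
#include <map>
#include <vector>

struct Group {
    double mean;                // mean weighted intensity/tempo score of the group
    std::vector<int> segments;  // segment indices belonging to this group
};

// scores   : one weighted score per segment (the diff's totalsmap)
// similars : sets of segment indices judged similar (the diff's similarities)
// levels   : wanted number of output levels (the diff's parameters["levels"])
std::map<int, int> group_segments(const std::vector<double>& scores,
                                  const std::vector<std::vector<int>>& similars,
                                  std::size_t levels) {
    std::vector<bool> used(scores.size(), false);
    std::vector<Group> groups;

    // one group per similarity set, mean taken over its members
    for (const auto& s : similars) {
        Group g{0.0, {}};
        for (int idx : s) {
            g.segments.push_back(idx);
            used[idx] = true;
        }
        if (g.segments.empty()) continue;
        for (int idx : g.segments) g.mean += scores[idx];
        g.mean /= g.segments.size();
        groups.push_back(std::move(g));
    }
    // remaining segments form singleton groups
    for (std::size_t i = 0; i < scores.size(); ++i)
        if (!used[i]) groups.push_back(Group{scores[i], {static_cast<int>(i)}});

    // sort by mean so merge candidates are neighbours, as in the removed code
    std::sort(groups.begin(), groups.end(),
              [](const Group& a, const Group& b) { return a.mean < b.mean; });

    // merge the adjacent pair with the smallest mean gap until `levels` remain
    while (groups.size() > levels && groups.size() > 1) {
        std::size_t smallest = 0;
        for (std::size_t j = 1; j + 1 < groups.size(); ++j)
            if (groups[j + 1].mean - groups[j].mean <
                groups[smallest + 1].mean - groups[smallest].mean)
                smallest = j;
        Group& dst = groups[smallest + 1];
        Group& src = groups[smallest];
        double total = dst.mean * dst.segments.size() + src.mean * src.segments.size();
        dst.segments.insert(dst.segments.end(), src.segments.begin(), src.segments.end());
        dst.mean = total / dst.segments.size();  // size-weighted mean of the merger
        groups.erase(groups.begin() + smallest);
    }

    // each segment maps to its (intensity-ordered) group number
    std::map<int, int> level_of;
    for (std::size_t g = 0; g < groups.size(); ++g)
        for (int idx : groups[g].segments) level_of[idx] = static_cast<int>(g);
    return level_of;
}

Inside the node, the inputs would presumably be totalsmap, similarities and (int)parameters["levels"]->value, and the returned map would be written out the way the final removed loop filled vampHost::feature values.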