#include "vampHost.h" void vampHost::printFeatures(int frame, int sr, int output, Plugin::FeatureSet features, ostream& out, bool useFrames) { if (features[output].size()) { cout << "." << features[output].size(); } for (unsigned int i = 0; i < features[output].size(); ++i) { if (useFrames) { int displayFrame = frame; if (features[output][i].hasTimestamp) { displayFrame = RealTime::realTime2Frame (features[output][i].timestamp, sr); } out << displayFrame; if (features[output][i].hasDuration) { displayFrame = RealTime::realTime2Frame (features[output][i].duration, sr); out << "," << displayFrame; } out << ":"; } else { RealTime rt = RealTime::frame2RealTime(frame, sr); if (features[output][i].hasTimestamp) { rt = features[output][i].timestamp; } out << rt.toString(); if (features[output][i].hasDuration) { rt = features[output][i].duration; out<< "," << rt.toString(); } out << ":"; } for (unsigned int j = 0; j < features[output][i].values.size(); ++j) { out<< " " << features[output][i].values[j]; } out << " " << features[output][i].label; out << endl; } } void vampHost::rotorGetFeatures(int frame, int sr, int output,Plugin::FeatureSet features, vector& out, float& progress) { if (features[output].size()) { cout << "." << features[output].size(); } for (unsigned int i = 0; i < features[output].size(); ++i) { int displayFrame = frame; if (features[output][i].hasTimestamp) { displayFrame = RealTime::realTime2Frame (features[output][i].timestamp, sr); } cout << displayFrame; cout << endl; } } void vampHost::getTimestamps(int output,Plugin::FeatureSet features, vector& out){ /* vamp-simple-host qm-vamp-plugins:qm-tempotracker 01.wav 0.046439908: 156.61 bpm 0.429569160: 156.61 bpm 0.812698412: 161.50 bpm 1.184217686: 152.00 bpm vamp-simple-host qm-vamp-plugins:qm-segmenter 01.wav 0.000000000: 4 4 23.800000000: 6 6 44.600000000: 5 5 55.000000000: 7 7 72.800000000: 1 1 90.600000000: 2 2 109.200000000: 5 5 116.000000000: 3 3 143.800000000: 5 5 153.400000000: 3 3 163.000000000: 8 8 seems to be FP seconds then another metric for now we can just take the first part features[output][i].timestamp is of type RealTime: represents time values to nanosecond precision int sec; int nsec; 1 sec = 10^9 nanosec actually maybe this would be the way to go for rotor- avoiding rounding errors etc for now - ideally will get a float representation features[output][i].values is a vector of floats + a description WE DON'T CARE ABOUT ANYTHING <.01 seconds static long realTime2Frame(const RealTime &r, unsigned int sampleRate); get a vector of floats out, using frames, presuming data has a timestamp this is crashing with "Aborted (core dumped)" if we check for timestamp */ cout << "." 
bool vampHost::Analyser::init(const string &soname, const string &id,
                              const int &_channels, const int &_bits,
                              const int &_samples, const int &_rate,
                              int _outputNo, const map<string,float> &params)
{
    //stuff that only happens once
    channels = _channels;
    samples = _samples;
    rate = _rate;
    bits = _bits;
    outputNo = _outputNo;
    //output = _output;

    //http://www.mega-nerd.com/libsndfile/api.html#note1
    //libsndfile returns -1..1 for fp data
    bytes = (bits >> 3);
    stride = channels * bytes;
    scale = (1.0f / pow(2.0f, bits));

    features.clear(); //in case of reuse
    features[0.0f] = feature();

    loader = PluginLoader::getInstance();
    key = loader->composePluginKey(soname, id);
    plugin = loader->loadPlugin(key, _rate, PluginLoader::ADAPT_ALL_SAFE);

    if (!plugin) {
        cerr << ": ERROR: Failed to load plugin \"" << id
             << "\" from library \"" << soname << "\"" << endl;
        return false;
    }

    cerr << "Running plugin: \"" << plugin->getIdentifier() << "\"... Domain:";
    if (plugin->getInputDomain() == Plugin::FrequencyDomain) {
        cerr << "frequency" << endl;
    } else {
        cerr << "time" << endl;
    }

    blockSize = plugin->getPreferredBlockSize();
    stepSize = plugin->getPreferredStepSize();

    if (blockSize == 0) {
        blockSize = 1024;
    }
    if (stepSize == 0) {
        if (plugin->getInputDomain() == Plugin::FrequencyDomain) {
            stepSize = blockSize / 2;
        } else {
            stepSize = blockSize;
        }
    } else if (stepSize > blockSize) {
        cerr << "WARNING: stepSize " << stepSize << " > blockSize " << blockSize
             << ", resetting blockSize to ";
        if (plugin->getInputDomain() == Plugin::FrequencyDomain) {
            blockSize = stepSize * 2;
        } else {
            blockSize = stepSize;
        }
        cerr << blockSize << endl;
    }
    overlapSize = blockSize - stepSize;
    currentStep = 0;
    finalStepsRemaining = max(1, (blockSize / stepSize) - 1); // at end of file, this many part-silent frames needed after we hit EOF

    plugbuf = new float*[channels];
    for (int c = 0; c < channels; ++c) plugbuf[c] = new float[blockSize + 2];

    cerr << "Using block size = " << blockSize << ", step size = " << stepSize << endl;

    // The channel queries here are for informational purposes only --
    // a PluginChannelAdapter is being used automatically behind the
    // scenes, and it will take care of any channel mismatch

    int minch = plugin->getMinChannelCount();
    int maxch = plugin->getMaxChannelCount();
    cerr << "Plugin accepts " << minch << " -> " << maxch << " channel(s)" << endl;
    cerr << "Sound file has " << channels << " (will mix/augment if necessary)" << endl;

    Plugin::OutputList outputs = plugin->getOutputDescriptors();
    Plugin::OutputDescriptor od;

    int returnValue = 1;
    int prog = 0;

    RealTime rt;
    PluginWrapper *wrapper = 0;
    RealTime adjustment = RealTime::zeroTime;

    if (outputs.empty()) {
        cerr << "ERROR: Plugin has no outputs!" << endl;
        return false;
    }

    if (outputNo < 0) {
        for (size_t oi = 0; oi < outputs.size(); ++oi) {
            if (outputs[oi].identifier == output) {
                outputNo = oi;
                break;
            }
        }
        if (outputNo < 0) {
            cerr << "ERROR: Non-existent output \"" << output << "\" requested" << endl;
            return false;
        }
    } else {
        if (int(outputs.size()) <= outputNo) {
            cerr << "ERROR: Output " << outputNo << " requested, but plugin has only "
                 << outputs.size() << " output(s)" << endl;
            return false;
        }
    }

    od = outputs[outputNo];
    cerr << "Output number " << outputNo << ": \"" << od.identifier << "\"" << endl;

    for (const auto &i : params) {
        plugin->setParameter(i.first, i.second);
        cerr << "Set plugin parameter: " << i.first << " = " << i.second << endl;
    }

    if (!plugin->initialise(channels, stepSize, blockSize)) {
        cerr << "ERROR: Plugin initialise (channels = " << channels
             << ", stepSize = " << stepSize << ", blockSize = "
             << blockSize << ") failed." << endl;
        return false;
    }
    cerr << "Vamphost Plugin initialised: (channels = " << channels
         << ", stepSize = " << stepSize << ", blockSize = "
         << blockSize << ")" << endl;

    wrapper = dynamic_cast<PluginWrapper *>(plugin);
    if (wrapper) {
        // See documentation for
        // PluginInputDomainAdapter::getTimestampAdjustment
        PluginInputDomainAdapter *ida = wrapper->getWrapper<PluginInputDomainAdapter>();
        if (ida) adjustment = ida->getTimestampAdjustment();
    }

    //everything is prepared to start consuming data in blocks
    in_block = 0;
    blocks_processed = 0;
    currentStep = 0;
    featureNo = 0;

    return true;
}
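// Call sketch only (not compiled): one plausible way to drive Analyser::init()
// as defined above.  The plugin names come from the qm-vamp-plugins examples
// in the getTimestamps() comment; the 2-channel / 16-bit / 44100 Hz figures,
// the 1024-sample frame, output 0, the "method" parameter name and the
// default-constructed Analyser are all assumptions, since the class
// declaration lives in vampHost.h.
#if 0
    vampHost::Analyser analyser;

    map<string,float> params;        // optional plugin parameters, e.g.
    params["method"] = 1.0f;         // (hypothetical parameter name)

    if (analyser.init("qm-vamp-plugins", "qm-tempotracker",
                      2, 16, 1024, 44100, 0, params)) {
        // feed interleaved PCM with process_frame() per decoded frame,
        // then flush with process_end() once the stream is exhausted
    }
#endif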
void vampHost::Analyser::process_frame(uint8_t *data, int samples_in_frame)
{
    int sample = 0;
    uint16_t *_data = (uint16_t*)data;

    //process the whole frame, which may hold more than one block
    while (sample < samples_in_frame) {

        //de-interleave into the plugin buffers; the conversion here assumes
        //16-bit signed interleaved PCM (as the uint16_t cast implies)
        for (int c = 0; c < channels; ++c) {
            plugbuf[c][in_block] = ((float)((int16_t)_data[sample * channels + c])) * scale;
        }
        sample++;
        in_block++;

        if (in_block < blockSize) continue;

        //a full block is ready: hand it to the plugin
        //cerr<<plugin->getIdentifier()<<" processed block "<<blocks_processed<<endl;
        RealTime rt = RealTime::frame2RealTime(currentStep * stepSize, rate);
        Plugin::FeatureSet feat = plugin->process(plugbuf, rt);

        float t;
        for (unsigned int i = 0; i < feat[outputNo].size(); ++i) {
            feature f;
            f.number = featureNo;
            f.values = feat[outputNo][i].values;
            //fix for plugins that don't set timestamp properly
            t = ((float)feat[outputNo][i].timestamp.sec) + (((float)feat[outputNo][i].timestamp.nsec) * .000000001);
            if (t < .01) t = ((rt.sec) + (rt.nsec) * .000000001);
            features[t] = f;
            featureNo++;
        }
        //if (feat[outputNo].size()>0) cerr<<"vamphost: "<<feat[outputNo].size()<<" features"<<endl;
        //if (feat[outputNo].size()>0) cerr<<plugin->getIdentifier()<<" step:"<<currentStep<<endl;

        //keep the overlap for the next block and step on
        for (int c = 0; c < channels; ++c) {
            for (int s = 0; s < overlapSize; ++s) {
                plugbuf[c][s] = plugbuf[c][s + stepSize];
            }
        }
        in_block = overlapSize;
        blocks_processed++;
        currentStep++;
    }
}

//flush the final (partial) block and collect whatever the plugin still holds
void vampHost::Analyser::process_end()
{
    //zero-pad the remainder of the current block and run one last process()
    for (int c = 0; c < channels; ++c) {
        for (int s = in_block; s < blockSize; ++s) {
            plugbuf[c][s] = 0.0f;
        }
    }

    RealTime rt = RealTime::frame2RealTime(currentStep * stepSize, rate);
    Plugin::FeatureSet feat = plugin->process(plugbuf, rt);

    for (unsigned int i = 0; i < feat[outputNo].size(); ++i) {
        feature f;
        f.number = featureNo;
        f.values = feat[outputNo][i].values;
        features[((float)feat[outputNo][i].timestamp.sec) + (((float)feat[outputNo][i].timestamp.nsec) * .000000001)] = f;
        featureNo++;
    }

    feat = plugin->getRemainingFeatures();

    for (unsigned int i = 0; i < feat[outputNo].size(); ++i) {
        feature f;
        f.number = featureNo;
        f.values = feat[outputNo][i].values;
        features[((float)feat[outputNo][i].timestamp.sec) + (((float)feat[outputNo][i].timestamp.nsec) * .000000001)] = f;
        featureNo++;
    }

    //make a final feature at the end
    feature f;
    f.number = featureNo;
    f.values = {0};
    features[((float)rt.sec) + (((float)rt.nsec) * .000000001f)] = f;

    //cerr<<plugin->getIdentifier()<<" found "<<(features.size()-1)<<" features"<<endl;
}

float vampHost::Analyser::getValueAtTime(float time)
{
    //linearly interpolate between the stored features either side of 'time'
    for (auto i = features.begin(); i != features.end(); ++i) {
        if (i->first > time) {
            float uk = i->first;
            float v1, v2;
            v1 = v2 = 0.0f;
            if (i->second.values.size()) v2 = i->second.values[0];
            i--;
            float lk = i->first;
            int ln = i->second.number;
            if (i->second.values.size()) v1 = i->second.values[0];
            return ((((time - lk) / (uk - lk)) * (v2 - v1)) + v1);
        }
    }
    return 0.0f;
}
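// Sketch only: the same linear interpolation that getValueAtTime() applies to
// the stored feature map, written out on plain floats as a hypothetical helper
// (not used elsewhere).  With the lower neighbour (lk, v1) = (1.0, 120) and
// the upper neighbour (uk, v2) = (2.0, 150), lerpAt(1.5, ...) gives
// 0.5 * 30 + 120 = 135.
static inline float lerpAt(float time, float lk, float v1, float uk, float v2)
{
    return (((time - lk) / (uk - lk)) * (v2 - v1)) + v1;
}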