Commit 349e7c3e authored by mzed's avatar mzed
Browse files

Merge branch 'dev'

parents ac804792 7863b142
......@@ -41,9 +41,10 @@ int main(int argc, const char * argv[]) {
//////////////////////////////////////////////////////////////////////////////simple multilayer test
//This takes forever, I don't always run it
//#define MULTILAYER 1
#define MULTILAYER 1
#ifdef MULTILAYER
regression myNN2;
regression myNN_ML1;
regression myNN_ML2;
std::vector<trainingExample> trainingSet1;
trainingExample tempExample1;
......@@ -53,15 +54,21 @@ int main(int argc, const char * argv[]) {
tempExample1.input = { 2.0, 2.0, 2.0 };
tempExample1.output = { 1.3 };
trainingSet1.push_back(tempExample1);
myNN2.setNumHiddenLayers(2);
assert(myNN2.getNumHiddenLayers()[0] == 2);
myNN2.setNumEpochs(50000);
myNN2.train(trainingSet1);
myNN_ML2.setNumHiddenLayers(2);
assert(myNN_ML2.getNumHiddenLayers()[0] == 2);
myNN_ML2.setNumEpochs(1000);
assert(myNN_ML2.getNumEpochs()[0] == 1000);
std::vector<double> inputVec1 = { 2.0, 2.0, 2.0 };
std::cout << myNN2.run(inputVec1)[0] <<std::endl;
myNN_ML1.train(trainingSet1);
myNN_ML2.train(trainingSet1);
std::vector<double> inputVec1 = { 1.1, 1.1, 1.1 };
std::cout << "single layer: " << myNN_ML1.run(inputVec1)[0] <<std::endl;
std::cout << "multilayer: " << myNN_ML2.run(inputVec1)[0] <<std::endl;
//assert(myNN_ML1.run(inputVec1)[0] == myNN_ML2.run(inputVec1)[0]);
/*
myNN2.reset();
trainingSet1.clear();
tempExample1.input = {0., 0. };
......@@ -84,8 +91,8 @@ int main(int argc, const char * argv[]) {
inputVec1 = { 0.9, 0.7 };
std::cout << myNN2.run(inputVec1)[0] <<std::endl;
*/
#endif
////////////////////////////////////////////////////////////////////////////////
regression myNN;
......@@ -126,8 +133,8 @@ int main(int argc, const char * argv[]) {
assert(myNN.run(inputVec)[0] == 20.14);
assert(myNN_nodes.run(inputVec)[0] == 20.14);
//assert(myNN.run(inputVec)[0] == myNNfromString.run(inputVec)[0]);
//assert(myNN.run(inputVec)[0] == myNNfromFile.run(inputVec)[0]);
assert(myNN.run(inputVec)[0] == myNNfromString.run(inputVec)[0]);
assert(myNN.run(inputVec)[0] == myNNfromFile.run(inputVec)[0]);
//Testing exceptions for regression
std::vector<double> emptyVec = {};
......@@ -201,8 +208,8 @@ int main(int argc, const char * argv[]) {
//std::cout << "knn from file: " << myKnnFromFile.run(inputVec)[0] << std::endl;
assert(myKnn.run(inputVec)[0] == 20);
//assert(myKnn.run(inputVec)[0] == myKnnFromString.run(inputVec)[0]);
//assert(myKnn.run(inputVec)[0] == myKnnFromFile.run(inputVec)[0]);
assert(myKnn.run(inputVec)[0] == myKnnFromString.run(inputVec)[0]);
assert(myKnn.run(inputVec)[0] == myKnnFromFile.run(inputVec)[0]);
try {
myKnn.run(emptyVec);
......@@ -397,7 +404,54 @@ int main(int argc, const char * argv[]) {
////////////////////////////////////////////////////////////////////////
//#define layerTest 1
#ifdef layerTest
//Machine Learning
regression mtofRegression; //Create a machine learning object
mtofRegression.setNumHiddenLayers(2);
std::cout << "epochs: " << mtofRegression.getNumEpochs()[0] << std::endl;
mtofRegression.setNumEpochs(5000);
std::vector<trainingExample> trainingSet_mtof;
trainingExample tempExample_mtof;
//Setting up the first element of training data
tempExample_mtof.input = { 48 };
tempExample_mtof.output = { 130.81 };
trainingSet_mtof.push_back(tempExample_mtof);
//More elements
tempExample_mtof.input = { 54 };
tempExample_mtof.output = { 185.00 };
trainingSet_mtof.push_back(tempExample_mtof);
tempExample_mtof.input = { 60 };
tempExample_mtof.output = { 261.63 };
trainingSet_mtof.push_back(tempExample_mtof);
tempExample_mtof.input = { 66 };
tempExample_mtof.output = { 369.994 };
trainingSet_mtof.push_back(tempExample_mtof);
tempExample_mtof.input = { 72 };
tempExample_mtof.output = { 523.25 };
trainingSet_mtof.push_back(tempExample_mtof);
//Train the machine learning model with the data
mtofRegression.train(trainingSet_mtof);
//Get some user input
int newNote = 0;
std::cout << "Type a MIDI note number.\n"; std::cin >> newNote;
//Run the trained model on the user input
std::vector<double> inputVec_mtof = { double(newNote) };
double freqHz = mtofRegression.run(inputVec_mtof)[0];
std::cout << "MIDI note " << newNote << " is " << freqHz << " Hertz" << std::endl;
#endif
///////////////////////////////////////////////////////////////////////////////////////////////
return 0;
}
......@@ -15,7 +15,7 @@
void filtfilt(vector<double> const& b, vector<double> const& a, vector<double> & x, vector<double> & y, PADTYPE padtype, int padlen)
{
int ntaps = max(a.size(), b.size());
int ntaps = int(max(a.size(), b.size()));
if (padtype == NONE)
padlen=0;
......@@ -81,7 +81,7 @@ void lfilter(vector<double> const& b, vector<double> const& a, vector<double> co
vector<double> _a = a;
// Pad a or b with zeros so they are the same length.
unsigned int k = max(a.size(), b.size());
unsigned int k = int (max(a.size(), b.size()));
if (_a.size() < k)
_a.resize(k, 0.);
......@@ -97,7 +97,7 @@ void lfilter(vector<double> const& b, vector<double> const& a, vector<double> co
}
vector<double> z = zi;
unsigned int n = x.size();
unsigned int n = int(x.size());
y.resize(n);
for (unsigned int m=0; m<n; m++) {
y[m] = _b[0] * x[m] + z[0];
......@@ -121,7 +121,7 @@ void lfilter_zi(vector<double> const& b, vector<double> const& a, vector<double>
}
}
unsigned int n = max(_a.size(), _b.size());
unsigned int n = int (max(_a.size(), _b.size()));
// Pad a or b with zeros so they are the same length.
if (_a.size() < n)
......
......@@ -54,7 +54,7 @@ void even_ext(vector<datatype> const& src, vector<datatype> & dst, unsigned int
copy(src.begin(), src.end(), dst.begin()+n);
t += src.size();
for (unsigned int i=src.size()-2; i>src.size()-n-2; i--) {
for (unsigned int i = int (src.size()-2); i>src.size()-n-2; i--) {
dst[t++] = src[i];
}
}
......@@ -77,7 +77,7 @@ void odd_ext(vector<datatype> const& src, vector<datatype> & dst, unsigned int n
copy(src.begin(), src.end(), dst.begin()+n);
t += src.size();
for (unsigned int i=src.size()-2; i>src.size()-n-2; i--) {
for (unsigned int i = int (src.size() - 2); i>src.size()-n-2; i--) {
dst[t++] = 2 * src[src.size()-1] - src[i];
}
}
......@@ -100,7 +100,7 @@ void const_ext(vector<datatype> const& src, vector<datatype> & dst, unsigned int
copy(src.begin(), src.end(), dst.begin()+n);
t += src.size();
for (unsigned int i=src.size()-2; i>src.size()-n-2; i--) {
for (unsigned int i = int (src.size() - 2); i>src.size()-n-2; i--) {
dst[t++] = src[src.size()-1];
}
}
......
......@@ -18,7 +18,7 @@
*/
template<typename T>
class classificationTemplate : public modelSet<T> {
class classificationTemplate final : public modelSet<T> {
public:
enum classificationTypes { knn, svm };
......@@ -37,7 +37,7 @@ public:
~classificationTemplate() {}
/** Train on a specified set, causes creation if not created */
bool train(const std::vector<trainingExampleTemplate<T> > &trainingSet);
bool train(const std::vector<trainingExampleTemplate<T> > &trainingSet) override;
/** Check the K values for each model. This feature is temporary, and will be replaced by a different design. */
std::vector<int> getK();
......
......@@ -65,7 +65,7 @@ T dtw<T>::getCost(const std::vector<std::vector<T> > &seriesX, const std::vector
};
template<typename T>
warpPath dtw<T>::calculatePath(int seriesXsize, int seriesYsize) {
warpPath dtw<T>::calculatePath(int seriesXsize, int seriesYsize) const {
warpPath warpPath;
int i = seriesXsize - 1;
int j = seriesYsize - 1;
......
......@@ -32,7 +32,7 @@ public:
private:
inline T distanceFunction(const std::vector<T> &pointX, const std::vector<T> &point);
std::vector<std::vector<T> > costMatrix;
warpPath calculatePath(int seriesXsize, int seriesYsize);
warpPath calculatePath(int seriesXsize, int seriesYsize) const;
};
#endif /* dtw_h */
......@@ -10,7 +10,7 @@
"use strict";
console.log("RapidLib 05.12.2017 14:05");
console.log("RapidLib 19.12.2017 10:59");
/**
* Utility function to convert js objects into C++ trainingSets
......@@ -566,7 +566,7 @@ Module.SeriesClassification.prototype = {
Module.StreamBuffer = function (windowSize) {
if (windowSize) {
this.rapidStream = new Module.RapidStreamCpp(windowSize);
this.rapidStream = new Module.RapidStreamCpp(parseFloat(windowSize));
} else {
this.rapidStream = new Module.RapidStreamCpp();
}
......@@ -614,6 +614,13 @@ Module.StreamBuffer.prototype = {
maximum: function () {
return this.rapidStream.maximum();
},
/**
* Count the number of zero crossings in the buffer.
* @return {number} number of zero crossings.
*/
numZeroCrossings: function () {
return this.rapidStream.numZeroCrossings();
},
/**
* Calculate the sum of all values in the buffer.
* @return {number} sum.
......
......@@ -23,6 +23,7 @@ EMSCRIPTEN_BINDINGS(rapidStream_module) {
.function("acceleration", &rapidStream<double>::acceleration)
.function("minimum", &rapidStream<double>::minimum)
.function("maximum", &rapidStream<double>::maximum)
.function("numZeroCrossings", &rapidStream<double>::numZeroCrossings)
.function("sum", &rapidStream<double>::sum)
.function("mean", &rapidStream<double>::mean)
.function("standardDeviation", &rapidStream<double>::standardDeviation)
......
......@@ -54,7 +54,7 @@ warpPath fastDTW<T>::getWarpPath(const std::vector<std::vector<T>> &seriesX, con
};
template<typename T>
std::vector<std::vector<T> > fastDTW<T>::downsample(const std::vector<std::vector<T>> &series, T resolution) {
inline std::vector<std::vector<T> > fastDTW<T>::downsample(const std::vector<std::vector<T>> &series, T resolution) {
std::vector<std::vector<T> > shrunkenSeries;
for (int i = 0; i < series.size(); ++i) {
if (i % 2 == 0) {
......
......@@ -53,7 +53,7 @@ private:
* @param resolution (not used)
* @return downsampled series
*/
static std::vector<std::vector<T> > downsample(const std::vector<std::vector<T>> &series, T resolution);
inline static std::vector<std::vector<T> > downsample(const std::vector<std::vector<T>> &series, T resolution);
};
......
......@@ -18,7 +18,7 @@
/** Class for implementing a knn classifier */
template<typename T>
class knnClassification : public baseModel<T> {
class knnClassification final : public baseModel<T> {
public:
/** Constructor that takes training examples in
......@@ -43,27 +43,27 @@ public:
* @param A standard vector of type T to be evaluated.
* @return A single value of type T: the nearest class as determined by k-nearest neighbor.
*/
T run(const std::vector<T> &inputVector);
T run(const std::vector<T> &inputVector) override;
/** Fill the model with a vector of examples.
*
* @param The training set is a vector of training examples that contain both a vector of input values and a value specifying desired output class.
*
*/
void train(const std::vector<trainingExampleTemplate<T> > &trainingSet);
void train(const std::vector<trainingExampleTemplate<T> > &trainingSet) override;
/** Reset the model to its empty state. */
void reset();
void reset() override;
/** Find out how many inputs the model expects
* @return Integer number of inputs
*/
int getNumInputs() const;
int getNumInputs() const override;
/** Find out which inputs in a vector will be used
* @return Vector of ints, specifying input indices.
*/
std::vector<int> getWhichInputs() const;
std::vector<int> getWhichInputs() const override;
/** Get the number of nearest neighbours used by the kNN algorithm. */
int getK() const;
......@@ -76,7 +76,7 @@ public:
/** Populate a JSON value with a description of the current model
* @param A JSON value to be populated
*/
void getJSONDescription(Json::Value &currentModel);
void getJSONDescription(Json::Value &currentModel) override;
#endif
private:
......
......@@ -226,6 +226,7 @@ void modelSet<T>::json2modelSet(const Json::Value &root) {
myModelSet.push_back(new knnClassification<T>(modelNumInputs, whichInputs, trainingSet, k));
}
}
created = true;
}
template<typename T>
......
......@@ -24,7 +24,7 @@
* This class includes both running and training, and constructors for reading trained models from JSON.
*/
template<typename T>
class neuralNetwork : public baseModel<T> {
class neuralNetwork final : public baseModel<T> {
public:
/** This is the constructor for building a trained model from JSON. */
......@@ -60,12 +60,12 @@ public:
* @param A standard vector of type T that feed-forward regression will run on.
* @return A single value, which is the result of the feed-forward operation
*/
T run(const std::vector<T> &inputVector);
T run(const std::vector<T> &inputVector) override;
void reset();
void reset() override;
int getNumInputs() const;
std::vector<int> getWhichInputs() const;
int getNumInputs() const override;
std::vector<int> getWhichInputs() const override;
int getNumHiddenLayers() const;
void setNumHiddenLayers(int num_hidden_layers);
......@@ -85,7 +85,7 @@ public:
T getOutBase() const;
#ifndef EMSCRIPTEN
void getJSONDescription(Json::Value &currentModel);
void getJSONDescription(Json::Value &currentModel) override;
#endif
......@@ -123,7 +123,7 @@ public:
* @param The training set is a vector of training examples that contain both a vector of input values and a value specifying desired output.
*
*/
void train(const std::vector<trainingExampleTemplate<T> > &trainingSet);
void train(const std::vector<trainingExampleTemplate<T> > &trainingSet) override;
private:
/** Parameters that influence learning */
......
......@@ -15,7 +15,7 @@
#endif
template<typename T>
rapidStream<T>::rapidStream(int window_size) {
rapidStream<T>::rapidStream (int window_size) {
windowSize = window_size;
windowIndex = 0;
circularWindow = new T[window_size];
......@@ -24,8 +24,8 @@ rapidStream<T>::rapidStream(int window_size) {
}
//Bayesian Filter setup
bayesFilt.diffusion = powf(10., -2);
bayesFilt.jump_rate = powf(10., -10);
bayesFilt.diffusion = powf (10., -2);
bayesFilt.jump_rate = powf (10., -10);
bayesFilt.mvc[0] = 1.;
bayesFilt.init();
......@@ -62,22 +62,22 @@ void rapidStream<T>::pushToWindow(T input) {
}
template<typename T>
inline T rapidStream<T>::calcCurrentVel(int i) {
return circularWindow[(i + windowIndex) % windowSize] - circularWindow[(i + windowIndex - 1) % windowSize];
inline T rapidStream<T>::calcCurrentVel(int i) const {
return circularWindow[ (i + windowIndex) % windowSize] - circularWindow[ (i + windowIndex - 1) % windowSize];
}
template<typename T>
T rapidStream<T>::velocity() {
return calcCurrentVel(-1);
T rapidStream<T>::velocity() const {
return calcCurrentVel (-1);
};
template<typename T>
T rapidStream<T>::acceleration() {
return calcCurrentVel(-2) - calcCurrentVel(-3);
T rapidStream<T>::acceleration() const {
return calcCurrentVel (-2) - calcCurrentVel (-3);
};
template<typename T>
T rapidStream<T>::minimum() {
T rapidStream<T>::minimum() const {
T minimum = std::numeric_limits<T>::infinity();
for (int i = 0; i < windowSize; ++i) {
if (circularWindow[i] < minimum) {
......@@ -88,7 +88,7 @@ T rapidStream<T>::minimum() {
}
template<typename T>
T rapidStream<T>::maximum() {
T rapidStream<T>::maximum() const {
T maximum = std::numeric_limits<T>::min();
for (int i = 0; i < windowSize; ++i) {
if (circularWindow[i] > maximum) {
......@@ -99,22 +99,48 @@ T rapidStream<T>::maximum() {
}
template<typename T>
T rapidStream<T>::sum() {
uint32_t rapidStream<T>::numZeroCrossings() const {
    uint32_t zeroCrossings = 0;

    // Establish the sign of the first (oldest) sample in the circular window:
    // 1 = positive, -1 = negative, 0 = exactly zero.
    int previous = 1;
    if (circularWindow[windowIndex] < 0) {
        previous = -1;
    } else if (circularWindow[windowIndex] == 0) {
        // A first sample lying exactly on zero is itself counted as a
        // crossing, matching the existing behaviour.
        ++zeroCrossings;
        previous = 0;
    }

    // Walk the remaining samples in chronological order.
    for (int i = 1; i < windowSize; ++i) {
        int index = (windowIndex + i) % windowSize;
        if (circularWindow[index] < 0 && previous >= 0) { //Transition to negative
            ++zeroCrossings;
            previous = -1;
        } else if (circularWindow[index] > 0 && previous <= 0) { //Transition to positive
            ++zeroCrossings;
            previous = 1;
        } else if (circularWindow[index] == 0) { //Sample == 0
            // A zero sample resets the tracked sign; the next signed sample
            // will register the crossing.
            previous = 0;
        }
        // Otherwise the sample has the same sign as 'previous': leave the
        // tracked sign untouched. (Previously this branch reset 'previous'
        // to 0, which made every other sample of a constant-sign signal
        // count as a spurious zero crossing.)
    }
    return zeroCrossings;
}
template<typename T>
T rapidStream<T>::sum() const {
T newSum = 0;
for(int i = 0; i < windowSize; ++i)
{
for (int i = 0; i < windowSize; ++i) {
newSum += circularWindow[i];
}
return newSum;
}
template<typename T>
T rapidStream<T>::mean() {
T rapidStream<T>::mean() const {
    // Arithmetic mean of all samples currently in the window.
    // NOTE(review): if T is an integral type this is integer division;
    // with the usual T = double/float the result is the true mean.
    return sum()/windowSize;
}
template<typename T>
T rapidStream<T>::standardDeviation() {
T rapidStream<T>::standardDeviation() const {
T newMean = mean();
T standardDeviation = 0.;
for(int i = 0; i < windowSize; ++i) {
......@@ -124,7 +150,7 @@ T rapidStream<T>::standardDeviation() {
}
template<typename T>
T rapidStream<T>::rms() {
T rapidStream<T>::rms() const {
T rms = 0;
for (int i = 0; i < windowSize; ++i) {
rms += (circularWindow[i] * circularWindow[i]);
......@@ -136,19 +162,19 @@ T rapidStream<T>::rms() {
template<typename T>
T rapidStream<T>::bayesFilter(T input) {
std::vector<float> inputVec = { float(input) };
bayesFilt.update(inputVec);
return T(bayesFilt.output[0]);
bayesFilt.update (inputVec);
return T (bayesFilt.output[0]);
}
template<typename T>
void rapidStream<T>::bayesSetDiffusion(float diffusion) {
bayesFilt.diffusion = powf(10., diffusion);
bayesFilt.diffusion = powf (10., diffusion);
bayesFilt.init();
}
template<typename T>
void rapidStream<T>::bayesSetJumpRate(float jump_rate) {
bayesFilt.jump_rate = powf(10., jump_rate);
bayesFilt.jump_rate = powf (10., jump_rate);
bayesFilt.init();
}
......@@ -160,10 +186,10 @@ void rapidStream<T>::bayesSetMVC(float mvc) {
template<typename T>
T rapidStream<T>::minVelocity() {
T rapidStream<T>::minVelocity() const {
T minVel = std::numeric_limits<T>::infinity();
for (int i = 0; i < windowSize; ++i) {
T currentVel = calcCurrentVel(i);
T currentVel = calcCurrentVel (i);
if ( currentVel < minVel) {
minVel = currentVel;
}
......@@ -172,10 +198,10 @@ T rapidStream<T>::minVelocity() {
}
template<typename T>
T rapidStream<T>::maxVelocity() {
T rapidStream<T>::maxVelocity() const {
T maxVel = std::numeric_limits<T>::lowest();
for (int i = 0; i < windowSize; ++i) {
T currentVel = calcCurrentVel(i);
T currentVel = calcCurrentVel (i);
if (currentVel > maxVel) {
maxVel = currentVel;
}
......@@ -184,11 +210,11 @@ T rapidStream<T>::maxVelocity() {
}
template<typename T>
T rapidStream<T>::minAcceleration() {
T rapidStream<T>::minAcceleration() const {
T minAccel = std::numeric_limits<T>::infinity();
T lastVel = calcCurrentVel(1);
T lastVel = calcCurrentVel (1);
for (int i = 2; i < windowSize; ++i) {
T currentVel = calcCurrentVel(i);
T currentVel = calcCurrentVel (i);
T currentAccel = currentVel - lastVel;
lastVel = currentVel;
if (currentAccel < minAccel) {
......@@ -199,11 +225,11 @@ T rapidStream<T>::minAcceleration() {
}
template<typename T>
T rapidStream<T>::maxAcceleration() {
T rapidStream<T>::maxAcceleration() const {
T maxAccel = std::numeric_limits<T>::lowest();
T lastVel = calcCurrentVel(1);
for (int i = 2; i < windowSize; ++i) {
T currentVel = calcCurrentVel(i);
T currentVel = calcCurrentVel (i);
T currentAccel = currentVel - lastVel;
lastVel = currentVel;
if (currentAccel > maxAccel) {
......
......@@ -24,7 +24,7 @@ public:
* Create a circular buffer with an arbitrary number of elements.
* @param int: number of elements to hold in the buffer
*/
rapidStream(int windowSize);
rapidStream (int windowSize);
~rapidStream();
......@@ -36,82 +36,87 @@ public:
/** Add a value to a circular buffer whose size is defined at creation.
* @param double: value to be pushed into circular buffer.
*/
void pushToWindow(T input);
void pushToWindow (T input);
/** Calculate the first-order difference (aka velocity) between the last two inputs.
* @return double: difference between last two inputs.
*/
T velocity();
T velocity() const;
/** Calculate the second-order difference (aka acceleration) over the last three inputs.