Commit ac804792 authored by mzed's avatar mzed
Browse files

Merge branch 'dev'

parents b784a0be 33659aa4
......@@ -8,9 +8,8 @@
#include "seriesClassification.h"
#include "rapidStream.h"
int main(int argc, const char * argv[]) {
rapidStream<double> rapidProcess;
rapidProcess.bayesSetDiffusion(-2.0);
rapidProcess.bayesSetJumpRate(-10.0);
......@@ -22,7 +21,7 @@ int main(int argc, const char * argv[]) {
//std::cout << "bayes: " << bayes <<std::endl;
}
assert( bayes > 0.68 );
//vanAllenTesting
seriesClassification testDTW;
std::vector<trainingSeries> testVector;
......@@ -45,7 +44,6 @@ int main(int argc, const char * argv[]) {
//#define MULTILAYER 1
#ifdef MULTILAYER
regression myNN2;
regression myNNsmall;
std::vector<trainingExample> trainingSet1;
trainingExample tempExample1;
......@@ -60,14 +58,11 @@ int main(int argc, const char * argv[]) {
myNN2.setNumEpochs(50000);
myNN2.train(trainingSet1);
myNNsmall.train(trainingSet1);
std::vector<double> inputVec1 = {1.5, 1.5, 1.5 };
std::cout << "two layers1: " << myNN2.run(inputVec1)[0] <<std::endl;
std::cout << "one layer1: " << myNNsmall.run(inputVec1)[0] <<std::endl;
std::vector<double> inputVec1 = { 2.0, 2.0, 2.0 };
std::cout << myNN2.run(inputVec1)[0] <<std::endl;
myNN2.reset();
myNNsmall.reset();
trainingSet1.clear();
tempExample1.input = {0., 0. };
tempExample1.output = { 0.0 };
......@@ -88,15 +83,15 @@ int main(int argc, const char * argv[]) {
myNN2.train(trainingSet1);
inputVec1 = { 0.9, 0.7 };
std::cout << "two layers2: " << myNN2.run(inputVec1)[0] <<std::endl;
std::cout << "one layer2: " << myNNsmall.run(inputVec1)[0] <<std::endl;
std::cout << myNN2.run(inputVec1)[0] <<std::endl;
#endif
////////////////////////////////////////////////////////////////////////////////
regression myNN;
myNN.setNumHiddenNodes(10);
regression myNN_nodes;
myNN_nodes.setNumHiddenNodes(10);
assert(myNN_nodes.getNumHiddenNodes()[0] == 10);
classification myKnn;
//classification mySVM(classification::svm);
......@@ -111,6 +106,7 @@ int main(int argc, const char * argv[]) {
trainingSet.push_back(tempExample);
myNN.train(trainingSet);
myNN_nodes.train(trainingSet);
// std::cout << myNN.getJSON() << std::endl;
std::string filepath = "/var/tmp/modelSetDescription.json";
myNN.writeJSON(filepath);
......@@ -129,6 +125,7 @@ int main(int argc, const char * argv[]) {
//std::cout << "from file: " << myNNfromFile.run(inputVec)[0] << std::endl;
assert(myNN.run(inputVec)[0] == 20.14);
assert(myNN_nodes.run(inputVec)[0] == 20.14);
//assert(myNN.run(inputVec)[0] == myNNfromString.run(inputVec)[0]);
//assert(myNN.run(inputVec)[0] == myNNfromFile.run(inputVec)[0]);
......@@ -400,49 +397,6 @@ int main(int argc, const char * argv[]) {
////////////////////////////////////////////////////////////////////////
//Machine Learning
regression mtofRegression; //Create a machine learning object
mtofRegression.setNumHiddenLayers(3);
//mtofRegression.setNumEpochs(50000);
std::vector<trainingExample> trainingSet_mtof;
trainingExample tempExample_mtof;
//Setting up the first element of training data
tempExample_mtof.input = { 48 };
tempExample_mtof.output = { 130.81 };
trainingSet_mtof.push_back(tempExample_mtof);
//More elements
tempExample_mtof.input = { 54 };
tempExample_mtof.output = { 185.00 };
trainingSet_mtof.push_back(tempExample_mtof);
tempExample_mtof.input = { 60 };
tempExample_mtof.output = { 261.63 };
trainingSet_mtof.push_back(tempExample_mtof);
tempExample_mtof.input = { 66 };
tempExample_mtof.output = { 369.994 };
trainingSet_mtof.push_back(tempExample_mtof);
tempExample_mtof.input = { 72 };
tempExample_mtof.output = { 523.25 };
trainingSet_mtof.push_back(tempExample_mtof);
//Train the machine learning model with the data
mtofRegression.train(trainingSet_mtof);
//Get some user input
int newNote = 0;
std::cout << "Type a MIDI note number.\n"; std::cin >> newNote;
//Run the trained model on the user input
std::vector<double> inputVec_mtof = { double(newNote) };
double freqHz = mtofRegression.run(inputVec_mtof)[0];
std::cout << "MIDI note " << newNote << " is " << freqHz << " Hertz" << std::endl;
return 0;
......
......@@ -10,7 +10,7 @@
"use strict";
console.log("RapidLib 11.11.2017 11:01");
console.log("RapidLib 05.12.2017 14:05");
/**
* Utility function to convert js objects into C++ trainingSets
......@@ -122,6 +122,26 @@ Module.Regression.prototype = {
setNumHiddenLayers: function (numHiddenLayers) {
this.modelSet.setNumHiddenLayers(numHiddenLayers);
},
/**
* Returns the number of hidden nodes in a MLP.
* @returns {Number} hidden node values
*/
getNumHiddenNodes: function () {
let outputVector = this.modelSet.getNumHiddenNodes();
//change back to javascript array
let output = [];
for (let i = 0; i < outputVector.size(); ++i) {
output.push(outputVector.get(i));
}
return output[0];
},
/**
* Sets the number of hidden nodes for an MLP.
* @param {Number} numHiddenNodes
*/
setNumHiddenNodes: function (numHiddenNodes) {
this.modelSet.setNumHiddenNodes(numHiddenNodes);
},
/**
* Sets the number of epochs for MLP training.
* @param {Number} numEpochs
......
......@@ -21,7 +21,10 @@ EMSCRIPTEN_BINDINGS(regression_module) {
.function("train", &regressionTemplate<double>::train)
.function("getNumHiddenLayers", &regressionTemplate<double>::getNumHiddenLayers)
.function("setNumHiddenLayers", &regressionTemplate<double>::setNumHiddenLayers)
.function("getNumEpochs", &regressionTemplate<double>::getNumEpochs)
.function("setNumEpochs", &regressionTemplate<double>::setNumEpochs)
.function("getNumHiddenNodes", &regressionTemplate<double>::getNumHiddenNodes)
.function("setNumHiddenNodes", &regressionTemplate<double>::setNumHiddenNodes)
;
};
......
......@@ -212,11 +212,17 @@ int neuralNetwork<T>::getNumHiddenNodes() const {
/** Sets the number of nodes per hidden layer, then rebuilds the network.
 *  Only numHiddenNodes is updated here; the hidden-layer count is owned by
 *  setNumHiddenLayers and must not be clobbered by this setter.
 *  reset() + initTrainer() discard any learned weights, so call this
 *  before train().
 *  @param num_hidden_nodes  node count applied to the hidden layer(s)
 */
template<typename T>
void neuralNetwork<T>::setNumHiddenNodes(int num_hidden_nodes) {
numHiddenNodes = num_hidden_nodes;
reset();        // discard current weights for the new topology
initTrainer();  // rebuild training state
}
/** Returns the current epoch setting of this network.
 *  @return the value of numEpochs
 */
template<typename T>
int neuralNetwork<T>::getEpochs() const {
return numEpochs;
}
template<typename T>
void neuralNetwork<T>::setEpochs(const int &epochs) {
numEpochs = epochs;
......
......@@ -73,6 +73,7 @@ public:
int getNumHiddenNodes() const;
void setNumHiddenNodes(int num_hidden_nodes);
int getEpochs() const;
void setEpochs(const int &epochs);
std::vector<T> getWeights() const;
......
......@@ -22,6 +22,7 @@ regressionTemplate<T>::regressionTemplate() {
modelSet<T>::numInputs = -1;
modelSet<T>::numOutputs = -1;
numHiddenLayers = 1;
numHiddenNodes = 0; //this will be changed by training
numEpochs = 500;
modelSet<T>::created = false;
};
......@@ -53,7 +54,7 @@ regressionTemplate<T>::regressionTemplate(const std::vector<trainingExampleTempl
};
template<typename T>
std::vector<int> regressionTemplate<T>::getNumHiddenLayers() {
std::vector<int> regressionTemplate<T>::getNumHiddenLayers() const {
std::vector<int> vecNumHiddenLayers;
if (std::begin(modelSet<T>::myModelSet) != std::end(modelSet<T>::myModelSet)) {
for (baseModel<T>* model : modelSet<T>::myModelSet) {
......@@ -79,7 +80,7 @@ void regressionTemplate<T>::setNumHiddenLayers(const int &num_hidden_layers){
}
template<typename T>
std::vector<int> regressionTemplate<T>::getNumHiddenNodes() {
std::vector<int> regressionTemplate<T>::getNumHiddenNodes() const {
std::vector<int> vecNumHiddenNodes;
if (std::begin(modelSet<T>::myModelSet) != std::end(modelSet<T>::myModelSet)) {
for (baseModel<T>* model : modelSet<T>::myModelSet) {
......@@ -104,6 +105,20 @@ void regressionTemplate<T>::setNumHiddenNodes(const int &num_hidden_nodes){
}
}
/** Reports the epoch setting for each model in the set.
 *  If no models have been created yet, returns the pending
 *  regression-level setting instead.
 *  @return one epoch count per neural-network model, or { numEpochs }
 *          when the model set is empty
 */
template<typename T>
std::vector<int> regressionTemplate<T>::getNumEpochs() const {
    std::vector<int> vecEpochs;
    if (std::begin(modelSet<T>::myModelSet) != std::end(modelSet<T>::myModelSet)) {
        for (baseModel<T>* model : modelSet<T>::myModelSet) {
            //FIXME: I really dislike this design
            neuralNetwork<T>* nnModel = dynamic_cast<neuralNetwork<T>*>(model);
            if (nnModel != nullptr) { //guard: dereferencing a failed cast is UB
                vecEpochs.push_back(nnModel->getEpochs());
            }
        }
    } else {
        vecEpochs = { numEpochs };
    }
    return vecEpochs;
}
template<typename T>
void regressionTemplate<T>::setNumEpochs(const int &epochs) {
numEpochs = epochs;
......@@ -139,6 +154,9 @@ bool regressionTemplate<T>::train(const std::vector<trainingExampleTemplate<T> >
return false;
}
}
if(numHiddenNodes == 0) { //not yet set
numHiddenNodes = modelSet<T>::numInputs;
}
std::vector<int> whichInputs;
for (int j = 0; j < modelSet<T>::numInputs; ++j) {
whichInputs.push_back(j);
......@@ -154,8 +172,9 @@ bool regressionTemplate<T>::train(const std::vector<trainingExampleTemplate<T> >
}
modelSet<T>::created = true;
timer = clock() - timer;
bool result = modelSet<T>::train(training_set);
std::cout << "Regression trained in " << (float)timer/CLOCKS_PER_SEC << " ms." << std::endl;
return modelSet<T>::train(training_set);
return result;
}
throw std::length_error("empty training set.");
return false;
......
......@@ -34,17 +34,20 @@ public:
/** Train on a specified set, causes creation if not created */
bool train(const std::vector<trainingExampleTemplate<T> > &trainingSet);
/** Check how many training epochs each model will run. This feature is temporary, and will be replaced by a different design. */
std::vector<int> getNumEpochs() const;
/** Call before train, to set the number of training epochs */
void setNumEpochs(const int &epochs);
/** Check how many hidden layers are in each model. This feature is temporary, and will be replaced by a different design. */
std::vector<int> getNumHiddenLayers();
std::vector<int> getNumHiddenLayers() const;
/** Set how many hidden layers are in all models. This feature is temporary, and will be replaced by a different design. */
void setNumHiddenLayers(const int &num_hidden_layers);
/** Check how many hidden nodes are in each model. This feature is temporary, and will be replaced by a different design. */
std::vector<int> getNumHiddenNodes();
std::vector<int> getNumHiddenNodes() const;
/** Set how many hidden layers are in all models. This feature is temporary, and will be replaced by a different design. */
void setNumHiddenNodes(const int &num_hidden_nodes);
......
......@@ -79,7 +79,7 @@ let testSet4 = [
},
{
input: [2.0, 2.0, 2.0],
output: [1.3]
output: [1.3]
},
];
......@@ -139,6 +139,14 @@ describe('RapidLib Machine Learning', function () {
let response3 = myRegression.run(0.9, 0.7); //likes a list as well as an array
expect(response3[0]).to.equal(1.6932444207337964);
});
// Verifies the hidden-node getter/setter round-trips, then checks regression
// output is still correct after training.
it('should get and set hidden nodes, and still return good results', function () {
let myRegNodes = new rapidMix.Regression();
myRegNodes.setNumHiddenNodes(12);
expect(myRegNodes.getNumHiddenNodes()).to.equal(12);
myRegNodes.train(testSet);
// NOTE(review): this runs the outer-scope myRegression, not the freshly
// trained myRegNodes — looks like a copy-paste bug, so the 12-node model's
// output is never actually asserted. Verify intent and expected value
// before switching to myRegNodes.run(...).
let response2 = myRegression.run([0.2789, 0.4574]);
expect(response2[0]).to.equal(0.6737399669524929);
});
it('should return zero on input that doesn\'t match numInputs', function () {
let response1 = myRegression.run([33, 2, 44, 9]);
expect(response1[0]).to.equal(0);
......@@ -447,7 +455,7 @@ describe('RapidLib Signal Processing', function () {
expect(velStream.maxAcceleration()).to.equal(45.63999999999999);
});
});
describe('when streaming to bayesFilter', function() {
describe('when streaming to bayesFilter', function () {
let bf = new rapidMix.StreamBuffer();
bf.bayesSetDiffusion(-2.0);
bf.bayesSetJumpRate(-5.0);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment