Commit b2884c15 authored by mzed

Revert "adding setNumHiddenNodes"

This reverts commit 36dea78e.
parent 36dea78e
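For context, commit 36dea78e exposed a hidden-node count on the regression wrapper (setNumHiddenNodes / getNumHiddenNodes) and on neuralNetwork; this revert removes that API, so the number of hidden nodes per layer goes back to being fixed at the number of inputs (see the regressionTemplate constructor and train() hunks below). A minimal sketch of how the reverted API could be exercised, assuming the pre-revert build; the "regression.h" include is an assumption, since only the class usage appears in the hunks below:

    #include <iostream>
    #include <vector>
    #include "regression.h"

    int main() {
        regression myNN;                                    // default-constructed model set
        myNN.setNumHiddenLayers(1);                         // existing API, unchanged by this revert
        myNN.setNumHiddenNodes(10);                         // reverted API: set the hidden-node count on every model
        std::vector<int> nodes = myNN.getNumHiddenNodes();  // reverted API: query the per-model counts
        std::cout << "hidden nodes: " << nodes[0] << std::endl;
        return 0;
    }

Note that in the reverted neuralNetwork<T>::setNumHiddenNodes shown below, the argument is assigned to numHiddenLayers, so node and layer counts were coupled in the removed implementation.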
@@ -8,9 +8,8 @@
#include "seriesClassification.h"
#include "rapidStream.h"
int main(int argc, const char * argv[]) {
rapidStream<double> rapidProcess;
rapidProcess.bayesSetDiffusion(-2.0);
rapidProcess.bayesSetJumpRate(-10.0);
@@ -22,7 +21,7 @@ int main(int argc, const char * argv[]) {
//std::cout << "bayes: " << bayes <<std::endl;
}
assert( bayes > 0.68 );
//vanAllenTesting
seriesClassification testDTW;
std::vector<trainingSeries> testVector;
@@ -45,7 +44,6 @@ int main(int argc, const char * argv[]) {
//#define MULTILAYER 1
#ifdef MULTILAYER
regression myNN2;
regression myNNsmall;
std::vector<trainingExample> trainingSet1;
trainingExample tempExample1;
@@ -60,14 +58,11 @@ int main(int argc, const char * argv[]) {
myNN2.setNumEpochs(50000);
myNN2.train(trainingSet1);
myNNsmall.train(trainingSet1);
std::vector<double> inputVec1 = {1.5, 1.5, 1.5 };
std::cout << "two layers1: " << myNN2.run(inputVec1)[0] <<std::endl;
std::cout << "one layer1: " << myNNsmall.run(inputVec1)[0] <<std::endl;
std::vector<double> inputVec1 = { 2.0, 2.0, 2.0 };
std::cout << myNN2.run(inputVec1)[0] <<std::endl;
myNN2.reset();
myNNsmall.reset();
trainingSet1.clear();
tempExample1.input = {0., 0. };
tempExample1.output = { 0.0 };
@@ -88,15 +83,12 @@ int main(int argc, const char * argv[]) {
myNN2.train(trainingSet1);
inputVec1 = { 0.9, 0.7 };
std::cout << "two layers2: " << myNN2.run(inputVec1)[0] <<std::endl;
std::cout << "one layer2: " << myNNsmall.run(inputVec1)[0] <<std::endl;
std::cout << myNN2.run(inputVec1)[0] <<std::endl;
#endif
////////////////////////////////////////////////////////////////////////////////
regression myNN;
myNN.setNumHiddenNodes(10);
classification myKnn;
//classification mySVM(classification::svm);
@@ -400,49 +392,6 @@ int main(int argc, const char * argv[]) {
////////////////////////////////////////////////////////////////////////
//Machine Learning
regression mtofRegression; //Create a machine learning object
mtofRegression.setNumHiddenLayers(3);
//mtofRegression.setNumEpochs(50000);
std::vector<trainingExample> trainingSet_mtof;
trainingExample tempExample_mtof;
//Setting up the first element of training data
tempExample_mtof.input = { 48 };
tempExample_mtof.output = { 130.81 };
trainingSet_mtof.push_back(tempExample_mtof);
//More elements
tempExample_mtof.input = { 54 };
tempExample_mtof.output = { 185.00 };
trainingSet_mtof.push_back(tempExample_mtof);
tempExample_mtof.input = { 60 };
tempExample_mtof.output = { 261.63 };
trainingSet_mtof.push_back(tempExample_mtof);
tempExample_mtof.input = { 66 };
tempExample_mtof.output = { 369.994 };
trainingSet_mtof.push_back(tempExample_mtof);
tempExample_mtof.input = { 72 };
tempExample_mtof.output = { 523.25 };
trainingSet_mtof.push_back(tempExample_mtof);
//Train the machine learning model with the data
mtofRegression.train(trainingSet_mtof);
//Get some user input
int newNote = 0;
std::cout << "Type a MIDI note number.\n"; std::cin >> newNote;
//Run the trained model on the user input
std::vector<double> inputVec_mtof = { double(newNote) };
double freqHz = mtofRegression.run(inputVec_mtof)[0];
std::cout << "MIDI note " << newNote << " is " << freqHz << " Hertz" << std::endl;
return 0;
@@ -210,13 +210,6 @@ int neuralNetwork<T>::getNumHiddenNodes() const {
return numHiddenNodes;
}
template<typename T>
void neuralNetwork<T>::setNumHiddenNodes(int num_hidden_nodes) {
numHiddenLayers = num_hidden_nodes;
reset();
initTrainer();
}
template<typename T>
void neuralNetwork<T>::setEpochs(const int &epochs) {
numEpochs = epochs;
@@ -452,4 +445,4 @@ void neuralNetwork<T>::updateWeights() {
//explicit instantiation
template class neuralNetwork<double>;
template class neuralNetwork<float>;
template class neuralNetwork<float>;
\ No newline at end of file
@@ -71,7 +71,6 @@ public:
void setNumHiddenLayers(int num_hidden_layers);
int getNumHiddenNodes() const;
void setNumHiddenNodes(int num_hidden_nodes);
void setEpochs(const int &epochs);
@@ -32,14 +32,13 @@ regressionTemplate<T>::regressionTemplate(const int &num_inputs, const int &num_
modelSet<T>::numOutputs = num_outputs;
numHiddenLayers = 1;
numEpochs = 500;
numHiddenNodes = num_inputs;
modelSet<T>::created = false;
std::vector<int> whichInputs;
for (int i = 0; i < modelSet<T>::numInputs; ++i) {
whichInputs.push_back(i);
}
for (int i = 0; i < modelSet<T>::numOutputs; ++i) {
modelSet<T>::myModelSet.push_back(new neuralNetwork<T>(modelSet<T>::numInputs, whichInputs, numHiddenLayers, numHiddenNodes));
modelSet<T>::myModelSet.push_back(new neuralNetwork<T>(modelSet<T>::numInputs, whichInputs, numHiddenLayers, modelSet<T>::numInputs));
}
modelSet<T>::created = true;
};
@@ -78,32 +77,6 @@ void regressionTemplate<T>::setNumHiddenLayers(const int &num_hidden_layers){
}
}
template<typename T>
std::vector<int> regressionTemplate<T>::getNumHiddenNodes() {
std::vector<int> vecNumHiddenNodes;
if (std::begin(modelSet<T>::myModelSet) != std::end(modelSet<T>::myModelSet)) {
for (baseModel<T>* model : modelSet<T>::myModelSet) {
neuralNetwork<T>* nnModel = dynamic_cast<neuralNetwork<T>*>(model); //FIXME: I really dislike this design
vecNumHiddenNodes.push_back(nnModel->getNumHiddenNodes());
}
} else {
vecNumHiddenNodes = { numHiddenNodes };
}
return vecNumHiddenNodes;
}
template<typename T>
void regressionTemplate<T>::setNumHiddenNodes(const int &num_hidden_nodes){
numHiddenNodes = num_hidden_nodes;
//Set any existing models
if (std::begin(modelSet<T>::myModelSet) != std::end(modelSet<T>::myModelSet)) {
for (baseModel<T>* model : modelSet<T>::myModelSet) {
neuralNetwork<T>* nnModel = dynamic_cast<neuralNetwork<T>*>(model); //FIXME: I really dislike this design
nnModel->setNumHiddenNodes(num_hidden_nodes);
}
}
}
template<typename T>
void regressionTemplate<T>::setNumEpochs(const int &epochs) {
numEpochs = epochs;
@@ -144,7 +117,7 @@ bool regressionTemplate<T>::train(const std::vector<trainingExampleTemplate<T> >
whichInputs.push_back(j);
}
for (int i = 0; i < modelSet<T>::numOutputs; ++i) {
modelSet<T>::myModelSet.push_back(new neuralNetwork<T>(modelSet<T>::numInputs, whichInputs, numHiddenLayers, numHiddenNodes));
modelSet<T>::myModelSet.push_back(new neuralNetwork<T>(modelSet<T>::numInputs, whichInputs, numHiddenLayers, modelSet<T>::numInputs));
}
if (numEpochs != 500) {
for (baseModel<T>* model : modelSet<T>::myModelSet) {
@@ -43,16 +43,9 @@ public:
/** Set how many hidden layers are in all models. This feature is temporary, and will be replaced by a different design. */
void setNumHiddenLayers(const int &num_hidden_layers);
/** Check how many hidden nodes are in each model. This feature is temporary, and will be replaced by a different design. */
std::vector<int> getNumHiddenNodes();
/** Set how many hidden layers are in all models. This feature is temporary, and will be replaced by a different design. */
void setNumHiddenNodes(const int &num_hidden_nodes);
private:
int numHiddenLayers; //Temporary -- this should be part of the nn class. -mz
int numEpochs; //Temporary -- also should be part of nn only. -mz
int numHiddenNodes; //Temporary -- also should be part of nn only. -mz
};
//This is here so the old API still works