Commit 13e9722d authored by mzed

beginning to hide templates behind typedef

parent 0c0b73b3
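The commit starts renaming the user-facing template classes to *Template and re-exposing the old names as typedefs of the double-precision instantiations, so existing code keeps compiling while float variants become available under new names. A minimal, self-contained sketch of the pattern (illustrative only, not the library's exact declarations):

    #include <vector>

    // Training example, templated on the sample type.
    template <typename T>
    struct trainingExampleTemplate {
        std::vector<T> input;
        std::vector<T> output;
    };

    // The implementation class carries the template parameter...
    template <typename T>
    class regressionTemplate {
        std::vector<trainingExampleTemplate<T>> examples;
    };

    // ...and the old, non-templated names stay alive as typedefs.
    typedef trainingExampleTemplate<double> trainingExample;
    typedef regressionTemplate<double>      regression;      // old API: doubles
    typedef regressionTemplate<float>       regressionFloat; // new float variant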
@@ -10,10 +10,10 @@
 int main(int argc, const char * argv[]) {
 //////////////////////////////////////////////////////////////////////////////////simple multilayer test
-regression<double> myNN2;
+regression myNN2;
-std::vector<trainingExample<double> > trainingSet1;
-trainingExample<double> tempExample1;
+std::vector<trainingExample> trainingSet1;
+trainingExample tempExample1;
 tempExample1.input = { 1.0, 1.0, 1.0 };
 tempExample1.output = { 10.0 };
 trainingSet1.push_back(tempExample1);
@@ -55,7 +55,7 @@ int main(int argc, const char * argv[]) {
 //////////////////////////////////////////////////////////////////////////////////bug?
-regression<double> myNNJS;
+regression myNNJS;
 trainingSet1.clear();
 tempExample1.input = { 8.0 };
@@ -72,12 +72,12 @@ int main(int argc, const char * argv[]) {
 ////////////////////////////////////////////////////////////////////////////////
-regression<double> myNN;
-classification<double> myKnn;
-classification<double> mySVM(classification<double>::svm);
+regression myNN;
+classification myKnn;
+classification mySVM(classification::svm);
-std::vector<trainingExample<double> > trainingSet;
-trainingExample<double> tempExample;
+std::vector<trainingExample> trainingSet;
+trainingExample tempExample;
 tempExample.input = { 0.2, 0.7 };
 tempExample.output = { 3.0 };
 trainingSet.push_back(tempExample);
@@ -92,10 +92,10 @@ int main(int argc, const char * argv[]) {
 myNN.writeJSON(filepath);
-regression<double> myNNfromString;
+regression myNNfromString;
 myNNfromString.putJSON(myNN.getJSON());
-regression<double> myNNfromFile;
+regression myNNfromFile;
 myNNfromFile.readJSON(filepath);
 std::vector<double> inputVec = { 2.0, 44.2 };
@@ -116,10 +116,10 @@ int main(int argc, const char * argv[]) {
 std::string filepath2 = "/var/tmp/modelSetDescription_knn.json";
 myKnn.writeJSON(filepath2);
-classification<double> myKnnFromString(classification<double>::knn);
+classification myKnnFromString(classification::knn);
 myKnnFromString.putJSON(myKnn.getJSON());
-classification<double> myKnnFromFile;
+classification myKnnFromFile;
 myKnnFromFile.readJSON(filepath2);
 std::cout << "knn before: " << myKnn.run(inputVec)[0] << std::endl;
@@ -135,8 +135,8 @@ int main(int argc, const char * argv[]) {
 assert(myKnn.getK()[0] == 2);
 // regression<float> bigVector;
-std::vector<trainingExample<float> > trainingSet2;
-trainingExample<float> tempExample2;
+std::vector<trainingExampleTemplate<float> > trainingSet2;
+trainingExampleTemplate<float> tempExample2;
 std::default_random_engine generator;
 std::uniform_real_distribution<float> distribution(-0.5,0.5);
 int vecLength = 64;
@@ -159,10 +159,10 @@ int main(int argc, const char * argv[]) {
 /////////
-classification<double> mySVM2(classification<double>::svm);
+classification mySVM2(classification::svm);
-std::vector<trainingExample<double> > trainingSet3;
-trainingExample<double> tempExample3;
+std::vector<trainingExample> trainingSet3;
+trainingExample tempExample3;
 tempExample3.input = { 0., 0. };
 tempExample3.output = { 0. };
...
@@ -23,7 +23,7 @@ class baseModel {
 public:
 virtual ~baseModel() {};
 virtual T run(const std::vector<T> &inputVector) = 0;
-virtual void train(const std::vector<trainingExample<T> > &trainingSet) = 0;
+virtual void train(const std::vector<trainingExampleTemplate<T> > &trainingSet) = 0;
 virtual void reset() = 0;;
 virtual int getNumInputs() const = 0;
 virtual std::vector<int> getWhichInputs() const = 0;
...
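For orientation, baseModel<T> is the abstract interface that every model held by a modelSet implements. A stripped-down, self-contained sketch of a hypothetical concrete model against a reduced version of that interface (the real class has more pure virtual members than shown here):

    #include <vector>

    template <typename T> struct trainingExampleTemplate { std::vector<T> input, output; };

    // Reduced version of the interface in the hunk above.
    template <typename T>
    class baseModel {
    public:
        virtual ~baseModel() {}
        virtual T run(const std::vector<T> &inputVector) = 0;
        virtual void train(const std::vector<trainingExampleTemplate<T>> &trainingSet) = 0;
        virtual void reset() = 0;
    };

    // A do-nothing model that satisfies the reduced interface, e.g. for tests.
    template <typename T>
    class meanModel : public baseModel<T> {
        T mean = 0;
    public:
        T run(const std::vector<T> &) override { return mean; }
        void train(const std::vector<trainingExampleTemplate<T>> &set) override {
            mean = 0;
            for (const auto &ex : set) mean += ex.output.empty() ? T(0) : ex.output[0];
            if (!set.empty()) mean /= T(set.size());
        }
        void reset() override { mean = 0; }
    };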
@@ -14,7 +14,7 @@
 #endif
 template<typename T>
-classification<T>::classification() {
+classificationTemplate<T>::classificationTemplate() {
 modelSet<T>::numInputs = 0;
 modelSet<T>::numOutputs = 0;
 modelSet<T>::created = false;
@@ -22,7 +22,7 @@ classification<T>::classification() {
 };
 template<typename T>
-classification<T>::classification(classificationTypes classification_type) {
+classificationTemplate<T>::classificationTemplate(classificationTypes classification_type) {
 modelSet<T>::numInputs = 0;
 modelSet<T>::numOutputs = 0;
 modelSet<T>::created = false;
@@ -30,7 +30,7 @@ classification<T>::classification(classificationTypes classification_type) {
 };
 template<typename T>
-classification<T>::classification(const int &num_inputs, const int &num_outputs) { //TODO: this feature isn't really useful
+classificationTemplate<T>::classificationTemplate(const int &num_inputs, const int &num_outputs) { //TODO: this feature isn't really useful
 modelSet<T>::numInputs = num_inputs;
 modelSet<T>::numOutputs = num_outputs;
 modelSet<T>::created = false;
@@ -38,7 +38,7 @@ classification<T>::classification(const int &num_inputs, const int &num_outputs)
 for (int i = 0; i < modelSet<T>::numInputs; ++i) {
 whichInputs.push_back(i);
 }
-std::vector<trainingExample<T> > trainingSet;
+std::vector<trainingExampleTemplate<T> > trainingSet;
 for (int i = 0; i < modelSet<T>::numOutputs; ++i) {
 modelSet<T>::myModelSet.push_back(new knnClassification<T>(modelSet<T>::numInputs, whichInputs, trainingSet, 1));
 }
@@ -46,7 +46,7 @@ classification<T>::classification(const int &num_inputs, const int &num_outputs)
 };
 template<typename T>
-classification<T>::classification(const std::vector<trainingExample<T> > &trainingSet) {
+classificationTemplate<T>::classificationTemplate(const std::vector<trainingExampleTemplate<T> > &trainingSet) {
 modelSet<T>::numInputs = 0;
 modelSet<T>::numOutputs = 0;
 modelSet<T>::created = false;
@@ -54,7 +54,7 @@ classification<T>::classification(const std::vector<trainingExample<T> > &traini
 };
 template<typename T>
-bool classification<T>::train(const std::vector<trainingExample<T> > &trainingSet) {
+bool classificationTemplate<T>::train(const std::vector<trainingExampleTemplate<T> > &trainingSet) {
 //TODO: time this process?
 modelSet<T>::myModelSet.clear();
 //create model(s) here
@@ -87,7 +87,7 @@ bool classification<T>::train(const std::vector<trainingExample<T> > &trainingSe
 }
 template<typename T>
-std::vector<int> classification<T>::getK() {
+std::vector<int> classificationTemplate<T>::getK() {
 std::vector<int> kVector;
 for (baseModel<T>* model : modelSet<T>::myModelSet) {
 knnClassification<T>* kNNModel = dynamic_cast<knnClassification<T>*>(model); //FIXME: I really dislike this design
@@ -97,11 +97,11 @@ std::vector<int> classification<T>::getK() {
 }
 template<typename T>
-void classification<T>::setK(const int whichModel, const int newK) {
+void classificationTemplate<T>::setK(const int whichModel, const int newK) {
 knnClassification<T>* kNNModel = dynamic_cast<knnClassification<T>*>(modelSet<T>::myModelSet[whichModel]); //FIXME: I really dislike this design
 kNNModel->setK(newK);
 }
 //explicit instantiation
-template class classification<double>;
-template class classification<float>;
+template class classificationTemplate<double>;
+template class classificationTemplate<float>;
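The explicit instantiations at the end matter because the member definitions live in this .cpp file rather than the header: only the listed types are emitted into the object file, so only classificationTemplate<double> and classificationTemplate<float> can be used from other translation units. A self-contained sketch of the same pattern, using a hypothetical widgetTemplate:

    // widget.h -- declaration only; definitions live in the .cpp
    template <typename T>
    class widgetTemplate {
    public:
        T doubled(T x);
    };

    // widget.cpp -- definitions plus explicit instantiations
    template <typename T>
    T widgetTemplate<T>::doubled(T x) { return x + x; }

    template class widgetTemplate<double>;  // emitted into this object file
    template class widgetTemplate<float>;
    // Any other instantiation (e.g. widgetTemplate<int>) used elsewhere would fail at link time.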
@@ -18,26 +18,26 @@
 */
 template<typename T>
-class classification : public modelSet<T> {
+class classificationTemplate : public modelSet<T> {
 public:
 enum classificationTypes { knn, svm };
 /** with no arguments, just make an empty vector */
-classification();
+classificationTemplate();
 /** speciify classification type */
-classification(classificationTypes classificationType);
+classificationTemplate(classificationTypes classificationType);
 /** create based on training set inputs and outputs */
-classification(const std::vector<trainingExample<T> > &trainingSet);
+classificationTemplate(const std::vector<trainingExampleTemplate<T> > &trainingSet);
 /** create with proper models, but not trained */
-classification(const int &numInputs, const int &numOutputs);
+classificationTemplate(const int &numInputs, const int &numOutputs);
 /** destructor */
-~classification() {}
+~classificationTemplate() {}
 /** Train on a specified set, causes creation if not created */
-bool train(const std::vector<trainingExample<T> > &trainingSet);
+bool train(const std::vector<trainingExampleTemplate<T> > &trainingSet);
 /** Check the K values for each model. This feature is temporary, and will be replaced by a different design. */
 std::vector<int> getK();
@@ -50,4 +50,6 @@ private:
 classificationTypes classificationType;
 };
+typedef classificationTemplate<double> classification; //This is here so that the old API still works as expected.
+typedef classificationTemplate<float> classificationFloat;
 #endif
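With the typedefs in place, existing double-precision call sites compile unchanged and the nested enum stays reachable through the old name. A short usage sketch (assuming the header above is included; variable names are illustrative):

    classification myKnn;                        // same type as classificationTemplate<double>
    classification mySVM(classification::svm);   // the enum is still reachable through the typedef
    classificationFloat mySmallKnn;              // float-precision variant under the new name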
@@ -17,7 +17,7 @@
 #endif
 template<typename T>
-knnClassification<T>::knnClassification(const int &num_inputs, const std::vector<int> &which_inputs, const std::vector<trainingExample<T> > &_neighbours, const int k)
+knnClassification<T>::knnClassification(const int &num_inputs, const std::vector<int> &which_inputs, const std::vector<trainingExampleTemplate<T> > &_neighbours, const int k)
 : numInputs(num_inputs),
 whichInputs(which_inputs),
 neighbours(_neighbours),
@@ -69,13 +69,13 @@ template<typename T>
 void knnClassification<T>::addNeighbour(const int &classNum, const std::vector<T> &features) {
 std::vector<T> classVec;
 classVec.push_back(T(classNum));
-trainingExample<T> newNeighbour = {features, classVec};
+trainingExampleTemplate<T> newNeighbour = {features, classVec};
 neighbours.push_back(newNeighbour);
 updateK();
 };
 template<typename T>
-void knnClassification<T>::train(const std::vector<trainingExample<T> > &trainingSet) { //FIXME: Does numInputs need to be reset here? -MZ
+void knnClassification<T>::train(const std::vector<trainingExampleTemplate<T> > &trainingSet) { //FIXME: Does numInputs need to be reset here? -MZ
 neighbours.clear();
 neighbours = trainingSet;
 updateK();
@@ -95,7 +95,7 @@ T knnClassification<T>::run(const std::vector<T> &inputVector) {
 //Find k nearest neighbours
 int index = 0;
-for (typename std::vector<trainingExample<T> >::iterator it = neighbours.begin(); it != neighbours.end(); ++it) {
+for (typename std::vector<trainingExampleTemplate<T> >::iterator it = neighbours.begin(); it != neighbours.end(); ++it) {
 //find Euclidian distance for this neighbor
 T euclidianDistance = 0;
 for(int j = 0; j < numInputs ; ++j){
@@ -156,7 +156,7 @@ void knnClassification<T>::getJSONDescription(Json::Value &jsonModelDescription)
 jsonModelDescription["whichInputs"] = this->vector2json(whichInputs);
 jsonModelDescription["k"] = desiredK;
 Json::Value examples;
-for (typename std::vector<trainingExample<T> >::iterator it = neighbours.begin(); it != neighbours.end(); ++it) {
+for (typename std::vector<trainingExampleTemplate<T> >::iterator it = neighbours.begin(); it != neighbours.end(); ++it) {
 Json::Value oneExample;
 oneExample["class"] = it->output[0];
 oneExample["features"] = this->vector2json(it->input);
...
@@ -29,7 +29,7 @@ public:
 */
 knnClassification(const int &num_inputs,
 const std::vector<int> &which_inputs,
-const std::vector<trainingExample<T> > &trainingSet,
+const std::vector<trainingExampleTemplate<T> > &trainingSet,
 const int k);
 ~knnClassification();
@@ -50,7 +50,7 @@ public:
 * @param The training set is a vector of training examples that contain both a vector of input values and a value specifying desired output class.
 *
 */
-void train(const std::vector<trainingExample<T> > &trainingSet);
+void train(const std::vector<trainingExampleTemplate<T> > &trainingSet);
 /** Reset the model to its empty state. */
 void reset();
@@ -82,7 +82,7 @@ public:
 private:
 int numInputs;
 std::vector<int> whichInputs;
-std::vector<trainingExample<T>> neighbours;
+std::vector<trainingExampleTemplate<T>> neighbours;
 int desiredK; //K that user asked for might be limited but number of examples
 int currentK; //K minimum of desiredK or neighbours.size()
 inline void updateK();
...
@@ -36,8 +36,8 @@ modelSet<T>::~modelSet() {
 };
 template<typename T>
-bool modelSet<T>::train(const std::vector<trainingExample<T> > &training_set) {
-for (trainingExample<T> example : training_set) {
+bool modelSet<T>::train(const std::vector<trainingExampleTemplate<T> > &training_set) {
+for (trainingExampleTemplate<T> example : training_set) {
 if (example.input.size() != numInputs) {
 return false;
 }
@@ -46,13 +46,13 @@ bool modelSet<T>::train(const std::vector<trainingExample<T> > &training_set) {
 }
 }
 for (int i = 0; i < myModelSet.size(); ++i) {
-std::vector<trainingExample<T> > modelTrainingSet; //just one output
-for (trainingExample<T> example : training_set) {
+std::vector<trainingExampleTemplate<T> > modelTrainingSet; //just one output
+for (trainingExampleTemplate<T> example : training_set) {
 std::vector<T> tempT;
 for (int j = 0; j < numInputs; ++j) {
 tempT.push_back(example.input[j]);
 }
-trainingExample<T> tempObj = {tempT, std::vector<T> {example.output[i]}};
+trainingExampleTemplate<T> tempObj = {tempT, std::vector<T> {example.output[i]}};
 modelTrainingSet.push_back(tempObj);
 }
 myModelSet[i]->train(modelTrainingSet);
@@ -211,10 +211,10 @@ void modelSet<T>::json2modelSet(const Json::Value &root) {
 myModelSet.push_back(new neuralNetwork<T>(modelNumInputs, whichInputs, numHiddenLayers, numHiddenNodes, weights, wHiddenOutput, inRanges, inBases, outRange, outBase));
 } else if (model["modelType"].asString() == "kNN Classificiation") {
-std::vector<trainingExample<T> > trainingSet;
+std::vector<trainingExampleTemplate<T> > trainingSet;
 const Json::Value examples = model["examples"];
 for (unsigned int i = 0; i < examples.size(); ++i) {
-trainingExample<T> tempExample;
+trainingExampleTemplate<T> tempExample;
 tempExample.input = json2vector<T>(examples[i]["features"]);
 tempExample.output.push_back(examples[i]["class"].asDouble());
 trainingSet.push_back(tempExample);
...
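The train() loop above copes with multi-output training data by giving each model its own single-output training set: for model i it keeps the full input vector but only output[i]. A standalone sketch of that reshaping with concrete values (not library code):

    #include <vector>

    template <typename T> struct trainingExampleTemplate { std::vector<T> input, output; };

    int main() {
        // One example with two desired outputs...
        trainingExampleTemplate<double> ex = { {0.2, 0.7}, {3.0, 7.0} };

        // ...becomes one single-output example per model in the set.
        trainingExampleTemplate<double> forModel0 = { ex.input, { ex.output[0] } };  // {0.2, 0.7} -> 3.0
        trainingExampleTemplate<double> forModel1 = { ex.input, { ex.output[1] } };  // {0.2, 0.7} -> 7.0
        return 0;
    }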
@@ -26,7 +26,7 @@ public:
 modelSet();
 virtual ~modelSet();
 /** Train on a specified set, causes creation if not created */
-virtual bool train(const std::vector<trainingExample<T> > &trainingSet);
+virtual bool train(const std::vector<trainingExampleTemplate<T> > &trainingSet);
 /** reset to pre-training state */
 bool reset();
 /** run regression or classification for each model */
...
@@ -344,7 +344,7 @@ T neuralNetwork<T>::run(const std::vector<T> &inputVector) {
 }
 template<typename T>
-void neuralNetwork<T>::train(const std::vector<trainingExample<T > > &trainingSet) {
+void neuralNetwork<T>::train(const std::vector<trainingExampleTemplate<T > > &trainingSet) {
 initTrainer();
 //setup maxes and mins
 std::vector<T> inMax = trainingSet[0].input;
...
@@ -120,7 +120,7 @@ public:
 * @param The training set is a vector of training examples that contain both a vector of input values and a value specifying desired output.
 *
 */
-void train(const std::vector<trainingExample<T> > &trainingSet);
+void train(const std::vector<trainingExampleTemplate<T> > &trainingSet);
 private:
 /** Parameters that influence learning */
...
@@ -15,7 +15,7 @@
 #endif
 template<typename T>
-regression<T>::regression() {
+regressionTemplate<T>::regressionTemplate() {
 modelSet<T>::numInputs = 0;
 modelSet<T>::numOutputs = 0;
 numHiddenLayers = 1;
@@ -24,7 +24,7 @@ regression<T>::regression() {
 };
 template<typename T>
-regression<T>::regression(const int &num_inputs, const int &num_outputs) {
+regressionTemplate<T>::regressionTemplate(const int &num_inputs, const int &num_outputs) {
 modelSet<T>::numInputs = num_inputs;
 modelSet<T>::numOutputs = num_outputs;
 numHiddenLayers = 1;
@@ -41,7 +41,7 @@ regression<T>::regression(const int &num_inputs, const int &num_outputs) {
 };
 template<typename T>
-regression<T>::regression(const std::vector<trainingExample<T> > &training_set) {
+regressionTemplate<T>::regressionTemplate(const std::vector<trainingExampleTemplate<T> > &training_set) {
 modelSet<T>::numInputs = 0;
 modelSet<T>::numOutputs = 0;
 modelSet<T>::created = false;
@@ -49,7 +49,7 @@ regression<T>::regression(const std::vector<trainingExample<T> > &training_set)
 };
 template<typename T>
-std::vector<int> regression<T>::getNumHiddenLayers() {
+std::vector<int> regressionTemplate<T>::getNumHiddenLayers() {
 std::vector<int> vecNumHiddenLayers;
 if (std::begin(modelSet<T>::myModelSet) != std::end(modelSet<T>::myModelSet)) {
 for (baseModel<T>* model : modelSet<T>::myModelSet) {
@@ -63,7 +63,7 @@ std::vector<int> regression<T>::getNumHiddenLayers() {
 }
 template<typename T>
-void regression<T>::setNumHiddenLayers(const int &num_hidden_layers){
+void regressionTemplate<T>::setNumHiddenLayers(const int &num_hidden_layers){
 numHiddenLayers = num_hidden_layers;
 //Set any existing models
 if (std::begin(modelSet<T>::myModelSet) != std::end(modelSet<T>::myModelSet)) {
@@ -75,7 +75,7 @@ void regression<T>::setNumHiddenLayers(const int &num_hidden_layers){
 }
 template<typename T>
-void regression<T>::setNumEpochs(const int &epochs) {
+void regressionTemplate<T>::setNumEpochs(const int &epochs) {
 numEpochs = epochs;
 //set any existing models
 if (std::begin(modelSet<T>::myModelSet) != std::end(modelSet<T>::myModelSet)) {
@@ -87,7 +87,7 @@ void regression<T>::setNumEpochs(const int &epochs) {
 }
 template<typename T>
-bool regression<T>::train(const std::vector<trainingExample<T> > &training_set) {
+bool regressionTemplate<T>::train(const std::vector<trainingExampleTemplate<T> > &training_set) {
 //TODO: time this process?
 if (training_set.size() > 0) {
 if (modelSet<T>::created) {
@@ -128,5 +128,5 @@ bool regression<T>::train(const std::vector<trainingExample<T> > &training_set)
 }
 //explicit instantiation
-template class regression<double>;
-template class regression<float>;
+template class regressionTemplate<double>;
+template class regressionTemplate<float>;
@@ -18,20 +18,20 @@
 */
 template<typename T>
-class regression : public modelSet<T> {
+class regressionTemplate : public modelSet<T> {
 public:
 /** with no arguments, just make an empty vector */
-regression();
+regressionTemplate();
 /** create based on training set inputs and outputs */
-regression(const std::vector<trainingExample<T> > &trainingSet);
+regressionTemplate(const std::vector<trainingExampleTemplate<T> > &trainingSet);
 /** create with proper models, but not trained */
-regression(const int &numInputs, const int &numOutputs);
+regressionTemplate(const int &numInputs, const int &numOutputs);
 /** destructor */
-~regression() {};
+~regressionTemplate() {};
 /** Train on a specified set, causes creation if not created */
-bool train(const std::vector<trainingExample<T> > &trainingSet);
+bool train(const std::vector<trainingExampleTemplate<T> > &trainingSet);
 /** Call before train, to set the number of training epochs */
 void setNumEpochs(const int &epochs);
@@ -47,4 +47,6 @@ private:
 int numEpochs; //Temporary -- also should be part of nn only. -mz
 };
+typedef regressionTemplate<double> regression; //This is here so the old API still works
+typedef regressionTemplate<float> regressionFloat;
 #endif
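As with classification, the old name regression now means the double-precision model set, while regressionFloat names the float instantiation used for the large-vector test data earlier in the commit. A brief usage sketch (assumes the header above is included; names and the epoch count are illustrative):

    regression myNN;               // regressionTemplate<double>, as before
    regressionFloat bigVectorNN;   // float precision, e.g. for long input vectors
    bigVectorNN.setNumEpochs(500); // arbitrary value, set before training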
@@ -168,7 +168,7 @@ bool svmClassification<T>::init(
 }
 template<typename T>
-void svmClassification<T>::train(const std::vector<trainingExample<T> > &trainingSet) {
+void svmClassification<T>::train(const std::vector<trainingExampleTemplate<T> > &trainingSet) {
 //TODO: should be scaling data -1 to 1
 //Get normalization parameters
 std::vector<T> inMax = trainingSet[0].input;
...
@@ -64,7 +64,7 @@ public:
 * @param The training set is a vector of training examples that contain both a vector of input values and a double specifying desired output class.
 *
 */
-void train(const std::vector<trainingExample<T> > &trainingSet);
+void train(const std::vector<trainingExampleTemplate<T> > &trainingSet);
 /** Generate an output value from a single input vector.
 * @param A standard vector of doubles to be evaluated.
...
@@ -15,11 +15,14 @@
 /** This is used by both NN and KNN models for training */
 template<typename T>
-struct trainingExample {
+struct trainingExampleTemplate {
 std::vector<T> input;
 std::vector<T> output;
 };
+typedef trainingExampleTemplate<double> trainingExample; //This is here to keep the old API working
+typedef trainingExampleTemplate<float> trainingExampleFloat;
 /** This is used by DTW models for training */
 template<typename T>
 struct trainingSeries {
...
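The float typedef pairs with the large-vector test earlier in the commit; building a float training set through it looks roughly like this (a self-contained sketch that mirrors the test, not library code):

    #include <random>
    #include <vector>

    template <typename T>
    struct trainingExampleTemplate {
        std::vector<T> input;
        std::vector<T> output;
    };
    typedef trainingExampleTemplate<float> trainingExampleFloat;

    int main() {
        // Fill a 64-element float input with uniform noise, as in the test above.
        std::default_random_engine generator;
        std::uniform_real_distribution<float> distribution(-0.5f, 0.5f);
        const int vecLength = 64;

        std::vector<trainingExampleFloat> trainingSet;
        trainingExampleFloat ex;
        for (int i = 0; i < vecLength; ++i) ex.input.push_back(distribution(generator));
        ex.output = { 1.0f };
        trainingSet.push_back(ex);
        return 0;
    }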