Commit 50c44a5e authored by Francisco Bernardo

GVF integrated and basic test fixture set-up
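
The wrapper exposes GVF through the rapidmix API: phrases recorded into a
trainingData set become gesture templates via train(), and process() then
feeds the follower one observation frame at a time. A minimal sketch of the
intended call sequence, with names and call order taken from the test
fixture in this diff:

    rapidmix::gvfTemporalVariation gvf;
    rapidmix::trainingData myData;

    // record one gesture as a phrase of 2-D frames (no outputs needed)
    myData.startRecording();
    std::vector<double> inputs = { 4.0, 0.7 };
    std::vector<double> outputs = {};
    myData.addElement(inputs, outputs);
    inputs = { 3.0, 0.8 };
    myData.addElement(inputs, outputs);
    myData.stopRecording();

    gvf.train(myData);

    // each process() call feeds one observation; the returned vector is
    // [likeliestGesture, likelihoods..., alignments...]
    std::vector<double> outcomes = gvf.process({ 3.0, 0.8 });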

parent 34368f86
@@ -23,28 +23,33 @@ rapidGVF::~rapidGVF()
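// rapidmix's machineLearning facade specializes train() to forward to this GVF wrapper: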
template<>
bool rapidmix::machineLearning<rapidGVF>::train(const trainingData &newTrainingData) {
return rapidGVF::train(newTrainingData);
}
bool rapidGVF::train(const rapidmix::trainingData &newTrainingData) {
-if (newTrainingData.trainingSet.size() <= 1) {
-    // no recorded phrase (only default one)
+if (newTrainingData.trainingSet.size() < 1) {
+    // no recorded phrase
return false;
}
-if (newTrainingData.trainingSet.size() > 1 && newTrainingData.trainingSet[1].elements.size() == 0) {
+if (newTrainingData.trainingSet.size() == 1 && newTrainingData.trainingSet[0].elements.size() == 0) {
// empty recorded phrase
return false;
}
if(gvf->getState() != GVF::STATE_LEARNING)
{
gvf->setState(GVF::STATE_LEARNING);
}
//Go through every phrase
-for (int h = 0; h < newTrainingData.trainingSet.size(); ++h) {
+for (int h = 1; h < newTrainingData.trainingSet.size(); ++h) {
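+    // recorded phrases are iterated from index 1; index 0 holds the default phrase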
this->gvf->startGesture();
for (int i = 0; i < newTrainingData.trainingSet[h].elements.size(); ++i) {
this->currentGesture.clear();
std::vector<double> vd = newTrainingData.trainingSet[h].elements[i].input;
// Use the range constructor (template <class InputIterator> vector(first, last)) to convert vector<double> to vector<float>
@@ -58,27 +63,37 @@ bool rapidGVF::train(const rapidmix::trainingData &newTrainingData) {
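// The facade's run() likewise forwards to rapidGVF::process():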
template<>
std::vector<double> rapidmix::machineLearning<rapidGVF>::run(const std::vector<double> &inputVector) {
return rapidGVF::process(inputVector);
}
std::vector<double> rapidGVF::process(const std::vector<double> &inputVector){
if (inputVector.size() == 0) {
return std::vector<double>();
}
gvf->restart();
if(gvf->getState() != GVF::STATE_FOLLOWING)
{
gvf->setState(GVF::STATE_FOLLOWING);
}
// Use the range constructor (template <class InputIterator> vector(first, last)) to convert vector<double> to vector<float>
std::vector<float> vf(inputVector.begin(),inputVector.end());
this->currentGesture.addObservation(vf);
this->outcomes = this->gvf->update(this->currentGesture.getLastObservation());
std::vector<double> output;
-output.insert(output.end(), this->outcomes.likeliestGesture, 1);
+output.push_back(this->outcomes.likeliestGesture);
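+// output layout: [likeliestGesture, likelihoods (one per template), alignments (one per template)]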
output.insert(output.end(), this->outcomes.likelihoods.begin(), this->outcomes.likelihoods.end());
output.insert(output.end(), this->outcomes.alignments.begin(), this->outcomes.alignments.end());
// output.insert(output.end(), this->outcomes.dynamics.begin(), this->outcomes.dynamics.end());
// output.insert(output.end(), this->outcomes.scalings.begin(), this->outcomes.scalings.end());
// output.insert(output.end(), this->outcomes.rotations.begin(), this->outcomes.rotations.end());
//
return output;
}
@@ -66,7 +66,6 @@ TEST_CASE( "Tests default GVFGesture ctor.", "[GVF]" ) {
gvf->update(gesture.getLastObservation());
}
// float phase = gvf->getOutcomes().estimations[0].alignment;
// float speed = gvf->getOutcomes().estimations[0].dynamics[0];
//
@@ -78,6 +77,8 @@ TEST_CASE( "Tests default GVFGesture ctor.", "[GVF]" ) {
}
SCENARIO("Test GVF Regression", "[machineLearning]")
{
GIVEN("gvf object and training dataset")
@@ -85,155 +86,88 @@ SCENARIO("Test GVF Regression", "[machineLearning]")
{
rapidmix::gvfTemporalVariation gvf;
rapidmix::trainingData myData;
-std::vector<double> input = { 0.2, 0.7 };
-std::vector<double> output = { 3.0 };
-myData.addElement(input, output);
+// Record first gesture
+myData.startRecording();
+std::vector<double> inputs = { 4.0, 0.7 };
+std::vector<double> outputs = { };
+myData.addElement(inputs, outputs);
-input = { 2.0, 44.2 };
-output = { 20.14 };
-myData.addElement(input, output);
+inputs = { 3.0, 0.8 };
+myData.addElement(inputs, outputs);
-gvf.train(myData);
+inputs = { 2.0, 0.9 };
+myData.addElement(inputs, outputs);
+inputs = { 1.0, 1.0 };
+myData.addElement(inputs, outputs);
+inputs = { 0.4, 1.2 };
+myData.addElement(inputs, outputs);
+inputs = { 0.2, 1.4 };
+myData.addElement(inputs, outputs);
+myData.stopRecording();
+// Record reverse of first gesture
+myData.startRecording();
+inputs = { 0.2, 1.4 };
+myData.addElement(inputs, outputs);
-gvf.process(input);
+inputs = { 0.4, 1.2 };
+myData.addElement(inputs, outputs);
+inputs = { 1.0, 1.0 };
+myData.addElement(inputs, outputs);
-std::string filepath2 = "/var/tmp/modelSetDescription_gvf.json";
-// gvf.writeJSON(filepath2);
+inputs = { 2.0, 0.9 };
+myData.addElement(inputs, outputs);
+inputs = { 3.0, 0.8 };
+myData.addElement(inputs, outputs);
-std::vector<double> inputVec = { 2.0, 44.2 };
+inputs = { 4.0, 0.7 };
+myData.addElement(inputs, outputs);
+myData.stopRecording();
-WHEN("when gvf model is read from file")
+// Train
+gvf.train(myData);
+// Set first gesture (or a fragment of it)
+std::vector<double> outcomes;
+WHEN("when gvf is trained with a gesture and the reverse gesture")
{
-THEN("run models and compare")
+THEN("follows the first gesture and confirms it is the likeliestGesture, with reasonable likelihoods")
{
-// REQUIRE(gvf.run(inputVec)[0] == gvfFromFile.run(inputVec)[0]);
+outcomes = gvf.process({ 3.0, 0.8 });
+outcomes = gvf.process({ 2.0, 0.9 });
+outcomes = gvf.process({ 1.0, 1.0 });
+outcomes = gvf.process({ 0.4, 1.2 });
+outcomes = gvf.process({ 0.2, 1.4 });
+// The assumption for the test is that the outcome of the last segment of the test gesture must converge
+REQUIRE(outcomes[0] == 0); // outcomes[0] - likeliestGesture must equal the first gesture, '0'
+REQUIRE(outcomes[1] > 0.5); // outcomes[1] - likelihood of gesture '0' must be greater than 50%
+REQUIRE(outcomes[2] < 0.5); // outcomes[2] - likelihood of gesture '1' must be less than 50%
+// REQUIRE(outcomes[3] < 0.5); // outcomes[3] - alignment of gesture '0' must be less than 50%
+// REQUIRE(outcomes[4] < 0.5); // outcomes[4] - alignment of gesture '1' must be less than 50%
}
}
-WHEN("when gvf model is read from JSON stream")
+WHEN("when gvf is trained with two gestures")
{
-THEN("run models and compare")
+THEN("gvf follows the test gesture (the first gesture, scaled) and confirms it is the likeliestGesture, with reasonable likelihoods")
{
-// REQUIRE(gvf.run(inputVec)[0] == gvfFromString.run(inputVec)[0]);
}
}
}
}
/*
unsigned int runGVF(){
GVFGesture currentGesture;
GVFConfig config;
GVFOutcomes outcomes;
// START UNIT TEST LOOP HERE
// Init GVF
config.inputDimensions = 8;
config.translate = false;
config.segmentation = false;
GVF * gvf = new GVF(config);
// change set parameters (from default)
gvf->setScalingsVariance(0.00001f); // other values tried: 0.000002f, 0.00004f
gvf->setDynamicsVariance(0.0001f);
std::vector<std::string> trainingFiles;
// Data structure for training data
vector<vector<vector<float>>> trainingData = readData(trainPath, participantRegExp, trainingFiles);
for (vector<vector<vector<float>>>::iterator trainingGesture = trainingData.begin(); trainingGesture != trainingData.end(); trainingGesture++)
{
currentGesture.clear();
for (vector<vector<float>>::iterator frame = (*trainingGesture).begin(); frame != (*trainingGesture).end(); frame++)
WHEN("when gvf is trained with two gestures")
{
// Fill the template
currentGesture.addObservation(*frame);
}
// Add the template at the end of the gesture
gvf->addGestureTemplate(currentGesture);
}
// Set Following Mode (fitting here)
gvf->setState(GVF::STATE_FOLLOWING);
std::vector<std::string> testFilesList;
vector<GVFOutput> gvfOutputs = readTestData(testPath, participantRegExp, outputPath);
float tempAlignmentDistance = 0.0;
float alignmentDistance;
for (vector<GVFOutput>::iterator gvfOutput = gvfOutputs.begin(); gvfOutput != gvfOutputs.end(); gvfOutput++)
{
// here you restart before testing with an individual testing gesture
gvf->restart();
// boost::filesystem::ofstream outfile(gvfOutput->fileOut);
for (vector<vector<float>>::iterator frame = (gvfOutput->testData).begin(); frame != (gvfOutput->testData).end(); frame++) {
currentGesture.addObservation(*frame);
gvf->update(currentGesture.getLastObservation());
GVFOutcomes outcomes = gvf->getOutcomes();
if (outcomes.most_probable >= 0){
// 3 estimations x 8 scaling dimensions
for (int g = 0; g < 3; ++g)
    for (int d = 0; d < 8; ++d)
        outfile << outcomes.estimations[g].scalings[d] << ' ';
// 3 estimations x 2 dynamics dimensions
for (int g = 0; g < 3; ++g)
    outfile << outcomes.estimations[g].dynamics[0] << ' '
            << outcomes.estimations[g].dynamics[1] << ' ';
// alignments
for (int g = 0; g < 3; ++g)
    outfile << outcomes.estimations[g].alignment << ' ';
outfile << '0' << ' ';
// likelihoods
for (int g = 0; g < 3; ++g)
    outfile << outcomes.estimations[g].likelihood << ' ';
outfile << '1' << ' ' << '0' << ' ' << '0' << std::endl;
}
else
THEN("gvf follows the test gesture (first gesture scaled) and confirm it is the likeliestGesture and likelihoods anc reasonable ")
{
outfile << "Error" << endl;
}
// reconstruction and distance computation
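// The estimated alignment is a phase in [0, 1]; multiplying by the template
// length maps it to a frame index in the most probable gesture template. That
// frame, rescaled by the estimated per-dimension scalings, is compared to the
// observed frame to accumulate a Euclidean reconstruction error.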
int alignmentindex = 0;
if (gvf->getMostProbableGestureIndex()>=0)
alignmentindex = trainingData[gvf->getMostProbableGestureIndex()].size() * outcomes.estimations[gvf->getMostProbableGestureIndex()].alignment;
vector<float> alignedFrame = trainingData[gvf->getMostProbableGestureIndex()][alignmentindex];
float distanceRefAligned = 0.0;
for (int i=0; i<alignedFrame.size(); i++){
alignedFrame[i]*=outcomes.estimations[gvf->getMostProbableGestureIndex()].scalings[i];
distanceRefAligned+=pow(alignedFrame[i]-(*frame)[i],2);
}
tempAlignmentDistance += sqrt(distanceRefAligned);
}
alignmentDistance = tempAlignmentDistance / (float)(gvfOutput->testData).size();
outfile.close();
}
return 0;
}
*/
//TEST_CASE( "Factorials are computed", "[factorial]" ) {
// REQUIRE( Factorial(1) == 1 );
// REQUIRE( Factorial(2) == 2 );
// REQUIRE( Factorial(3) == 6 );
// REQUIRE( Factorial(10) == 3628800 );
//}