Compare revisions

Showing with 1177 additions and 0 deletions
#include "ofMain.h"
#include "ofApp.h"
//========================================================================
int main( ){
ofSetupOpenGL(640,480, OF_WINDOW); // <-------- setup the GL context
// this kicks off the running of my app
// can be OF_WINDOW or OF_FULLSCREEN
// pass in width and height too:
ofRunApp( new ofApp());
}
#include "ofApp.h"
//--------------------------------------------------------------
void ofApp::setup(){
ofBackground(0,0,0);
camWidth = 32; // try to grab at this size.
camHeight = 24;
vidGrabber.setVerbose(true);
vidGrabber.setup(camWidth,camHeight);
font.load("Courier New Bold.ttf", 20);
// this set of characters comes from processing:
//http://processing.org/learning/library/asciivideo.html
// changed order slightly to work better for mapping
asciiCharacters = string(" ..,,,'''``--_:;^^**""=+<>iv%&xclrs)/){}I?!][1taeo7zjLunT#@JCwfy325Fp6mqSghVd4EgXPGZbYkOA8U$KHDBWNMR0Q");
ofEnableAlphaBlending();
/*
xcfg.relativeRegularization = 0.1;
myGmm = new rapidmix::xmmStaticClassification(xcfg);
*/
sampleRate = 44100; /* Sampling Rate */
bufferSize = 512; /* Buffer Size. you have to fill this buffer with sound using the for loop in the audioOut method */
gam_1.load(ofToDataPath("261938__digitopia-cdm__saron-sdpl1.wav"));
gam_3.load(ofToDataPath("261730__digitopia-cdm__saron-sdpl3.wav"));
gam_4.load(ofToDataPath("261927__digitopia-cdm__saron-sdpl4.wav"));
gam_5.load(ofToDataPath("261924__digitopia-cdm__saron-sdpl5.wav"));
gam_6.load(ofToDataPath("261968__digitopia-cdm__saron-sdpl6.wav"));
gam_7.load(ofToDataPath("261883__digitopia-cdm__saron-sdpl7.wav"));
ofxMaxiSettings::setup(sampleRate, 2, bufferSize);
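// ofSoundStreamSetup arguments below: output channels, input channels, listener, sample rate, buffer size, number of buffers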
ofSoundStreamSetup(2,0,this, sampleRate, bufferSize, 4); /* this has to happen at the end of setup - it switches on the DAC */
}
//--------------------------------------------------------------
void ofApp::exit(){
ofSoundStreamStop();
ofSoundStreamClose();
}
//--------------------------------------------------------------
void ofApp::update(){
vidGrabber.update();
}
//--------------------------------------------------------------
void ofApp::draw(){
// change background video alpha value based on the mouse position
float videoAlphaValue = ofMap(mouseX, 0,ofGetWidth(),0,255);
// set a white fill color with the alpha generated above
ofSetColor(255,255,255,videoAlphaValue);
// draw the raw video frame with the alpha value generated above
vidGrabber.draw(0,0,640,480);
ofPixelsRef pixelsRef = vidGrabber.getPixels();
ofSetHexColor(0xffffff);
//-------RAPID-MIX---------------//
std::vector<double> trainingInput;
std::vector<double> trainingOutput;
switch (recordingState) {
case 1:
ofSetColor(255, 0, 0);
break;
case 2:
ofSetColor(0, 255, 0);
break;
case 3:
ofSetColor(0, 0, 255);
break;
default:
ofSetColor(255, 255, 255);
}
for (int i = 0; i < camWidth; ++i){
for (int j = 0; j < camHeight; ++j){
// get the pixel and its lightness (lightness is the average of its RGB values)
float lightness = pixelsRef.getColor(i,j).getLightness();
//RAPIDMIX
trainingInput.push_back(double(lightness));
if (!runToggle) {
// calculate the index of the character from our asciiCharacters array
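// e.g. a mid-grey pixel (lightness ~128) maps to ~0.5, and pow(0.5, 2.5) is ~0.18, so it lands roughly a fifth of the way into asciiCharacters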
int character = powf( ofMap(lightness, 0, 255, 0, 1), 2.5) * asciiCharacters.size();
// draw the character at the correct location
font.drawString(ofToString(asciiCharacters[character]), i * 20, j * 20);
}
}
}
if (runToggle) {
result = myKnn.run(trainingInput)[0];
// std::cout << "Gmm " << myGmm.run(trainingInput) << std::endl;
for (int i = 0; i < camWidth; ++i){
for (int j = 0; j < camHeight; ++j){
float lightness = pixelsRef.getColor(i,j).getLightness();
switch (result) {
case 1:
ofSetColor(lightness, 0, 0, lightness);
break;
case 2:
ofSetColor(0, lightness, 0, lightness);
break;
case 3:
ofSetColor(0, 0, lightness, lightness);
break;
default:
ofSetColor(lightness, lightness, lightness, lightness);
}
font.drawString(ofToString(result), i * 20, j * 20);
}
}
}
if (recordingState) {
trainingOutput = { double(recordingState) };
myData.addElement(trainingInput, trainingOutput);
}
}
//--------------------------------------------------------------
void ofApp::audioOut(float * output, int bufferSize, int nChannels) {
for (int i = 0; i < bufferSize; ++i){
currentCount=(int)timer.phasor(8);//this sets up a metronome that ticks 8 times a second
if (lastCount!=currentCount) {//if we have a new timer int this sample, play the sound
//This is a 16-step step sequencer
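//Only steps 1, 5, 7, 9, 11, 13 and 15 can trigger a sample; which one fires depends on the current classification result (1, 2 or 3)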
switch (playHead%16) {
case 1:
gam_4.trigger();
break;
case 5:
if (result == 3 || result == 2) {
gam_6.trigger();
}
break;
case 7:
if (result == 1) {
gam_5.trigger();
}
break;
case 9:
if (result == 1) {
gam_6.trigger();
}
break;
case 11:
if (result == 2) {
gam_7.trigger();
}
break;
case 13:
if (result == 3) {
gam_1.trigger();
}
break;
case 15:
if (result == 2) {
gam_3.trigger();
}
}
++playHead;//iterate the playhead
lastCount=0;//reset the metrotest
}
double gamOutput = gam_1.playOnce() + gam_3.playOnce() + gam_4.playOnce() + gam_5.playOnce() + gam_6.playOnce() + gam_7.playOnce();
mymix.stereo(gamOutput, outputs, 0.5);
output[i*nChannels ] = outputs[0];
output[i*nChannels + 1] = outputs[1];
}
}
//--------------------------------------------------------------
void ofApp::keyPressed (int key){
// in fullscreen mode, on a pc at least, the
// first time the video settings come up
// they come up *under* the fullscreen window.
// use alt-tab to navigate to the settings
// window. we are working on a fix for this...
if (key == 's' || key == 'S'){
vidGrabber.videoSettings();
}
switch(key) {
case 49:
recordingState = 1;
myData.startRecording("1");
break;
case 50:
recordingState = 2;
myData.startRecording("2");
break;
case 51:
recordingState = 3;
myData.startRecording("3");
break;
case 32:
runToggle = !runToggle;
if (!runToggle) {
result = 0;
}
}
std::cout << "runToggle " << runToggle << std::endl;
}
//--------------------------------------------------------------
void ofApp::keyReleased(int key){
recordingState = 0;
myData.stopRecording();
if(myData.trainingSet.size() > 0) {
myKnn.train(myData);
myGmm.train(myData);
}
}
//--------------------------------------------------------------
void ofApp::mouseMoved(int x, int y ){
}
//--------------------------------------------------------------
void ofApp::mouseDragged(int x, int y, int button){
}
//--------------------------------------------------------------
void ofApp::mousePressed(int x, int y, int button){
}
//--------------------------------------------------------------
void ofApp::mouseReleased(int x, int y, int button){
}
//--------------------------------------------------------------
void ofApp::mouseEntered(int x, int y){
}
//--------------------------------------------------------------
void ofApp::mouseExited(int x, int y){
}
//--------------------------------------------------------------
void ofApp::windowResized(int w, int h){
}
//--------------------------------------------------------------
void ofApp::gotMessage(ofMessage msg){
}
//--------------------------------------------------------------
void ofApp::dragEvent(ofDragInfo dragInfo){
}
#pragma once
#include "ofMain.h"
#include "rapidmix.h"
#include "ofxMaxim.h"
class ofApp : public ofBaseApp{
public:
void setup();
void exit();
void update();
void draw();
void keyPressed(int key);
void keyReleased(int key);
void mouseMoved(int x, int y );
void mouseDragged(int x, int y, int button);
void mousePressed(int x, int y, int button);
void mouseReleased(int x, int y, int button);
void mouseEntered(int x, int y);
void mouseExited(int x, int y);
void windowResized(int w, int h);
void dragEvent(ofDragInfo dragInfo);
void gotMessage(ofMessage msg);
ofVideoGrabber vidGrabber;
int camWidth;
int camHeight;
string asciiCharacters;
ofTrueTypeFont font;
//-----RAPID-MIX-----//
rapidmix::trainingData myData;
int recordingState;
rapidmix::staticClassification myKnn;
bool runToggle;
int result;
xmmToolConfig xcfg;
rapidmix::xmmStaticClassification myGmm;
//-----MAXI-----//
int bufferSize;
int initialBufferSize; //MZ: What is this for?
int sampleRate;
void audioOut(float * output, int bufferSize, int nChannels);
double outputs[2];
maxiMix mymix;
maxiOsc timer;
int currentCount;
int lastCount;
int playHead;
maxiSample gam_1, gam_3, gam_4, gam_5, gam_6, gam_7;
};
File added
File added
File added
File added
File added
File added
File added
<group>
<0>100</0>
<1>0</1>
<2>44</2>
<3>0</3>
<4>100</4>
<5>0</5>
<6>45</6>
<7>0</7>
<8>100</8>
<9>0</9>
<10>45</10>
<11>0</11>
</group>
#include "ofMain.h"
#include "ofApp.h"
//========================================================================
int main( ){
ofSetupOpenGL(1024,768,OF_WINDOW); // <-------- setup the GL context
// this kicks off the running of my app
// can be OF_WINDOW or OF_FULLSCREEN
// pass in width and height too:
ofRunApp(new ofApp());
}
#include <array>
#include "ofApp.h"
//--------------------------------------------------------------
void ofApp::setup(){
myo.setup();
//gui
probsClear.addListener(this, &ofApp::probsClearPressed);
resetModel.addListener(this, &ofApp::resetModelPressed);
guiTong.setup("tong");
guiTong.add(tong0.setup("1", 50, 0, 100));
guiTong.add(tong1.setup("2", 0, 0, 100));
guiTong.add(tong2.setup("3", 90, 0, 100));
guiTong.add(tong3.setup("4", 0, 0, 100));
guiTong.add(tong4.setup("5", 90, 0, 100));
guiTong.add(tong5.setup("6", 0, 0, 100));
guiTong.add(tong6.setup("7", 0, 0, 100));
guiTong.add(tong7.setup("8", 60, 0, 100));
guiTong.add(tong8.setup("9", 0, 0, 100));
guiTong.add(tong9.setup("10", 90, 0, 100));
guiTong.add(tong10.setup("11", 0, 0, 100));
guiTong.add(tong11.setup("12", 0, 0, 100));
ofColor green(0, 255, 0);
guiTong.setHeaderBackgroundColor(green);
guiThung.setup("thung", "thung", 250, 10);
guiThung.add(thung0.setup("1", 100, 0, 100));
guiThung.add(thung1.setup("2", 0, 0, 100));
guiThung.add(thung2.setup("3", 10, 0, 100));
guiThung.add(thung3.setup("4", 0, 0, 100));
guiThung.add(thung4.setup("5", 60, 0, 100));
guiThung.add(thung5.setup("6", 0, 0, 100));
guiThung.add(thung6.setup("7", 10, 0, 100));
guiThung.add(thung7.setup("8", 0, 0, 100));
guiThung.add(thung8.setup("9", 60, 0, 100));
guiThung.add(thung9.setup("10", 0, 0, 100));
guiThung.add(thung10.setup("11", 30, 0, 100));
guiThung.add(thung11.setup("12", 0, 0, 100));
ofColor yellow(255, 255, 0);
guiThung.setHeaderBackgroundColor(yellow);
guiGeneral.setup("general", "general", 500, 10);
guiGeneral.add(gain.setup("gain", 1., 0., 1.));
guiGeneral.add(modelControl.setup("run model", false));
guiGeneral.add(resetModel.setup("reset model"));
guiGeneral.add(inputDevice.setup("myo", false));
guiGeneral.add(probsClear.setup("clear"));
//This will make life easier later
allSliders.push_back(tong0);
allSliders.push_back(tong1);
allSliders.push_back(tong2);
allSliders.push_back(tong3);
allSliders.push_back(tong4);
allSliders.push_back(tong5);
allSliders.push_back(tong6);
allSliders.push_back(tong7);
allSliders.push_back(tong8);
allSliders.push_back(tong9);
allSliders.push_back(tong10);
allSliders.push_back(tong11);
allSliders.push_back(thung0);
allSliders.push_back(thung1);
allSliders.push_back(thung2);
allSliders.push_back(thung3);
allSliders.push_back(thung4);
allSliders.push_back(thung5);
allSliders.push_back(thung6);
allSliders.push_back(thung7);
allSliders.push_back(thung8);
allSliders.push_back(thung9);
allSliders.push_back(thung10);
allSliders.push_back(thung11);
//RapidLib
trained = false;
//loading samples
saron_sbpl1.load(ofToDataPath("saron-sbpl1.wav"));
saron_sbpl2.load(ofToDataPath("saron-sbpl2.wav"));
saron_sbpl3.load(ofToDataPath("saron-sbpl3.wav"));
saron_sbpl4.load(ofToDataPath("saron-sbpl4.wav"));
saron_sbpl5.load(ofToDataPath("saron-sbpl5.wav"));
ciblon_tong.load(ofToDataPath("drums-ciblon-medium-tong.wav"));
ciblon_thung.load(ofToDataPath("drums-ciblon-medium-thung.wav"));
//maxi Clock
myClock.setTicksPerBeat(4);//This sets the number of ticks per beat
myClock.setTempo(120);// This sets the tempo in Beats Per Minute
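// 4 ticks per beat at 120 BPM works out to 8 clock ticks per second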
//audio setup
sampleRate = 44100;
bufferSize = 512;
ofxMaxiSettings::setup(sampleRate, 2, bufferSize);
ofSoundStreamSetup(2,2,this, sampleRate, bufferSize, 4);
}
//--------------------------------------------------------------
void ofApp::update(){
if (inputDevice) { //Only when Myo is toggled on
//Simple gain control
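//gain follows the RMS of EMG channel 4 over the rapidStream window (25 samples, set in ofApp.h), scaled down to a rough 0-1 range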
float emg = myo.getDevices()[0]->getEmgSamples()[4];
streamBuf.pushToWindow(double(emg));
gain = streamBuf.rms() * 0.01;
//Machine learning with quaternions
double myoX = myo.getDevices()[0]->getQuaternion().x();
double myoY = myo.getDevices()[0]->getQuaternion().y();
double myoZ = myo.getDevices()[0]->getQuaternion().z();
double myoW = myo.getDevices()[0]->getQuaternion().w();
if (inputDevice) {
if (recordingState > 0) {
trainingExample tempExample;
tempExample.input = { myoX, myoY, myoZ, myoW };
for (int i = 0; i < allSliders.size() ; ++i) {
tempExample.output.push_back(double(allSliders[i]));
}
trainingSet.push_back(tempExample);
} else if (trained && modelControl == 1) {
std::vector<double> inputVec;
inputVec.push_back(myoX);
inputVec.push_back(myoY);
inputVec.push_back(myoZ);
inputVec.push_back(myoW);
std::vector<double> output = myNN.run(inputVec);
for (int i = 0; i < output.size(); ++i) {
allSliders[i] = int(output[i]);
}
}
}
}
}
//--------------------------------------------------------------
void ofApp::exit(){
myo.stop();
}
//--------------------------------------------------------------
void ofApp::draw(){
//ofClear(0);
ofDrawBitmapString(currentBeat, 100, 300);
ofDrawBitmapString("Hold space to record", 200, 300);
guiTong.draw();
guiThung.draw();
guiGeneral.draw();
if (inputDevice) {
ofSetColor(255);
for ( int i=0; i<myo.getDevices().size(); i++ ) {
stringstream s;
s << "id: " << myo.getDevices()[i]->getId() << endl;
s << "which: " << myo.getDevices()[i]->getWhichArm() << endl;
s << "pose: " << myo.getDevices()[i]->getPose() << endl;
s << "accel: ";
s << myo.getDevices()[i]->getAccel().x << ",";
s << myo.getDevices()[i]->getAccel().y << ",";
s << myo.getDevices()[i]->getAccel().z << endl;
s << "gyro: ";
s << myo.getDevices()[i]->getGyro().x << ",";
s << myo.getDevices()[i]->getGyro().y << ",";
s << myo.getDevices()[i]->getGyro().z << endl;
s << "quaternion: ";
s << myo.getDevices()[i]->getQuaternion().x() << ",";
s << myo.getDevices()[i]->getQuaternion().y() << ",";
s << myo.getDevices()[i]->getQuaternion().z() << ",";
s << myo.getDevices()[i]->getQuaternion().w() << endl;
s << "roll/pitch/yaw: ";
s << myo.getDevices()[i]->getRoll() << ",";
s << myo.getDevices()[i]->getPitch() << ",";
s << myo.getDevices()[i]->getYaw() << endl;
s << "raw data: ";
for ( int j=0; j<8; j++ ) {
s << myo.getDevices()[i]->getEmgSamples()[j];
s << ",";
}
s << endl;
ofSetColor(0);
ofDrawBitmapString(s.str(), 10, 400 + i*100);
}
}
}
//--------------------------------------------------------------
void ofApp::probsClearPressed() {
std::cout << "clearing probs" << std::endl;
for (int i = 0; i < allSliders.size(); ++i) {
allSliders[i] = 0;
}
}
//--------------------------------------------------------------
void ofApp::resetModelPressed() {
std::cout << "resetting models" << std::endl;
myNN.reset();
trainingSet.clear();
modelControl = false;
}
//--------------------------------------------------------------
bool eventTest(int prob) {
int testRand = rand() % 100;
if (testRand < prob) {
return true;
}
return false;
}
//--------------------------------------------------------------
void ofApp::audioOut(float * output, int bufferSize, int nChannels) {
//probs = { tong0, tong1, tong2, tong3, tong4, tong5, tong6, tong7, tong8, tong9, tong10, tong11 };
//probs2 = { thung0, thung1, thung2, thung3, thung4, thung5, thung6, thung7, thung8, thung9, thung10, thung11 };
bool beatsTong[12];
bool beats2[12];
for (int i = 0; i < 12; ++i) {
beatsTong[i] = eventTest(allSliders[i]);
beats2[i] = eventTest(allSliders[i + 12]);
}
int lastCount = 0;
int testMe = 0;
for (int i = 0; i < bufferSize; i++){
myClock.ticker();
if (myClock.tick) {
if (beatsTong[currentBeat]) {
ciblon_tong.trigger();
}
if (beats2[currentBeat]) {
ciblon_thung.trigger();
}
currentBeat = (currentBeat + 1) % 12;
}
outputs[0] = ciblon_tong.playOnce() * gain;
outputs[1] = ciblon_thung.playOnce() * gain;
output[i*nChannels ] = outputs[0];
output[i*nChannels + 1] = outputs[1];
}
}
//--------------------------------------------------------------
void ofApp::keyPressed(int key){
//std::cout << "key: " << key << std::endl;
switch(key) {
case 32:
recordingState = 1;
break;
case 13:
modelControl = !modelControl;
break;
}
}
//--------------------------------------------------------------
void ofApp::keyReleased(int key){
recordingState = 0;
if (trainingSet.size() > 0) {
trained = myNN.train(trainingSet);
std::cout << "trained: " << trained << std::endl;
}
}
//--------------------------------------------------------------
void ofApp::mouseMoved(int x, int y ){
if (inputDevice == false) { //don't do this when the Myo is on
if (recordingState > 0) {
trainingExample tempExample;
tempExample.input = { double(x), double(y) };
for (int i = 0; i < allSliders.size() ; ++i) {
tempExample.output.push_back(double(allSliders[i]));
}
trainingSet.push_back(tempExample);
} else if (trained && modelControl == 1) {
std::vector<double> inputVec;
inputVec.push_back(double(x));
inputVec.push_back(double(y));
std::vector<double> output = myNN.run(inputVec);
for (int i = 0; i < output.size(); ++i) {
allSliders[i] = int(output[i]);
}
}
}
}
//--------------------------------------------------------------
void ofApp::mouseDragged(int x, int y, int button){
}
//--------------------------------------------------------------
void ofApp::mousePressed(int x, int y, int button){
}
//--------------------------------------------------------------
void ofApp::mouseReleased(int x, int y, int button){
}
//--------------------------------------------------------------
void ofApp::mouseEntered(int x, int y){
}
//--------------------------------------------------------------
void ofApp::mouseExited(int x, int y){
}
//--------------------------------------------------------------
void ofApp::windowResized(int w, int h){
}
//--------------------------------------------------------------
void ofApp::gotMessage(ofMessage msg){
}
//--------------------------------------------------------------
void ofApp::dragEvent(ofDragInfo dragInfo){
}
#pragma once
#include <vector>
#include "ofMain.h"
#include "ofxGui.h"
#include "ofxMyo.h"
#include "ofxMaxim.h"
#include "regression.h"
#include "rapidStream.h"
class ofApp : public ofBaseApp{
public:
void setup();
void update();
void draw();
void exit();
void keyPressed(int key);
void keyReleased(int key);
void mouseMoved(int x, int y );
void mouseDragged(int x, int y, int button);
void mousePressed(int x, int y, int button);
void mouseReleased(int x, int y, int button);
void mouseEntered(int x, int y);
void mouseExited(int x, int y);
void windowResized(int w, int h);
void dragEvent(ofDragInfo dragInfo);
void gotMessage(ofMessage msg);
void probsClearPressed();
void resetModelPressed();
ofxIntSlider thung0, thung1, thung2, thung3;
ofxIntSlider thung4, thung5, thung6, thung7;
ofxIntSlider thung8, thung9, thung10, thung11;
std::vector<ofxIntSlider> thungs;
ofxPanel guiThung;
ofxIntSlider tong0, tong1, tong2, tong3;
ofxIntSlider tong4, tong5, tong6, tong7;
ofxIntSlider tong8, tong9, tong10, tong11;
ofxPanel guiTong;
ofxFloatSlider gain;
ofxToggle inputDevice;
ofxToggle modelControl;
ofxButton resetModel;
ofxButton probsClear;
ofxPanel guiGeneral;
std::vector<ofxIntSlider> allSliders;
//Maxi
void audioOut(float * output, int bufferSize, int nChannels);
//void audioIn(float * input, int bufferSize, int nChannels);
int bufferSize;
int initialBufferSize;
int sampleRate;
private:
ofxMyo::Myo myo;
//---------Maxi---------------//
double outputs[2];
maxiSample saron_sbpl1;
maxiSample saron_sbpl2;
maxiSample saron_sbpl3;
maxiSample saron_sbpl4;
maxiSample saron_sbpl5;
maxiSample ciblon_tong;
maxiSample ciblon_thung;
maxiClock myClock;
int currentBeat;
//---------RapidLib---------------//
regression myNN;
std::vector<trainingExample> trainingSet;
int recordingState;
bool trained;
rapidStream streamBuf = rapidStream(25);
};
#!/bin/bash
#Make sure we've got the latest version
git pull
git submodule update --init --recursive
rm -rf build/
mkdir build
cd build
cmake ..
make
./rapidmixTest
cp ./helloRapidMix ../examples/HelloRapidMix/helloRapidMix
\ No newline at end of file
Copyright (c) 2017 Goldsmiths College University of London
Copyright (c) 2017 by IRCAM – Centre Pompidou, Paris, France
All rights reserved.
The RAPID-MIX API wrapper, in the /src directory, is licenced by the BSD license below. Submodules in the /dependances
folder have their own copyrights and licenses, including MIT, BSD, and GPLv3 licenses. Users are requested to check
individual folders for license details, or to contact RAPID-MIX developers.
BSD 3-clause
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
- Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
/**
* @file machineLearning.cpp
* @author Michael Zbyszynski
* @date 10 Jan 2016
* @copyright
* Copyright © 2017 Goldsmiths. All rights reserved.
*/
#include "machineLearning.h"
namespace rapidmix {
//////////////////////////////////////////////////////////////////////// Generic train
template <class MachineLearningModule>
bool machineLearning<MachineLearningModule>::train(const trainingData &newTrainingData) {
return MachineLearningModule::train(newTrainingData);
}
/////////////////////////////////////////////////////////////////////// RapidLib specializations
void trainingData2rapidLib (const trainingData &newTrainingData, std::vector<trainingExample> &trainingSet) {
for (int h = 0; h < newTrainingData.trainingSet.size(); ++h) { //Go through every phrase
for (int i = 0; i < newTrainingData.trainingSet[h].elements.size(); ++i) { //...and every element
trainingExample tempExample;
tempExample.input = newTrainingData.trainingSet[h].elements[i].input;
if (newTrainingData.trainingSet[h].elements[i].output.size() > 0) {
tempExample.output = newTrainingData.trainingSet[h].elements[i].output;
} else {
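// no explicit output recorded for this element: fall back to the phrase index as the class label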
tempExample.output.push_back(double(h));
}
trainingSet.push_back(tempExample);
}
}
};
/////////////////////////////////////////////////////////////////////// RapidLib classification
template<>
bool machineLearning<classification>::train(const trainingData &newTrainingData) {
std::vector<trainingExample> trainingSet;
labels.clear();
for (int i = 0; i < newTrainingData.trainingSet.size(); ++i) {
labels.push_back(newTrainingData.trainingSet[i].label);
}
trainingData2rapidLib(newTrainingData, trainingSet);
return classification::train(trainingSet);
}
template<>
std::string machineLearning<classification>::run(const std::vector<double> &inputVector, const std::string &label) {
int classIndex = classification::run(inputVector)[0];
return labels[classIndex];
};
/////////////////////////////////////////////////////////////////////// RapidLib regression
template<>
bool machineLearning<regression>::train(const trainingData &newTrainingData) {
std::vector<trainingExample> trainingSet;
trainingData2rapidLib(newTrainingData, trainingSet);
return regression::train(trainingSet);
}
/////////////////////////////////////////////////////////////////////// RapidLib seriesClassification
template<>
bool machineLearning<seriesClassification>::train(const trainingData &newTrainingData) {
std::vector<trainingSeries> seriesSet;
for (int i = 0; i < newTrainingData.trainingSet.size(); ++i) { //each phrase
trainingSeries tempSeries;
tempSeries.label = newTrainingData.trainingSet[i].label;
for (int j = 0; j < newTrainingData.trainingSet[i].elements.size(); ++j) { //each element
tempSeries.input.push_back(newTrainingData.trainingSet[i].elements[j].input);
}
seriesSet.push_back(tempSeries);
}
return seriesClassification::train(seriesSet);
}
template<>
std::string machineLearning<seriesClassification>::run(const std::vector<std::vector<double> > &inputSeries) {
return seriesClassification::run(inputSeries);
}
/////////////////////////////////////////////////////////////////////// GVF
template<>
bool machineLearning<rapidGVF>::train(const trainingData &newTrainingData) {
return rapidGVF::train(newTrainingData);
}
}
/**
* @file machineLearning.h
* @author Michael Zbyszynski on 10 Jan 2016
* @copyright
* Copyright © 2017 Goldsmiths. All rights reserved.
*
* @ingroup machinelearning
*/
#ifndef machineLearning_h
#define machineLearning_h
#include <vector>
#include "../rapidmix.h"
////////// Include all of the machine learning algorithms here
#include "classification.h"
#include "regression.h"
#include "seriesClassification.h"
#include "./rapidXMM/rapidXMM.h"
#include "./rapidGVF/rapidGVF.h"
namespace rapidmix {
// forward declaration
class trainingData;
/** @brief A generic output struct to fit all kinds of models */
typedef struct runResults_t {
std::vector<double> likelihoods;
std::vector<double> regression;
std::vector<double> progressions;
std::string likeliest;
} runResults;
/**
* @brief Host class for machine learning algorithms
*/
template <typename MachineLearningModule>
class machineLearning : public MachineLearningModule {
public:
//* Constructors */
machineLearning() : MachineLearningModule() {};
template<class T>
machineLearning(T type) : MachineLearningModule(type) {};
/**
* @brief This function becomes specialized in the implementation
*/
bool train(const trainingData &newTrainingData);
//* this function is not being specialized
std::vector<double> run(const std::vector<double> &inputVector) {
return MachineLearningModule::run(inputVector);
}
// This is a hack while I think about how to do this. -MZ //
std::string run(const std::vector<double> &inputVector, const std::string &label);
//* This is the one I'm using for DTW */
std::string run(const std::vector<std::vector<double> > &inputSeries);
bool reset() {
return MachineLearningModule::reset();
}
private:
MachineLearningModule module;
//this holds string labels
std::vector<std::string> labels; //FIXME: This probably should be pushed down into rapidLib?
std::string getLabel(int value);
};
////////// typedefs for calling different algorithms
///// RapidLib
/** @brief static classification using KNN from RapidLib */
typedef machineLearning<classification> staticClassification;
/** @brief static regression using Neural Networks from RapidLib */
typedef machineLearning<regression> staticRegression;
/** @brief temporal classification using Dynamic Time Warping from RapidLib */
typedef machineLearning<seriesClassification> dtwTemporalClassification;
///// XMM
/** @brief configuration for XMM based algorithms */
typedef xmmToolConfig xmmConfig;
/** @brief static classification using Gaussian Mixture Models from XMM */
typedef machineLearning<rapidXmmGmm> xmmStaticClassification;
/** @brief static regression using Gaussian Mixture Models from XMM */
typedef machineLearning<rapidXmmGmr> xmmStaticRegression;
/** @brief temporal classification using Hierarchical Hidden Markov Models from XMM */
typedef machineLearning<rapidXmmHmm> xmmTemporalClassification;
/** @brief temporal regression using Hierarchical Hidden Markov Models from XMM */
typedef machineLearning<rapidXmmHmr> xmmTemporalRegression;
///// GVF
/** @brief temporal variation estimation using GVF library */
typedef machineLearning<rapidGVF> gvfTemporalVariation;
}
#endif
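A minimal usage sketch for the typedefs above. It only uses calls that already appear in the ofApp examples in this diff (trainingData::startRecording / addElement / stopRecording and staticClassification::train / run); the function name and the feature values are made up for illustration.
#include "rapidmix.h"
// Sketch only: records two labelled phrases, trains a KNN classifier, and runs it on a new input.
int classifySketch() {
    rapidmix::trainingData data;
    rapidmix::staticClassification knn;
    data.startRecording("1");                     // phrase labelled "1"
    data.addElement({ 0.1, 0.2, 0.3 }, { 1.0 });  // input vector, output (class) vector
    data.stopRecording();
    data.startRecording("2");                     // phrase labelled "2"
    data.addElement({ 0.9, 0.8, 0.7 }, { 2.0 });
    data.stopRecording();
    knn.train(data);                              // train on everything recorded so far
    return int(knn.run({ 0.15, 0.25, 0.35 })[0]); // predicted class for a new input
}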
//
// rapidGVF.cpp
//
// Created by Francisco on 04/05/2017.
// Copyright © 2017 Goldsmiths. All rights reserved.
//
#include "rapidGVF.h"
#include "../trainingData.h"
rapidGVF::rapidGVF() {}
rapidGVF::~rapidGVF() {}
bool rapidGVF::train(const rapidmix::trainingData &newTrainingData)
{
if (newTrainingData.trainingSet.size() < 1)
{
// no recorded phrase
return false;
}
if (newTrainingData.trainingSet.size() == 1 && newTrainingData.trainingSet[0].elements.size() == 0) {
// empty recorded phrase
return false;
}
if(gvf.getState() != GVF::STATE_LEARNING)
{
gvf.setState(GVF::STATE_LEARNING);
}
//Go through every phrase
for (int h = 0; h < newTrainingData.trainingSet.size(); ++h)
{
gvf.startGesture();
for (int i = 0; i < newTrainingData.trainingSet[h].elements.size(); ++i)
{
std::vector<double> vd = newTrainingData.trainingSet[h].elements[i].input;
// Use the iterator-range vector constructor to convert from vector<double> to vector<float>
std::vector<float> vf(vd.begin(), vd.end());
this->currentGesture.addObservation(vf);
}
gvf.addGestureTemplate(this->currentGesture);
}
return true;
}
std::vector<double> rapidGVF::run(const std::vector<double> &inputVector)
{
if (inputVector.size() == 0)
{
return std::vector<double>();
}
gvf.restart();
if (gvf.getState() != GVF::STATE_FOLLOWING)
{
gvf.setState(GVF::STATE_FOLLOWING);
}
// Use the iterator-range vector constructor to convert from vector<double> to vector<float>
std::vector<float> vf(inputVector.begin(),inputVector.end());
this->currentGesture.addObservation(vf);
outcomes = gvf.update(this->currentGesture.getLastObservation());
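// pack the result as: [likeliest gesture index, one likelihood per recorded template, one alignment value per template]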
std::vector<double> output;
output.push_back(outcomes.likeliestGesture);
output.insert(output.end(), outcomes.likelihoods.begin(), outcomes.likelihoods.end());
output.insert(output.end(), outcomes.alignments.begin(), outcomes.alignments.end());
return output;
}
const std::vector<float> rapidGVF::getLikelihoods()
{
return outcomes.likelihoods;
};
const std::vector<float> rapidGVF::getAlignments()
{
return outcomes.alignments;
};
const std::vector<std::vector<float> > * rapidGVF::getDynamics()
{
return &outcomes.dynamics;
};
const std::vector<std::vector<float> > * rapidGVF::getScalings()
{
return &outcomes.scalings;
};
const std::vector<std::vector<float> > * rapidGVF::getRotations()
{
return &outcomes.rotations;
};