@@ -4,7 +4,6 @@

void ofApp::setup(){

    /* ofSettings */
    ofDisableArbTex();
    ofSetFrameRate(24);
    ofSetVerticalSync(true);

    window_width = ofGetWindowWidth();
    window_height = ofGetWindowHeight();
@@ -30,7 +29,7 @@ void ofApp::setup(){

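    /* Load the models: YOLO face detection, depth estimation (full and reduced), and emotion recognition */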
    yolo.Setup(modelPath2, false, true);
    depth.Setup(modelPath, false, true);
    depth_small.Setup(modelPath4, true, true);
    depth_small.Setup(modelPath4, false, true);
    emotion.Setup(modelPath3, false, true);

    /* Depth output fbo */

@@ -44,7 +43,9 @@ void ofApp::setup(){

    screen_fbo.allocate(window_width, window_height, GL_RGB);

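    /* Worker threads: each thread is given pointers to its input image, its output target,
       and the model it runs, so inference can happen off the main thread */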
    thread1.setup(&map.fboImage, &model_output_fbo, &depth);
    threadMap.setup(&map.fboImage, &model_output_fbo, &depth);
    threadVideo.setup(&img, &model_output_fbo_1, &depth_small);
    threadYolo.setupYolo(&img, &detected_faces, &yolo, &faceDetector);
}
@@ -53,9 +54,12 @@ void ofApp::setup(){

void ofApp::update(){

    /* Check whether the application has moved on from the first frame;
       the models need to load first, as the first inference is quite slow */
    if(ofGetFrameNum() > 0 && ofGetFrameNum() < 2)
    if(ofGetFrameNum() > 0 && ofGetFrameNum() < 2){
        firstRun = false;

        thread1.start();
        threadMap.start();
        threadVideo.start();
        threadYolo.start();
    }

    /* Clear detected face list */
    detected_faces.clear();

@@ -70,35 +74,22 @@ void ofApp::update(){

    /* Setup model input using ofImage, allocated fbo */
    player.Update(img);

    /* Run Models */
    /* Run Models, and set pixels */
    try{
        // // map
        // inferDepthImage(model_output_fbo, map.fboImage, depth);

        // // video player
        // inferDepthImage(model_output_fbo_1, img, depth_small);

        // auto output_tensors_face = yolo.Run(model_input_img);
        // auto output_faces = output_tensors_face.front().GetTensorTypeAndShapeInfo().GetShape();

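        /* Pull the latest results from the worker threads and write each depth model's output into its fbo */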
        threadMap.update();
        depth.SetPixels(model_output_fbo);

        // unsigned int num_anchors = output_faces[1]; // Number of anchors

        threadVideo.update();
        depth_small.SetPixels(model_output_fbo_1);

        // float* output_face_ptr = output_tensors_face.front().GetTensorMutableData<float>();

        threadYolo.update();

        // faceDetector.ParseOutput(output_face_ptr, detected_faces, num_anchors);
        // faceDetector.ConvertBoxCoordsToOriginalSize(detected_faces, outFbo.getWidth(), outFbo.getHeight());

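        /* Map the detected face boxes back to the video output fbo's dimensions */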
        faceDetector.ConvertBoxCoordsToOriginalSize(detected_faces, model_output_fbo_1.getWidth(), model_output_fbo_1.getHeight());

        /* As no input is generated for the emotion recognition model,
           run a dummy vector through the model so that it can load */
        if(firstRun){
            // map
            inferDepthImage(model_output_fbo, map.fboImage, depth);

            // video player
            inferDepthImage(model_output_fbo_1, img, depth_small);

            /*
               Create a dummy initial input with batch_size = 5:
               when the model is initialised, it will attempt to allocate space in memory for this array.
@@ -115,9 +106,6 @@ void ofApp::update(){

        } else {
            //inferEmotionalState();

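            // map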
            thread1.update();

            // video player
            inferDepthImage(model_output_fbo_1, img, depth_small);
        }
@@ -146,10 +134,10 @@ void ofApp::draw(){

    renderDepthMap();

    // if(!firstRun && detected_faces.size() != 0){
    // faceDetector.DrawBox(detected_faces);
    // faceDetector.DrawCenter(detected_faces);
    // }

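    /* Overlay the face detections once the first run has completed and faces have been found */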
    if(!firstRun && detected_faces.size() != 0){
        faceDetector.DrawBox(detected_faces);
        faceDetector.DrawCenter(detected_faces);
    }

    ofPushMatrix();
    ofSetColor(255);