@@ -19,7 +19,7 @@ void ofApp::setup(){
 
     /* setup video */
     player.Setup();
-    player.SetVideo("videos/demo.mp4", model_output_fbo_1);
+    player.SetVideoOnAwake("videos/demo.mp4", model_output_fbo_1);
     player.SetFrame("demo.jpg");
 
     /* setup models (modelPath, log, useCuda) */
@@ -28,9 +28,9 @@ void ofApp::setup(){
     ORTCHAR_T* modelPath3 = "/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/apps/myApps/onnx-test/bin/data/rgb_emotion.onnx";
     ORTCHAR_T* modelPath4 = "/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/apps/myApps/onnx-test/bin/data/depth_anything_v2_vits.onnx";
 
-    yolo.Setup(modelPath2, false, true);
-    depth.Setup(modelPath, false, true);
-    depth_small.Setup(modelPath4, false, true);
+    //yolo.Setup(modelPath2, false, true);
+    depth.Setup(modelPath, true, true);
+    depth_small.Setup(modelPath, true, true);
     //emotion.Setup(modelPath3, false, true);
 
     /* Depth output fbo */
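
Note (not part of the patch): the Setup(modelPath, log, useCuda) calls above go through the project's own Onnx wrapper, whose internals aren't shown in this diff. As a rough sketch under that assumption, such a helper would map onto ONNX Runtime's C++ API roughly like this; only the Ort:: calls are real API, the createSession name and its placement are illustrative:

#include <onnxruntime_cxx_api.h>

// Sketch: build an Ort::Session for a model path, optionally on the CUDA
// execution provider. Roughly what a Setup(modelPath, log, useCuda) wrapper
// would do internally.
Ort::Session createSession(Ort::Env& env, const ORTCHAR_T* modelPath, bool useCuda){
    Ort::SessionOptions opts;
    opts.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);
    if(useCuda){
        OrtCUDAProviderOptions cuda{};            // defaults, GPU device 0
        opts.AppendExecutionProvider_CUDA(cuda);
    }
    return Ort::Session(env, modelPath, opts);
}

// usage sketch: the log flag could select the Env's logging level
// Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "onnx-test");
// Ort::Session depthSession = createSession(env, modelPath, true);
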
@@ -46,7 +46,7 @@ void ofApp::setup(){
 
     threadMap.setup(&map.fboImage, &model_output_fbo, &depth);
     threadVideo.setup(&img, &model_output_fbo_1, &depth_small);
-    threadYolo.setupYolo(&img, &detected_faces, &yolo, &faceDetector);
+    //threadYolo.setupYolo(&img, &detected_faces, &yolo, &faceDetector);
 
     /*
        Create a dummy initial input of batch_size = 5, as
@@ -75,11 +75,11 @@ void ofApp::update(){
 
     /* Check to see if the application has moved to the first frame
        As the models need to load first, as the first inference is quite slow */
-    if(ofGetFrameNum() > 0 && ofGetFrameNum() < 2){
+    if(ofGetFrameNum() < 2){
         firstRun = false;
         threadMap.start();
         threadVideo.start();
-        threadYolo.start();
+        //threadYolo.start();
     }
 
     /* Clear detetced face list */
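
Note (not part of the patch): threadMap, threadVideo and threadYolo are the project's own worker classes, so their internals aren't in this diff. A minimal sketch of the setup()/start()/update() pattern they appear to follow, built on openFrameworks' ofThread; every name below is a placeholder, not the project's real API:

#include "ofMain.h"
#include <mutex>

// Sketch: run inference off the GL thread and hand the result back in update().
class InferenceThread : public ofThread {
public:
    void setup(ofImage* in, ofFbo* out){ input = in; output = out; }
    void start(){ startThread(); }
    void stop(){ stopThread(); waitForThread(false); }

    // called from ofApp::update() on the GL thread
    void update(){
        ofPixels local;
        {
            std::lock_guard<std::mutex> guard(mtx);
            if(!hasResult) return;
            local = result;
            hasResult = false;
        }
        tex.loadData(local);
        output->begin();
        tex.draw(0, 0, output->getWidth(), output->getHeight());
        output->end();
    }

private:
    void threadedFunction() override {
        while(isThreadRunning()){
            ofPixels frame = input->getPixels();     // copy out of the shared image
            ofPixels depthMap = runModel(frame);     // placeholder for the ONNX call
            std::lock_guard<std::mutex> guard(mtx);
            result = std::move(depthMap);
            hasResult = true;
        }
    }

    ofPixels runModel(const ofPixels& frame);        // hypothetical

    ofImage* input = nullptr;
    ofFbo*   output = nullptr;
    ofTexture tex;
    ofPixels  result;
    std::mutex mtx;
    bool hasResult = false;
};
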
@@ -100,11 +100,11 @@ void ofApp::update(){
 
     threadMap.update();
     threadVideo.update();
-    threadYolo.update();
+    //threadYolo.update();
 
     depth.SetPixels(model_output_fbo);
     depth_small.SetPixels(model_output_fbo_1);
-    faceDetector.ConvertBoxCoordsToOriginalSize(detected_faces, model_output_fbo_1.getWidth(), model_output_fbo_1.getHeight());
+    //faceDetector.ConvertBoxCoordsToOriginalSize(detected_faces, model_output_fbo_1.getWidth(), model_output_fbo_1.getHeight());
 
     /* As no input is generated for the emotion recognition model, run a dummy vector through the model
        So it can load.
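
Note (not part of the patch): the comment above describes pushing a dummy batch through the emotion model once so the slow first inference happens up front rather than mid-frame. A rough sketch of such a warm-up with the ONNX Runtime C++ API; the batch size of 5 comes from the earlier comment, while the 3x260x260 shape and the tensor names are placeholders that would need to match the real model:

#include <onnxruntime_cxx_api.h>
#include <vector>

// Sketch: run one throwaway inference so the session allocates and the
// first-run cost isn't paid during a real frame.
void warmUp(Ort::Session& session){
    std::vector<int64_t> shape{5, 3, 260, 260};           // dummy batch of 5
    std::vector<float> zeros(5 * 3 * 260 * 260, 0.0f);

    Ort::MemoryInfo mem = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
    Ort::Value input = Ort::Value::CreateTensor<float>(
        mem, zeros.data(), zeros.size(), shape.data(), shape.size());

    const char* inputNames[]  = {"input"};                // placeholder name
    const char* outputNames[] = {"output"};               // placeholder name
    session.Run(Ort::RunOptions{nullptr}, inputNames, &input, 1, outputNames, 1);
    // output is discarded; only the side effect (an initialised session) matters
}
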
@@ -133,10 +133,10 @@ void ofApp::draw(){
 
     renderDepthMap();
 
-    if(!firstRun && detected_faces.size() != 0){
-        faceDetector.DrawBox(detected_faces);
-        faceDetector.DrawCenter(detected_faces);
-    }
+    // if(!firstRun && detected_faces.size() != 0){
+    //     faceDetector.DrawBox(detected_faces);
+    //     faceDetector.DrawCenter(detected_faces);
+    // }
 
     ofPushMatrix();
     ofSetColor(255);
@@ -225,6 +225,8 @@ void ofApp::printEmotions(){
 void ofApp::displayFrame(){
     /* set frame from recent vantage-point tree response */
     player.SetFrame(server->vp_resp.image);
+    /* set video from recent vantage-point tree resp */
+    player.SetVideo(server->vp_resp.video, server->vp_resp.frame, server->is_active);
     /* set image for model input, checks if the clients are active -> yes-frame no-video */
     player.Update(img, server->is_active);
 }