@@ -30,7 +30,7 @@ void ofApp::setup(){
    yolo.Setup(modelPath2, false, true);
    depth.Setup(modelPath, false, true);
    depth_small.Setup(modelPath4, false, true);
    emotion.Setup(modelPath3, false, true);
    //emotion.Setup(modelPath3, false, true);

    /* Depth output fbo */
    model_output_fbo.allocate(window_width / 2, window_height, GL_RGB);

@@ -46,6 +46,18 @@ void ofApp::setup(){
    threadMap.setup(&map.fboImage, &model_output_fbo, &depth);
    threadVideo.setup(&img, &model_output_fbo_1, &depth_small);
    threadYolo.setupYolo(&img, &detected_faces, &yolo, &faceDetector);

    /*
        Create a dummy initial input of batch_size = 5, as
        when initialising the model it will attempt to allocate memory for this array.
        If the batch_size changes it will completely slow down inference, due to how the cudnn_search_algo is set.
        None of the other search algorithms bar EXHAUSTIVE will work... no idea why.

        for(int i = 0; i < emotionImageMaxBatchSize; i++){
            tempImage.setFromPixels(emoteImage.getPixels());
            croppedFaces.push_back(tempImage);
        }
    */
}
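The Onnx::Setup wrapper used above is not shown in this hunk, so the following is only a sketch of how the cuDNN algorithm search that the comment refers to is usually pinned to EXHAUSTIVE with the ONNX Runtime C++ API; makeCudaSessionOptions is a hypothetical helper name, not code from this project.

#include <onnxruntime_cxx_api.h>

// Hypothetical helper (assumption): configure the CUDA execution provider so
// the cuDNN convolution algorithm search runs in EXHAUSTIVE mode, the only
// setting the comment above reports as working.
Ort::SessionOptions makeCudaSessionOptions(){
    Ort::SessionOptions session_options;
    OrtCUDAProviderOptions cuda_options{};
    cuda_options.cudnn_conv_algo_search = OrtCudnnConvAlgoSearchExhaustive;
    session_options.AppendExecutionProvider_CUDA(cuda_options);
    return session_options;
}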

@@ -78,40 +90,18 @@ void ofApp::update(){
    try{
        threadMap.update();
        depth.SetPixels(model_output_fbo);

        threadVideo.update();
        depth_small.SetPixels(model_output_fbo_1);

        threadYolo.update();

        depth.SetPixels(model_output_fbo);
        depth_small.SetPixels(model_output_fbo_1);
        faceDetector.ConvertBoxCoordsToOriginalSize(detected_faces, model_output_fbo_1.getWidth(), model_output_fbo_1.getHeight());

        /* As no input is generated for the emotion recognition model, run a dummy vector through the model
           So it can load */
        if(firstRun){
            /*
                Create a dummy initial input of batch_size = 5, as
                when initialising the model it will attempt to allocate memory for this array.
                If the batch_size changes it will completely slow down inference, due to how the cudnn_search_algo is set.
                None of the other search algorithms bar EXHAUSTIVE will work... no idea why.
            */
            // for(int i = 0; i < emotionImageMaxBatchSize; i++){
            //     tempImage.setFromPixels(emoteImage.getPixels());
            //     croppedFaces.push_back(tempImage);
            // }

            // Run model to warmup
            // auto emotion_output_tensor = emotion.RunBatch(croppedFaces);

        } else {
            //inferEmotionalState();
        }

        /* Run emotion inference */
        //inferEmotionalState();

        So it can load.

        auto emotion_output_tensor = emotion.RunBatch(croppedFaces);
        */

    } catch (exception e){
        std::cout << "Model did not run" << std::endl;
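The commented-out block inside if(firstRun) above is the dummy warm-up pass the comment describes. A minimal sketch of that idea, assuming emoteImage is already allocated to the emotion model's input size and reusing the croppedFaces / tempImage members seen elsewhere in this file; warmupEmotionModel is a hypothetical helper name.

// Hypothetical helper (assumption): fill croppedFaces with exactly
// emotionImageMaxBatchSize copies of emoteImage and run one throwaway batch,
// so the EXHAUSTIVE algorithm search happens once, at the batch size that
// will be used for real inference.
void ofApp::warmupEmotionModel(){
    croppedFaces.clear();
    for(int i = 0; i < emotionImageMaxBatchSize; i++){
        tempImage.setFromPixels(emoteImage.getPixels());
        croppedFaces.push_back(tempImage);
    }
    auto emotion_output_tensor = emotion.RunBatch(croppedFaces); // output discarded; this is only a warm-up
}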

@@ -167,7 +157,6 @@ void ofApp::inferDepthImage(ofFbo& fbo, ofImage& img, Onnx& model){

//--------------------------------------------------------------
void ofApp::inferEmotionalState(){

    /*
        Max faces to process with the model (5)
    */
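The hunk below reads the batched emotion output back as one flat float buffer of shape [batch_size x num_classes]. As a reference for that indexing, a small self-contained sketch; strongestEmotion is a hypothetical helper, not part of the diff.

#include <algorithm>
#include <cstddef>

// Face i's scores start at emotional_data + i * num_classes; the strongest
// emotion is the argmax over those num_classes values.
static size_t strongestEmotion(const float* emotional_data, size_t face_index, size_t num_classes){
    const float* scores = emotional_data + face_index * num_classes;
    return static_cast<size_t>(std::distance(scores, std::max_element(scores, scores + num_classes)));
}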

@@ -188,13 +177,13 @@ void ofApp::inferEmotionalState(){
        for each image -> set emotional state in detected_faces array
    */
    auto emotion_output_tensor = emotion.RunBatch(croppedFaces);

    auto& output_tensor = emotion_output_tensor.front();
    auto output_shap = output_tensor.GetTensorTypeAndShapeInfo().GetShape();
    size_t batch_size = output_shap[0]; // Number of images in the batch
    size_t num_classes = output_shap[1]; // Number of emotion classes
    size_t num_classes = 7; //output_shap[1]; // Number of emotion classes

    float* emotional_data = output_tensor.GetTensorMutableData<float>();
    emotional_data = output_tensor.GetTensorMutableData<float>();

    for (size_t i = 0; i < max_faces_to_process; i++){