Browse Source

unsure where to go!

tsns-map
cailean 2 weeks ago
parent
commit
b78446bcf7
  1. 4
      src/ModelThread.h
  2. 1
      src/Onnx.cpp
  3. 2
      src/Onnx.h
  4. 2
      src/main.cpp
  5. 53
      src/ofApp.cpp
  6. 2
      src/ofApp.h

4
src/ModelThread.h

@ -12,6 +12,10 @@ class ModelThread : public ofThread
std::vector<types::BoxfWithLandmarks>* detected_faces;
std::string model_type;
// emotional recognition model
std::vector<ofImage>* croppedFaces;
float* emotional_data;
~ModelThread(){
stop();

1
src/Onnx.cpp

@ -52,7 +52,6 @@ void Onnx::Setup(ORTCHAR_T* modelPath, bool isLog, bool useCuda){
// Runs the model, given an image
std::vector<Ort::Value> Onnx::Run(ofImage &img){
std::cout << "hallo" << std::endl;
auto start = std::chrono::high_resolution_clock::now();
TransformImage(img);

2
src/Onnx.h

@ -34,7 +34,7 @@
void DataToFbo(float* data, size_t width, size_t height, ofFbo& fbo);
void Softmax(float* data, size_t size);
void SetPixels(ofFbo& fbo);
bool timeStamp = true;
bool timeStamp = false;
bool log = false;
ofPixels pixels;

2
src/main.cpp

@ -6,7 +6,7 @@ int main( ){
//Use ofGLFWWindowSettings for more options like multi-monitor fullscreen
ofGLWindowSettings settings;
settings.setSize(2000, 1000);
settings.setSize(1920, 720);
settings.setGLVersion(3, 2);
settings.windowMode = OF_WINDOW; //can also be OF_FULLSCREEN

53
src/ofApp.cpp

@ -30,7 +30,7 @@ void ofApp::setup(){
yolo.Setup(modelPath2, false, true);
depth.Setup(modelPath, false, true);
depth_small.Setup(modelPath4, false, true);
emotion.Setup(modelPath3, false, true);
//emotion.Setup(modelPath3, false, true);
/* Depth output fbo */
model_output_fbo.allocate(window_width / 2, window_height, GL_RGB);
@ -46,6 +46,18 @@ void ofApp::setup(){
threadMap.setup(&map.fboImage, &model_output_fbo, &depth);
threadVideo.setup(&img, &model_output_fbo_1, &depth_small);
threadYolo.setupYolo(&img, &detected_faces, &yolo, &faceDetector);
/*
Create a dummy initial input of batch_size = 5, as
when initialising the model, it will attempt to create a space in memory for this array.
If the batch_size does change it will completely slow down inference, due to how the cudnn_search_algo is set.
None of the other search algorithms bar EXHAUSTIVE will work; no idea why.
for(int i = 0; i < emotionImageMaxBatchSize; i++){
tempImage.setFromPixels(emoteImage.getPixels());
croppedFaces.push_back(tempImage);
}
*/
}
@ -78,40 +90,18 @@ void ofApp::update(){
try{
threadMap.update();
depth.SetPixels(model_output_fbo);
threadVideo.update();
depth_small.SetPixels(model_output_fbo_1);
threadYolo.update();
depth.SetPixels(model_output_fbo);
depth_small.SetPixels(model_output_fbo_1);
faceDetector.ConvertBoxCoordsToOriginalSize(detected_faces, model_output_fbo_1.getWidth(), model_output_fbo_1.getHeight());
/* As no input is generated for the emotion recognition model, run a dummy vector through the model
so it can load. */
if(firstRun){
/*
Create a dummy initial input of batch_size = 5, as
when initialising the model, it will attempt to create a space in memory for this array.
If the batch_size does change it will completely slow down inference, due to how the cudnn_search_algo is set.
None of the other search algorithms bar EXHAUSTIVE will work; no idea why.
*/
// for(int i = 0; i < emotionImageMaxBatchSize; i++){
// tempImage.setFromPixels(emoteImage.getPixels());
// croppedFaces.push_back(tempImage);
// }
// Run model to warmup
// auto emotion_output_tensor = emotion.RunBatch(croppedFaces);
} else {
//inferEmotionalState();
}
/* Run emotion inference */
//inferEmotionalState();
So it can load.
auto emotion_output_tensor = emotion.RunBatch(croppedFaces);
*/
} catch (exception e){
std::cout << "Model did not run" << std::endl;
@ -167,7 +157,6 @@ void ofApp::inferDepthImage(ofFbo& fbo, ofImage& img, Onnx& model){
//--------------------------------------------------------------
void ofApp::inferEmotionalState(){
/*
Max faces to process with the model (5)
*/
@ -188,13 +177,13 @@ void ofApp::inferEmotionalState(){
for each image -> set emotional state in detected_faces array
*/
auto emotion_output_tensor = emotion.RunBatch(croppedFaces);
auto& output_tensor = emotion_output_tensor.front();
auto output_shap = output_tensor.GetTensorTypeAndShapeInfo().GetShape();
size_t batch_size = output_shap[0]; // Number of images in the batch
size_t num_classes = output_shap[1]; // Number of emotion classes
size_t num_classes = 7; //output_shap[1]; // Number of emotion classes
float* emotional_data = output_tensor.GetTensorMutableData<float>();
emotional_data = output_tensor.GetTensorMutableData<float>();
for (size_t i = 0; i < max_faces_to_process; i++){

2
src/ofApp.h

@ -55,6 +55,7 @@ class ofApp : public ofBaseApp{
Emotef emo;
Yolo faceDetector;
float* emotional_data;
std::vector<types::BoxfWithLandmarks> detected_faces;
Map map;
@ -72,4 +73,5 @@ class ofApp : public ofBaseApp{
ModelThread threadMap;
ModelThread threadVideo;
ModelThread threadYolo;
ModelThread threadEmotion;
};

Loading…
Cancel
Save