
working threads

Branch: tsns-map
cailean, 3 months ago
Commit: 99df9a29fb
7 changed files:
1. onnx-test.code-workspace (3 lines)
2. src/ModelThread.h (41 lines)
3. src/Onnx.cpp (7 lines)
4. src/Onnx.h (2 lines)
5. src/Yolo.cpp (6 lines)
6. src/ofApp.cpp (52 lines)
7. src/ofApp.h (4 lines)

onnx-test.code-workspace (3 lines changed)

@@ -101,7 +101,8 @@
"__hash_table": "cpp",
"__split_buffer": "cpp",
"__tree": "cpp",
"filesystem": "cpp"
"filesystem": "cpp",
"__mutex_base": "cpp"
}
}
}

src/ModelThread.h (41 lines changed)

@@ -1,5 +1,6 @@
#include "ofMain.h"
#include "Onnx.h"
#include "Yolo.h"
class ModelThread : public ofThread
{
@@ -7,6 +8,9 @@ class ModelThread : public ofThread
ofImage* img;
ofFbo* fbo;
Onnx* model;
+ Yolo* yolo;
+ std::vector<types::BoxfWithLandmarks>* detected_faces;
+ std::string model_type;
~ModelThread(){
@@ -19,6 +23,16 @@ class ModelThread : public ofThread
this->img = _img;
this->fbo = _fbo;
this->model = _model;
this->model_type = "depth";
}
+ void setupYolo(ofImage* _img, std::vector<types::BoxfWithLandmarks>* _detected_faces, Onnx* _model, Yolo* _yolo){
+ std::lock_guard<std::mutex> lock(mutex);
+ this->img = _img;
+ this->detected_faces = _detected_faces;
+ this->model_type = "yolo";
+ this->model = _model;
+ this->yolo = _yolo;
+ }
void start(){
@@ -32,9 +46,16 @@ class ModelThread : public ofThread
void threadedFunction(){
while(isThreadRunning()){
if(model_type == "depth"){
std::unique_lock<std::mutex> lock(mutex);
inferDepthImage(fbo, img, model);
condition.wait(lock);
} else if(model_type == "yolo") {
std::unique_lock<std::mutex> lock(mutex);
inferYolo();
condition.wait(lock);
}
}
}
@@ -43,25 +64,29 @@ class ModelThread : public ofThread
condition.notify_one();
}
+ void inferYolo(){
+ auto output_tensors_face = model->Run(*img);
+ auto output_faces = output_tensors_face.front().GetTensorTypeAndShapeInfo().GetShape();
+ unsigned int num_anchors = output_faces[1]; // Number of anchors
+ float* output_face_ptr = output_tensors_face.front().GetTensorMutableData<float>();
+ yolo->ParseOutput(output_face_ptr, *detected_faces, num_anchors);
+ }
void inferDepthImage(ofFbo* fbo, ofImage* img, Onnx* model){
std::cout << "infer" << std::endl;
auto output_tensors = model->Run(*img);
float* output_ptr = output_tensors.front().GetTensorMutableData<float>();
size_t num_elements = output_tensors.front().GetTensorTypeAndShapeInfo().GetElementCount();
std::cout << "done" << std::endl;
float min_value = model->ReduceMin(output_ptr, num_elements);
float max_value = model->ReduceMax(output_ptr, num_elements);
std::cout << "done1" << std::endl;
model->Normalize(output_ptr, num_elements, min_value, max_value);
std::cout << "don2e" << std::endl;
model->DataToFbo(output_ptr, 518, 518, *fbo);
std::cout << "done3" << std::endl;
}
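For readers skimming the diff: the pattern above is a wake-per-frame worker. The main thread calls update() once per frame, which fires condition.notify_one(); the worker runs one inference pass and parks itself again in condition.wait(). A minimal self-contained sketch of that cycle, assuming (as the class above implies) a std::condition_variable member named condition declared outside these hunks:

#include <condition_variable>
#include <mutex>
#include <thread>

class WorkerSketch {
public:
    void start(){
        running = true;
        worker = std::thread([this]{ threadedFunction(); });
    }
    // Called once per frame by the app: wakes the worker for one pass.
    void update(){
        std::lock_guard<std::mutex> lock(mutex);
        condition.notify_one();
    }
    void stop(){
        { std::lock_guard<std::mutex> lock(mutex); running = false; }
        condition.notify_one();
        if(worker.joinable()) worker.join();
    }
private:
    void threadedFunction(){
        std::unique_lock<std::mutex> lock(mutex);
        while(running){
            // one inference pass would run here, under the lock
            condition.wait(lock); // park until the next update()
        }
    }
    std::thread worker;
    std::mutex mutex;
    std::condition_variable condition;
    bool running = false;
};

Note that wait() without a predicate can wake spuriously; ModelThread has the same property, which is harmless here since a spurious wake only costs one extra inference pass.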

src/Onnx.cpp (7 lines changed)

@@ -230,7 +230,6 @@ void Onnx::Normalize(float* data, size_t size, float min_value, float max_value)
// Converts the output tensor data to a texture of a given ofFbo.
void Onnx::DataToFbo(float* data, size_t width, size_t height, ofFbo& fbo){
// Convert data into opencv mat
- //cv::Mat inputMat(height, width, CV_32FC1, const_cast<float*>(data));
cv::Mat inputMat(height, width, CV_32FC1);
memcpy(inputMat.data, data, width * height * sizeof(float));
// // Convert to 8-bit grayscale Mat
@@ -242,19 +241,17 @@ void Onnx::DataToFbo(float* data, size_t width, size_t height, ofFbo& fbo){
cv::resize(inputMat8U, resizedMat, cv::Size(fbo.getWidth(), fbo.getHeight()), 0, 0, cv::INTER_LINEAR);
// // Convert OpenCV Mat to ofPixels
- ofPixels pixels;
pixels.allocate(fbo.getWidth(), fbo.getHeight(), OF_PIXELS_GRAY);
// // Copy data from resizedMat to ofPixels
memcpy(pixels.getData(), resizedMat.data, fbo.getWidth() * fbo.getHeight());
}
// // Update FBO with new pixels
+ void Onnx::SetPixels(ofFbo& fbo){
fbo.begin();
ofTexture& texture = fbo.getTexture();
texture.loadData(pixels);
fbo.end();
}
void Onnx::Softmax(float* data, size_t size) {
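The shape of this change: DataToFbo previously converted the tensor and uploaded it to the FBO in one step; now it only fills the new pixels member (declared in src/Onnx.h below) and the GL upload moved into SetPixels. The split matters because texture.loadData() requires the GL context, which lives on the main thread, while DataToFbo now runs on a worker thread. A sketch of the intended division of labour, using names from this diff:

// Worker thread: CPU-only work, safe without a GL context.
model->DataToFbo(output_ptr, 518, 518, *fbo); // resize + write into the pixels member

// Main (GL) thread, once per frame: upload the latest finished pixels.
depth.SetPixels(model_output_fbo); // texture.loadData(pixels) under a valid GL context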

src/Onnx.h (2 lines changed)

@@ -33,8 +33,10 @@
void Normalize(float* data, size_t size, float min_value, float max_value);
void DataToFbo(float* data, size_t width, size_t height, ofFbo& fbo);
void Softmax(float* data, size_t size);
+ void SetPixels(ofFbo& fbo);
bool timeStamp = true;
bool log = false;
+ ofPixels pixels;
protected:
Ort::Env ort_env;

src/Yolo.cpp (6 lines changed)

@@ -47,14 +47,16 @@ void Yolo::ParseOutput(float* &output_tensors, std::vector<types::BoxfWithLandma
void Yolo::DrawBox(std::vector<types::BoxfWithLandmarks> &detected_faces){
for (const auto &face : detected_faces) {
ofNoFill();
- ofDrawRectangle(face.box.x1, face.box.y1, face.box.x2 - face.box.x1, face.box.y2 - face.box.y1);
+ float w = ofGetWindowWidth() / 2;
+ ofDrawRectangle(face.box.x1 + w, face.box.y1, ((face.box.x2 + w) - (face.box.x1 + w)), face.box.y2 - face.box.y1);
}
}
// Simple helper to draw a circle at the center of the detected face.
void Yolo::DrawCenter(std::vector<types::BoxfWithLandmarks> &detected_faces){
ofNoFill();
- ofDrawCircle(detected_faces[0].box.center, 5);
+ glm::vec2 position = detected_faces[0].box.center;
+ ofDrawCircle(position.x + ofGetWindowWidth() / 2, position.y, 5);
}
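A side note on the rectangle call above: the + w terms cancel in the width argument, so ((face.box.x2 + w) - (face.box.x1 + w)) is just face.box.x2 - face.box.x1. An equivalent, slightly clearer form with the same behaviour:

float w = ofGetWindowWidth() / 2;
// Shift the box into the right half of the window; width and height are unchanged.
ofDrawRectangle(face.box.x1 + w, face.box.y1,
                face.box.x2 - face.box.x1,
                face.box.y2 - face.box.y1);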

src/ofApp.cpp (52 lines changed)

@@ -4,7 +4,6 @@
void ofApp::setup(){
/* ofSettings */
ofDisableArbTex();
ofSetFrameRate(24);
ofSetVerticalSync(true);
window_width = ofGetWindowWidth();
window_height = ofGetWindowHeight();
@@ -30,7 +29,7 @@ void ofApp::setup(){
yolo.Setup(modelPath2, false, true);
depth.Setup(modelPath, false, true);
- depth_small.Setup(modelPath4, true, true);
+ depth_small.Setup(modelPath4, false, true);
emotion.Setup(modelPath3, false, true);
/* Depth output fbo */
@@ -44,7 +43,9 @@ void ofApp::setup(){
screen_fbo.allocate(window_width, window_height, GL_RGB);
- thread1.setup(&map.fboImage, &model_output_fbo, &depth);
+ threadMap.setup(&map.fboImage, &model_output_fbo, &depth);
+ threadVideo.setup(&img, &model_output_fbo_1, &depth_small);
+ threadYolo.setupYolo(&img, &detected_faces, &yolo, &faceDetector);
}
@@ -53,9 +54,12 @@ void ofApp::setup(){
void ofApp::update(){
/* Check whether the application has reached the first frame,
as the models need to load first and the first inference is quite slow */
- if(ofGetFrameNum() > 0 && ofGetFrameNum() < 2)
+ if(ofGetFrameNum() > 0 && ofGetFrameNum() < 2){
firstRun = false;
- thread1.start();
+ threadMap.start();
+ threadVideo.start();
+ threadYolo.start();
+ }
/* Clear detected face list */
detected_faces.clear();
@@ -70,35 +74,22 @@ void ofApp::update(){
/* Setup model input using ofImage, allocated fbo */
player.Update(img);
- /* Run Models */
+ /* Run Models, and set pixels */
try{
- // // map
- // inferDepthImage(model_output_fbo, map.fboImage, depth);
- // // video player
- // inferDepthImage(model_output_fbo_1, img, depth_small);
- // auto output_tensors_face = yolo.Run(model_input_img);
- // auto output_faces = output_tensors_face.front().GetTensorTypeAndShapeInfo().GetShape();
+ threadMap.update();
+ depth.SetPixels(model_output_fbo);
- // unsigned int num_anchors = output_faces[1]; // Number of anchors
+ threadVideo.update();
+ depth_small.SetPixels(model_output_fbo_1);
- // float* output_face_ptr = output_tensors_face.front().GetTensorMutableData<float>();
+ threadYolo.update();
- // faceDetector.ParseOutput(output_face_ptr, detected_faces, num_anchors);
- // faceDetector.ConvertBoxCoordsToOriginalSize(detected_faces, outFbo.getWidth(), outFbo.getHeight());
+ faceDetector.ConvertBoxCoordsToOriginalSize(detected_faces, model_output_fbo_1.getWidth(), model_output_fbo_1.getHeight());
/* As no input is generated for the emotion recognition model, run a dummy vector through the model
so it can load */
if(firstRun){
// map
inferDepthImage(model_output_fbo, map.fboImage, depth);
// video player
inferDepthImage(model_output_fbo_1, img, depth_small);
/*
Create a dummy initial input of batch_size = 5, as
when initialising the model, it will attempt to create a space in memory for this array.
@@ -115,9 +106,6 @@ void ofApp::update(){
} else {
//inferEmotionalState();
- thread1.update();
- // video player
- inferDepthImage(model_output_fbo_1, img, depth_small);
}
@@ -146,10 +134,10 @@ void ofApp::draw(){
renderDepthMap();
- // if(!firstRun && detected_faces.size() != 0){
- // faceDetector.DrawBox(detected_faces);
- // faceDetector.DrawCenter(detected_faces);
- // }
+ if(!firstRun && detected_faces.size() != 0){
+ faceDetector.DrawBox(detected_faces);
+ faceDetector.DrawCenter(detected_faces);
+ }
ofPushMatrix();
ofSetColor(255);
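Taken together, update() now follows a wake-then-read pattern: each worker is nudged once per frame, and the pixels it finished on a previous frame are uploaded and drawn. This trades roughly one frame of latency for a main loop that never blocks on inference. Condensed from the hunks above (all names from this commit):

threadMap.update();                        // wake the map depth worker
depth.SetPixels(model_output_fbo);         // upload its last finished result
threadVideo.update();                      // wake the video depth worker
depth_small.SetPixels(model_output_fbo_1);
threadYolo.update();                       // wake the face detector; boxes land in detected_faces
faceDetector.ConvertBoxCoordsToOriginalSize(detected_faces,
    model_output_fbo_1.getWidth(), model_output_fbo_1.getHeight());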

src/ofApp.h (4 lines changed)

@@ -69,5 +69,7 @@ class ofApp : public ofBaseApp{
ofFbo model_output_fbo_1;
ofFbo screen_fbo;
- ModelThread thread1;
+ ModelThread threadMap;
+ ModelThread threadVideo;
+ ModelThread threadYolo;
};
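One thing the diff leaves out is teardown. Because threadedFunction() parks in condition.wait(), stopping a ModelThread needs a wake-up after the running flag is cleared, otherwise waitForThread() can hang on a worker that never re-checks the flag. A hypothetical ofApp::exit() along these lines (not part of this commit; stopThread() and waitForThread() are standard ofThread methods, and update() is the notify_one() wrapper shown in ModelThread.h above):

void ofApp::exit(){
    for(auto* t : {&threadMap, &threadVideo, &threadYolo}){
        t->stopThread();         // flips isThreadRunning() to false
        t->update();             // condition.notify_one() so wait() returns
        t->waitForThread(false); // join without calling stopThread() again
    }
}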
