Browse Source

attempting to thread

tsns-map
cailean 3 months ago
parent
commit
d1c91b2ba8
  1. 70
      src/ModelThread.h
  2. 19
      src/Onnx.cpp
  3. 2
      src/Onnx.h
  4. 40
      src/Player.cpp
  5. 6
      src/Player.h
  6. 2
      src/main.cpp
  7. 53
      src/ofApp.cpp
  8. 3
      src/ofApp.h

70
src/ModelThread.h

@@ -0,0 +1,70 @@
#pragma once

#include "ofMain.h"
#include "Onnx.h"

#include <condition_variable>
#include <mutex>
class ModelThread : public ofThread
{
public:
ofImage* img;
ofFbo* fbo;
Onnx* model;
~ModelThread(){
stop();
waitForThread(false);
}
void setup(ofImage* _img, ofFbo* _fbo, Onnx* _model){
std::lock_guard<std::mutex> lock(mutex);
this->img = _img;
this->fbo = _fbo;
this->model = _model;
}
void start(){
startThread();
}
void stop(){
stopThread();
condition.notify_all();
}
void threadedFunction(){
while(isThreadRunning()){
std::unique_lock<std::mutex> lock(mutex);
inferDepthImage(fbo, img, model);
condition.wait(lock);
}
}
void update(){
std::lock_guard<std::mutex> lock(mutex);
condition.notify_one();
}
void inferDepthImage(ofFbo* fbo, ofImage* img, Onnx* model){
std::cout << "infer" << std::endl;
auto output_tensors = model->Run(*img);
float* output_ptr = output_tensors.front().GetTensorMutableData<float>();
size_t num_elements = output_tensors.front().GetTensorTypeAndShapeInfo().GetElementCount();
std::cout << "done" << std::endl;
float min_value = model->ReduceMin(output_ptr, num_elements);
float max_value = model->ReduceMax(output_ptr, num_elements);
std::cout << "done1" << std::endl;
model->Normalize(output_ptr, num_elements, min_value, max_value);
std::cout << "don2e" << std::endl;
model->DataToFbo(output_ptr, 518, 518, *fbo);
std::cout << "done3" << std::endl;
}
protected:
std::condition_variable condition;
};

19
src/Onnx.cpp

@@ -52,7 +52,7 @@ void Onnx::Setup(ORTCHAR_T* modelPath, bool isLog, bool useCuda){
// Runs the model, given an image
std::vector<Ort::Value> Onnx::Run(ofImage &img){
std::cout << "hallo" << std::endl;
auto start = std::chrono::high_resolution_clock::now();
TransformImage(img);
@@ -228,26 +228,27 @@ void Onnx::Normalize(float* data, size_t size, float min_value, float max_value){
}
// Coverts the output tensor data to a texture of a given ofFbo.
void Onnx::DataToFbo(const float* data, size_t width, size_t height, ofFbo& fbo){
void Onnx::DataToFbo(float* data, size_t width, size_t height, ofFbo& fbo){
// Convert data into opencv mat
cv::Mat inputMat(height, width, CV_32FC1, const_cast<float*>(data));
// Convert to 8-bit grayscale Mat
//cv::Mat inputMat(height, width, CV_32FC1, const_cast<float*>(data));
cv::Mat inputMat(height, width, CV_32FC1);
memcpy(inputMat.data, data, width * height * sizeof(float));
// // Convert to 8-bit grayscale Mat
cv::Mat inputMat8U;
inputMat.convertTo(inputMat8U, CV_8UC1, 255.0); // Convert float to 8-bit grayscale
// Resize the image using OpenCV
// // Resize the image using OpenCV
cv::Mat resizedMat;
cv::resize(inputMat8U, resizedMat, cv::Size(fbo.getWidth(), fbo.getHeight()), 0, 0, cv::INTER_LINEAR);
// Convert OpenCV Mat to ofPixels
// // Convert OpenCV Mat to ofPixels
ofPixels pixels;
pixels.allocate(fbo.getWidth(), fbo.getHeight(), OF_PIXELS_GRAY);
// Copy data from resizedMat to ofPixels
// // Copy data from resizedMat to ofPixels
memcpy(pixels.getData(), resizedMat.data, fbo.getWidth() * fbo.getHeight());
// Update FBO with new pixels
// // Update FBO with new pixels
fbo.begin();
ofTexture& texture = fbo.getTexture();
texture.loadData(pixels);

2
src/Onnx.h

@ -31,7 +31,7 @@
float ReduceMin(const float* data, size_t size);
float ReduceMax(const float* data, size_t size);
void Normalize(float* data, size_t size, float min_value, float max_value);
void DataToFbo(const float* data, size_t width, size_t height, ofFbo& fbo);
void DataToFbo(float* data, size_t width, size_t height, ofFbo& fbo);
void Softmax(float* data, size_t size);
bool timeStamp = true;
bool log = false;

40
src/Player.cpp

@ -15,9 +15,9 @@ void Player::Setup(){
(2) Updates the video texture, and sets the current frame value */
void Player::Update(ofImage &img){
if(!img.isAllocated() || img.getWidth() != videoPlayer.getWidth() || img.getHeight() != videoPlayer.getHeight()){
img.allocate(videoPlayer.getWidth(), videoPlayer.getHeight(), OF_IMAGE_COLOR);
std::cout << "allocating new ofImage" << std::endl;
if(!img.isAllocated()){
img.allocate(ofGetWindowWidth() / 2, ofGetWindowHeight(), OF_IMAGE_COLOR);
temp.allocate(ofGetWindowWidth() / 2, ofGetWindowHeight(), GL_RGB);
}
if(videoPlayer.isLoaded()){
@ -25,6 +25,38 @@ void Player::Update(ofImage &img){
playerCurrentFrame = videoPlayer.getCurrentFrame();
videoPlayer.update();
videoPlayer.play();
// Calculate the target width and height for model_output_fbo_1
float fbo_1_target_width = img.getWidth(); // 1/2 of the screen width (990px)
float fbo_1_target_height = img.getHeight(); // Full height of the screen
// Calculate the aspect ratio of the video and the FBO
float video_aspect_ratio = videoPlayer.getWidth() / videoPlayer.getHeight();
float fbo_aspect_ratio = fbo_1_target_width / fbo_1_target_height;
// Adjust the scaling to cover the FBO area while maintaining aspect ratio
if (fbo_aspect_ratio > video_aspect_ratio) {
// FBO is wider; scale by width to fill the FBO
new_width = fbo_1_target_width;
new_height = new_width / video_aspect_ratio; // Scale height to maintain aspect ratio
} else {
// FBO is taller; scale by height to fill the FBO
new_height = fbo_1_target_height;
new_width = new_height * video_aspect_ratio; // Scale width to maintain aspect ratio
}
// Center the video to ensure it fills the FBO and is cropped if necessary
x_pos = (ofGetWindowWidth() * 0.25) - (new_width / 2);
y_pos = (ofGetWindowHeight() - new_height) / 2; // Center vertically
temp.begin();
videoPlayer.draw(x_pos, y_pos, new_width, new_height);
temp.end();
ofPixels pixels;
temp.readToPixels(pixels);
img.setFromPixels(pixels);
}
}
@ -44,7 +76,7 @@ ofPixels Player::GetVideoPixels(){
void Player::SetVideo(std::string path, ofFbo &fbo){
videoPlayer.load(path);
videoPlayer.setFrame(800);
fbo.allocate(videoPlayer.getWidth(), videoPlayer.getHeight(), GL_RGB);
fbo.allocate(ofGetWindowWidth() / 2, ofGetWindowHeight(), GL_RGB);
}
// Sets a random frame in the active video

6
src/Player.h

@ -31,6 +31,12 @@ class Player {
glm::vec2 centerPosition;
ofFbo fbo;
ofFbo temp;
float x_pos;
float y_pos;
float new_width;
float new_height;
Player();

2
src/main.cpp

@ -6,7 +6,7 @@ int main( ){
//Use ofGLFWWindowSettings for more options like multi-monitor fullscreen
ofGLWindowSettings settings;
settings.setSize(1920, 1080);
settings.setSize(2000, 1000);
settings.setGLVersion(3, 2);
settings.windowMode = OF_WINDOW; //can also be OF_FULLSCREEN

53
src/ofApp.cpp

@ -30,7 +30,7 @@ void ofApp::setup(){
yolo.Setup(modelPath2, false, true);
depth.Setup(modelPath, false, true);
depth_small.Setup(modelPath4, false, true);
depth_small.Setup(modelPath4, true, true);
emotion.Setup(modelPath3, false, true);
/* Depth output fbo */
@ -43,6 +43,8 @@ void ofApp::setup(){
tempImage.allocate(emoteImage.getWidth(), emoteImage.getHeight(), OF_IMAGE_COLOR);
screen_fbo.allocate(window_width, window_height, GL_RGB);
thread1.setup(&map.fboImage, &model_output_fbo, &depth);
}
@ -51,8 +53,9 @@ void ofApp::setup(){
void ofApp::update(){
/* Check to see if the application has moved to the first frame
As the models need to load first, as the first inference is quite slow */
if(ofGetFrameNum() > 0)
if(ofGetFrameNum() > 0 && ofGetFrameNum() < 2)
firstRun = false;
thread1.start();
/* Clear detetced face list */
detected_faces.clear();
@ -66,16 +69,15 @@ void ofApp::update(){
/* Setup model input using ofImage, allocated fbo */
player.Update(img);
img.setFromPixels(player.GetVideoPixels());
/* Run Models */
try{
// map
inferDepthImage(model_output_fbo, map.fboImage, depth);
// // map
// inferDepthImage(model_output_fbo, map.fboImage, depth);
// video player
inferDepthImage(model_output_fbo_1, img, depth_small);
// // video player
// inferDepthImage(model_output_fbo_1, img, depth_small);
// auto output_tensors_face = yolo.Run(model_input_img);
@ -92,7 +94,11 @@ void ofApp::update(){
/* As no input is generated for the emotion recognition model, run a dummy vector through the model
So it can load */
if(firstRun){
// map
inferDepthImage(model_output_fbo, map.fboImage, depth);
// video player
inferDepthImage(model_output_fbo_1, img, depth_small);
/*
Create a dummy initial input of batch_size = 5, as
when initialising the model, it will attempt to create a space in memory for this array.
@ -109,6 +115,10 @@ void ofApp::update(){
} else {
//inferEmotionalState();
thread1.update();
// video player
inferDepthImage(model_output_fbo_1, img, depth_small);
}
/* Run emotion inference */
@ -128,35 +138,10 @@ void ofApp::draw(){
screen_fbo.begin();
// Calculate the target width and height for model_output_fbo_1
float fbo_1_target_width = window_width * 0.5; // 1/2 of the screen width (990px)
float fbo_1_target_height = window_height; // Full height of the screen
// Calculate the aspect ratio of the video and the FBO
float video_aspect_ratio = model_output_fbo_1.getWidth() / model_output_fbo_1.getHeight();
float fbo_aspect_ratio = fbo_1_target_width / fbo_1_target_height;
// Adjust the scaling to cover the FBO area while maintaining aspect ratio
float new_width, new_height;
if (fbo_aspect_ratio > video_aspect_ratio) {
// FBO is wider; scale by width to fill the FBO
new_width = fbo_1_target_width;
new_height = new_width / video_aspect_ratio; // Scale height to maintain aspect ratio
} else {
// FBO is taller; scale by height to fill the FBO
new_height = fbo_1_target_height;
new_width = new_height * video_aspect_ratio; // Scale width to maintain aspect ratio
}
// Center the video to ensure it fills the FBO and is cropped if necessary
float x_pos = (window_width * 0.75) - (new_width / 2);
float y_pos = (window_height - new_height) / 2; // Center vertically
// Draw the scaled video inside the FBO
model_output_fbo_1.draw(x_pos, y_pos, new_width, new_height);
model_output_fbo.draw(0, 0);
model_output_fbo_1.draw(window_width / 2, 0);
screen_fbo.end();
renderDepthMap();

3
src/ofApp.h

@ -10,6 +10,7 @@
#include "Map.h"
#include <chrono>
#include <iostream>
#include "ModelThread.h"
class ofApp : public ofBaseApp{
@ -67,4 +68,6 @@ class ofApp : public ofBaseApp{
ofFbo model_output_fbo;
ofFbo model_output_fbo_1;
ofFbo screen_fbo;
ModelThread thread1;
};

Loading…
Cancel
Save