Compare commits

20 Commits

Files changed (21):

1. .vscode/c_cpp_properties.json (27 changes)
2. .vscode/settings.json (3 changes)
3. addons.make (10 changes)
4. onnx-test.code-workspace (4 changes)
5. src/Map.cpp (282 changes)
6. src/Map.h (87 changes)
7. src/ModelThread.h (99 changes)
8. src/Onnx.cpp (37 changes)
9. src/Onnx.h (6 changes)
10. src/Player.cpp (121 changes)
11. src/Player.h (19 changes)
12. src/QuadSource/QuadSource.cpp (21 changes)
13. src/QuadSource/QuadSource.h (17 changes)
14. src/Request.cpp (40 changes)
15. src/Request.h (46 changes)
16. src/Server.cpp (159 changes)
17. src/Server.h (54 changes)
18. src/Yolo.cpp (6 changes)
19. src/main.cpp (4 changes)
20. src/ofApp.cpp (240 changes)
21. src/ofApp.h (45 changes)

.vscode/c_cpp_properties.json (27 changes)

@@ -107,7 +107,31 @@
"${workspaceRoot}",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxOpenCv/src",
"/usr/local/onnxruntime-linux-x64-gpu-1.19.2/include",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxOpenCv/libs/opencv/include/opencv4"
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxOpenCv/libs/opencv/include/opencv4",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxOsc/src",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxNetwork/src",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxGui/src",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxTSNE/src",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxOsc/libs/oscpack/src/osc",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxOsc/libs/oscpack/src/ip",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxTSNE/src/bhtsne",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxPiMapper/src",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxPiMapper/src/Sources",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxPiMapper/src/Application",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxPiMapper/src/Info",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxPiMapper/src/Surfaces",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxPiMapper/src/Types",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxPiMapper/src/Types",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxPiMapper/src/Gui",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxPiMapper/src/Gui",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxPiMapper/src/Gui/Widgets",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxPiMapper/src/Utils",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxPiMapper/src/MediaServer",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxXmlSettings/src",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxXmlSettings/libs",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxPiMapper/src/UserInterface",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxPiMapper/src/Commands",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxPiMapper/src/Application/Modes"
],
"browse": {
"limitSymbolsToIncludedHeaders": true,
@@ -119,6 +143,7 @@
"${workspaceRoot}/../../../addons",
"${workspaceRoot}/../../../libs/openFrameworks",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxOpenCv/src",
"/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/addons/ofxPiMapper/src",
"/usr/local/onnxruntime-linux-x64-gpu-1.19.2/include"
]
},

.vscode/settings.json (3 changes)

@@ -0,0 +1,3 @@
{
"C_Cpp.errorSquiggles": "enabled"
}

addons.make (10 changes)

@@ -1,2 +1,12 @@
ofxOpenCv
ofxOpenCv
ofxOsc
ofxOsc
ofxNetwork
ofxNetwork
ofxGui
ofxGui
ofxTSNE
ofxTSNE
ofxPiMapper
ofxPiMapper

onnx-test.code-workspace (4 changes)

@@ -101,7 +101,9 @@
"__hash_table": "cpp",
"__split_buffer": "cpp",
"__tree": "cpp",
"filesystem": "cpp"
"filesystem": "cpp",
"__mutex_base": "cpp",
"__node_handle": "cpp"
}
}
}

src/Map.cpp (282 changes)

@@ -0,0 +1,282 @@
#include "Map.h"
#include "algorithm"
Map::Map(){
}
void Map::Setup(){
scale = 10000;
isDebug = true;
json_embeddings = ofLoadJson(jsonPath);
if(!json_embeddings.is_array()){
ofLogError() << "JSON is not an array";
return;
} else {
std::cout << "JSON LOADED" << std::endl;
SetupTSNE();
}
mapFbo.allocate(ofGetWindowWidth() / 2, ofGetWindowHeight(), GL_RGB);
fboImage.allocate(ofGetWindowWidth() / 2, ofGetWindowHeight(), OF_IMAGE_COLOR);
Setup3D();
SetupNodes();
}
void Map::Update(std::string& vp_img, bool& is_active){
time = ofGetElapsedTimef();
for(auto& n : nodes){
glm::vec2 offset = n.amplitude * sin(n.speed * time);
n.offset = glm::vec3(offset.x, offset.y, 0);
}
std::cout << cam.getPosition() << std::endl;
if(is_active){
has_reached = false;
}
if(!is_active)
MoveCamera(vp_img);
}
void Map::Draw(){
mapFbo.begin();
cam.begin();
ofClear(0, 0, 0, 1);
ofMatrix4x4 projectionMatrix = cam.getProjectionMatrix();
ofMatrix4x4 viewMatrix = cam.getModelViewMatrix();
ofMatrix4x4 mvpMatrix = projectionMatrix * viewMatrix;
for (auto& n :sortedNodes){
glm::vec3 node_position = n->position + n->offset;
if(isFrustum(node_position, 50)){
n->texture.getTexture().bind();
ofPushMatrix();
ofFill();
n->geom.setPosition(node_position);
n->geom.draw();
ofPopMatrix();
n->texture.getTexture().unbind();
}
}
cam.end();
mapFbo.end();
mapFbo.readToPixels(fboPixels);
fboImage.setFromPixels(fboPixels);
}
/*
Setup texture for each node
*/
void Map::SetupNodes(){
std::cout << "Setting up nodes.." << std::endl;
for (auto& node : nodes){
ofImage img;
ofPlanePrimitive plane;
img.load("data/" + node.image_path);
int imageW = img.getWidth() * 0.1;
int imageH = img.getHeight() * 0.1;
int maxWidth = 1;
int maxHeight = 1;
float aspectRatio = (float)imageW / (float)imageH;
// Determine plane dimensions based on aspect ratio and constraints
float planeW, planeH;
if (aspectRatio > (float)maxWidth / maxHeight) {
// Image is wider relative to the max constraints
planeW = maxWidth;
planeH = maxWidth / aspectRatio;
} else {
// Image is taller relative to the max constraints
planeH = maxHeight;
planeW = maxHeight * aspectRatio;
}
// Ensure that dimensions do not exceed constraints
if (planeH > maxHeight) {
planeH = maxHeight;
planeW = maxHeight * aspectRatio;
}
if (planeW > maxWidth) {
planeW = maxWidth;
planeH = maxWidth / aspectRatio;
}
plane.set(imageW, imageH);
plane.setPosition(node.position);
plane.setResolution(10, 10);
plane.mapTexCoordsFromTexture(img.getTexture());
img.getTexture().setTextureWrap(GL_REPEAT, GL_REPEAT);
plane.setScale(1);
node.geom = plane;
node.texture = img;
node.speed = glm::vec2(ofRandom(0.1, 0.5), ofRandom(0.1, 0.5));
node.amplitude = glm::vec2(ofRandom(1, 5), ofRandom(1, 5));
node.phase = glm::vec2(ofRandom(0, TWO_PI), ofRandom(0, TWO_PI));
}
std::cout << "Finished setting up nodes!" << std::endl;
}
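Note: planeW/planeH are computed above but plane.set(imageW, imageH) uses the raw scaled image size, so the whole aspect-fit block is currently dead code; presumably the fitted dimensions were intended (an assumption, not something the commit confirms):
// sketch: use the aspect-fitted dimensions computed above
plane.set(planeW, planeH);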
/*
Setup TSNE - reads JSON, converts to points, creates nodes & hashmap
*/
void Map::SetupTSNE(){
for (const auto& entry: json_embeddings) {
if (entry.contains("vector") && entry["vector"].is_array()) {
Node n;
n.foldername = entry["folder"];
n.image_path = entry["image"];
n.isLost = entry["lost"].get<int>();
std::vector<float> emb;
for (const auto& value: entry["vector"]){
if(value.is_number()){
emb.push_back(value.get<float>());
} else {
ofLogError() << "Vector value is not a number";
}
}
n.emotion_vector = emb;
nodes.push_back(n);
embeddings.push_back(emb);
}
}
std::cout << nodes.size() << std::endl;
points = tsne.run(embeddings, dims, perplexity, theta, normalise, runManually);
for (size_t i = 0; i < points.size(); i++){
const auto& vec = points[i];
auto& n = nodes[i];
n.position = (glm::vec3(vec[0] * scale, vec[1] * scale, vec[2] * scale));
node_hash_map[n.image_path] = &n;
}
// Instead of sorting nodes, just create a sorted reference list
for (auto& node : nodes) {
sortedNodes.push_back(&node); // Pointers to the original nodes
}
SortNodes(); // Sort the references for rendering
}
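Note: node_hash_map and sortedNodes hold raw pointers into nodes, which stays safe only because every push_back happens before the pointers are taken. If nodes were ever appended to after SetupTSNE, reserving up front (a sketch, not part of the commit) would guard against reallocation invalidating them:
// sketch: reserve before filling so pointers into `nodes` can never be
// invalidated by a later reallocation
nodes.reserve(json_embeddings.size());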
/*
Setup 3D environment
*/
void Map::Setup3D(){
cam.setFov(20);
cam.removeAllInteractions();
}
/*
Query hashmap
*/
bool Map::SearchMap(std::string& search_string){
std::string t = search_string;
if(node_hash_map.find(t) != node_hash_map.end()){
Node* n = node_hash_map[t];
std::cout << n->foldername << std::endl;
return true;
} else {
return false;
}
}
/* search if image is in the hashmap -> if true, move to its position */
void Map::MoveCamera(std::string& vp_img){
if(SearchMap(vp_img)){
Node* n = node_hash_map[vp_img];
glm::vec3 cur_pos(cam.getPosition().x, cam.getPosition().y, cam.getPosition().z);
glm::vec3 target_pos((n->position.x), (n->position.y), (n->position.z) + 100);
glm::vec3 dir = target_pos - cur_pos;
float dist = glm::length(dir);
dir = glm::normalize(dir);
accel = dir * 0.02f;
vel += accel;
// Define minimum and maximum velocities based on distance
const float minVelocity = 0.01f; // Minimum velocity when close to the target
const float maxVelocityDistance = 1000.0f; // Distance at which max velocity is applied
const float maxVelocity = 50.0f; // Maximum velocity
// Scale max velocity based on distance
float dynamicMaxVelocity = glm::clamp(dist / maxVelocityDistance * maxVelocity, minVelocity, maxVelocity);
// Clamp the velocity to the range [-dynamicMaxVelocity, dynamicMaxVelocity]
float velocityLength = glm::length(vel);
if (velocityLength > dynamicMaxVelocity) {
vel = glm::normalize(vel) * dynamicMaxVelocity;
}
// Update position
glm::vec3 newPosition = cur_pos + vel;
if(cam.getPosition() == target_pos){
has_reached = true;
}
if(!has_reached){
cam.setPosition(target_pos);
}
std::cout << "velocity " << vel << std::endl;
std::cout << "target " << target_pos << std::endl;
std::cout << "new position " << cam.getPosition() << std::endl;
}
}
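Note: in this function newPosition is computed but never applied; the camera snaps via cam.setPosition(target_pos), which is also the only reason the exact float comparison with target_pos can succeed. If a smooth approach is the intent, a sketch of that (assuming a 1-unit stop radius) would be:
// sketch: apply the clamped velocity instead of snapping to the target,
// and stop once within a small radius rather than testing exact equality
if(!has_reached){
    cam.setPosition(newPosition);
}
if(glm::length(target_pos - cam.getPosition()) < 1.0f){
    has_reached = true;
    vel = glm::vec3(0.0f);
}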
void Map::buttonPressed(int key){
if(key==OF_KEY_LEFT){
z_adj -= 1;
} else {
z_adj += 1;
}
}
/*
Sort nodes by z position, for draw calls (lowest -> highest)
*/
void Map::SortNodes(){
std::sort(sortedNodes.begin(), sortedNodes.end(), [](const Node* a, const Node* b){
return a->position.z < b->position.z; // Sorting by z-position
});
}
bool Map::isFrustum(const glm::vec3& position, float radius){
float box_w = 2000;
float box_h = 2000;
glm::vec3 cam_position = cam.getPosition();
float left = cam_position.x - box_w / 2.0f;
float right = cam_position.x + box_w / 2.0f;
float top = cam_position.y + box_h / 2.0f;
float bottom = cam_position.y - box_h / 2.0f;
// Check if the object (with radius) is within the bounding box
if (position.x + radius < left || position.x - radius > right) return false;
if (position.y + radius < bottom || position.y - radius > top) return false;
// Z-depth doesn't matter in this approach, so we're ignoring it
return true;
}

src/Map.h (87 changes)

@@ -0,0 +1,87 @@
#ifndef _MAP
#define _MAP
#include "ofMain.h"
#include "ofxOsc.h"
#include "ofxTSNE.h"
struct Node {
glm::vec2 speed;
glm::vec2 amplitude;
glm::vec2 phase;
glm::vec3 offset;
ofImage texture;
ofPlanePrimitive geom;
glm::vec3 position;
std::string image_path;
std::string foldername;
std::vector<float> emotion_vector;
int isLost;
};
class Map {
public:
/*
Methods
*/
Map();
void Setup();
void Update(std::string& vp_img, bool& is_active);
void Draw();
void SetupNodes();
void SetupTSNE();
void Setup3D();
bool SearchMap(std::string& search_string);
void SortNodes();
void MoveCamera(std::string& vp_img);
bool isFrustum(const glm::vec3& position, float radius);
void buttonPressed(int key);
/*
Variables
*/
bool isDebug;
float time;
ofFbo mapFbo;
std::vector<Node> nodes;
std::vector<Node*> sortedNodes;
float scale;
ofxTSNE tsne;
vector<vector<double>> points;
vector<vector<float>> embeddings;
int dims = 3;
float perplexity = 20;
float theta = 0.2;
bool normalise = true;
bool runManually = false;
ofJson json_embeddings;
std::string jsonPath = "data/json/embeddings.json";
std::unordered_map<std::string, Node*> node_hash_map;
/*
3D Variables
*/
ofLight light;
ofEasyCam cam;
float zoom;
glm::vec3 accel;
glm::vec3 vel;
float z_adj = 0;
bool has_reached = false;
ofImage fboImage;
ofPixels fboPixels;
private:
};
#endif

src/ModelThread.h (99 changes)

@@ -0,0 +1,99 @@
#include "ofMain.h"
#include "Onnx.h"
#include "Yolo.h"
class ModelThread : public ofThread
{
public:
ofImage* img;
ofFbo* fbo;
Onnx* model;
Yolo* yolo;
std::vector<types::BoxfWithLandmarks>* detected_faces;
std::string model_type;
// emotional recognition model
std::vector<ofImage>* croppedFaces;
float* emotional_data;
~ModelThread(){
stop();
waitForThread(false);
}
void setup(ofImage* _img, ofFbo* _fbo, Onnx* _model){
std::lock_guard<std::mutex> lock(mutex);
this->img = _img;
this->fbo = _fbo;
this->model = _model;
this->model_type = "depth";
}
void setupYolo(ofImage* _img, std::vector<types::BoxfWithLandmarks>* _detected_faces, Onnx* _model, Yolo* _yolo){
std::lock_guard<std::mutex> lock(mutex);
this->img = _img;
this->detected_faces = _detected_faces;
this->model_type = "yolo";
this->model = _model;
this->yolo = _yolo;
}
void start(){
startThread();
}
void stop(){
stopThread();
condition.notify_all();
}
void threadedFunction(){
while(isThreadRunning()){
if(model_type == "depth"){
std::unique_lock<std::mutex> lock(mutex);
inferDepthImage(fbo, img, model);
condition.wait(lock);
} else if(model_type == "yolo") {
std::unique_lock<std::mutex> lock(mutex);
inferYolo();
condition.wait(lock);
}
}
}
void update(){
std::lock_guard<std::mutex> lock(mutex);
condition.notify_one();
}
void inferYolo(){
auto output_tensors_face = model->Run(*img);
auto output_faces = output_tensors_face.front().GetTensorTypeAndShapeInfo().GetShape();
unsigned int num_anchors = output_faces[1]; // Number of anchors
float* output_face_ptr = output_tensors_face.front().GetTensorMutableData<float>();
yolo->ParseOutput(output_face_ptr, *detected_faces, num_anchors);
}
void inferDepthImage(ofFbo* fbo, ofImage* img, Onnx* model){
auto output_tensors = model->Run(*img);
float* output_ptr = output_tensors.front().GetTensorMutableData<float>();
size_t num_elements = output_tensors.front().GetTensorTypeAndShapeInfo().GetElementCount();
float min_value = model->ReduceMin(output_ptr, num_elements);
float max_value = model->ReduceMax(output_ptr, num_elements);
model->Normalize(output_ptr, num_elements, min_value, max_value);
model->DataToFbo(output_ptr, 518, 518, *fbo);
}
protected:
std::condition_variable condition;
};
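Note: condition.wait(lock) with no predicate is open to spurious wakeups, and a notify_one that fires before the worker reaches wait is simply lost. A predicate-guarded handshake (a sketch, assuming one inference per update() call is the intent; frame_ready is a hypothetical member) avoids both:
// sketch: predicate-guarded handshake between update() and threadedFunction()
bool frame_ready = false;

void update(){
    std::lock_guard<std::mutex> lock(mutex);
    frame_ready = true;
    condition.notify_one();
}

// inside threadedFunction(), replacing the bare wait:
std::unique_lock<std::mutex> lock(mutex);
condition.wait(lock, [this]{ return frame_ready || !isThreadRunning(); });
frame_ready = false;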

src/Onnx.cpp (37 changes)

@@ -1,4 +1,5 @@
#include "Onnx.h"
#include <cmath>
// Setups the model. CUDA is enabled by default
void Onnx::Setup(ORTCHAR_T* modelPath, bool isLog, bool useCuda){
@@ -51,7 +52,6 @@ void Onnx::Setup(ORTCHAR_T* modelPath, bool isLog, bool useCuda){
// Runs the model, given an image
std::vector<Ort::Value> Onnx::Run(ofImage &img){
auto start = std::chrono::high_resolution_clock::now();
TransformImage(img);
@@ -75,14 +75,10 @@ std::vector<Ort::Value> Onnx::Run(ofImage &img){
std::transform(std::begin(output_node_names), std::end(output_node_names), std::begin(output_names_char),
[&](const std::string& str) { return str.c_str(); });
std::cout << "Running model... ";
try {
auto output_tensors = ort_session->Run(Ort::RunOptions{nullptr}, input_names_char.data(), input_tensors.data(),
input_names_char.size(), output_names_char.data(), output_names_char.size());
std::cout << "Done!" << std::endl;
if (timeStamp) {
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> elapsed = end - start;
@@ -146,14 +142,10 @@ std::vector<Ort::Value> Onnx::RunBatch(std::vector<ofImage>& images){
std::transform(std::begin(output_node_names), std::end(output_node_names), std::begin(output_names_char),
[&](const std::string& str) { return str.c_str(); });
std::cout << "Running model... ";
try {
auto output_tensors = ort_session->Run(Ort::RunOptions{nullptr}, input_names_char.data(), input_tensors.data(),
input_names_char.size(), output_names_char.data(), output_names_char.size());
std::cout << "Done!" << std::endl;
if (timeStamp) {
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> elapsed = end - start;
@@ -235,37 +227,30 @@ void Onnx::Normalize(float* data, size_t size, float min_value, float max_value)
}
// Converts the output tensor data to a texture of a given ofFbo.
void Onnx::DataToFbo(const float* data, size_t width, size_t height, ofFbo& fbo){
void Onnx::DataToFbo(float* data, size_t width, size_t height, ofFbo& fbo){
// Convert data into opencv mat
cv::Mat inputMat(height, width, CV_32FC1, const_cast<float*>(data));
// Convert to 8-bit grayscale Mat
cv::Mat inputMat(height, width, CV_32FC1);
memcpy(inputMat.data, data, width * height * sizeof(float));
// // Convert to 8-bit grayscale Mat
cv::Mat inputMat8U;
inputMat.convertTo(inputMat8U, CV_8UC1, 255.0); // Convert float to 8-bit grayscale
// Resize the image using OpenCV
// // Resize the image using OpenCV
cv::Mat resizedMat;
cv::resize(inputMat8U, resizedMat, cv::Size(fbo.getWidth(), fbo.getHeight()), 0, 0, cv::INTER_LINEAR);
// Convert OpenCV Mat to ofPixels
ofPixels pixels;
// // Convert OpenCV Mat to ofPixels
pixels.allocate(fbo.getWidth(), fbo.getHeight(), OF_PIXELS_GRAY);
// Copy data from resizedMat to ofPixels
for (size_t y = 0; y < fbo.getHeight(); ++y) {
for (size_t x = 0; x < fbo.getWidth(); ++x) {
unsigned char value = resizedMat.at<uchar>(y, x);
pixels.setColor(x, y, ofColor(value));
}
}
// // Copy data from resizedMat to ofPixels
memcpy(pixels.getData(), resizedMat.data, fbo.getWidth() * fbo.getHeight());
}
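Note: the new memcpy path assumes both mats are packed in memory. The freshly allocated inputMat is, and cv::resize output normally is too, but guarding costs one line (a sketch):
// sketch: cv::Mat rows can be padded; force a packed buffer before the copy
if(!resizedMat.isContinuous()) resizedMat = resizedMat.clone();
memcpy(pixels.getData(), resizedMat.data, fbo.getWidth() * fbo.getHeight());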
// Update FBO with new pixels
void Onnx::SetPixels(ofFbo& fbo){
fbo.begin();
ofTexture& texture = fbo.getTexture();
texture.loadData(pixels);
fbo.end();
}
void Onnx::Softmax(float* data, size_t size) {

src/Onnx.h (6 changes)

@@ -31,10 +31,12 @@
float ReduceMin(const float* data, size_t size);
float ReduceMax(const float* data, size_t size);
void Normalize(float* data, size_t size, float min_value, float max_value);
void DataToFbo(const float* data, size_t width, size_t height, ofFbo& fbo);
void DataToFbo(float* data, size_t width, size_t height, ofFbo& fbo);
void Softmax(float* data, size_t size);
bool timeStamp = true;
void SetPixels(ofFbo& fbo);
bool timeStamp = false;
bool log = false;
ofPixels pixels;
protected:
Ort::Env ort_env;

src/Player.cpp (121 changes)

@@ -6,26 +6,68 @@ Player::Player(){
/* Basic ofVideoPlayer setup */
void Player::Setup(){
videoPlayer.setLoopState(OF_LOOP_NONE);
videoPlayer.setLoopState(OF_LOOP_NORMAL);
videoPlayer.setVolume(0);
}
/* Updates the video player:
(1) Allocates the required W x H for the model input
(2) Updates the video texture, and sets the current frame value */
void Player::Update(ofImage &img){
void Player::Update(ofImage &img, bool show_frame){
if(!img.isAllocated() || img.getWidth() != videoPlayer.getWidth() || img.getHeight() != videoPlayer.getHeight()){
img.allocate(videoPlayer.getWidth(), videoPlayer.getHeight(), OF_IMAGE_COLOR);
std::cout << "allocating new ofImage" << std::endl;
if(!img.isAllocated()){
img.allocate(ofGetWindowWidth() / 2, ofGetWindowHeight(), OF_IMAGE_COLOR);
temp.allocate(ofGetWindowWidth() / 2, ofGetWindowHeight(), GL_RGB);
}
if(videoPlayer.isLoaded()){
// Calculate the target width and height for model_output_fbo_1
float fbo_1_target_width = img.getWidth(); // 1/2 of the screen width (990px)
float fbo_1_target_height = img.getHeight(); // Full height of the screen
float fbo_aspect_ratio = fbo_1_target_width / fbo_1_target_height;
float aspect_ratio;
if(show_frame && frame.isAllocated()){
aspect_ratio = frame.getWidth() / frame.getHeight();
}
if(videoPlayer.isLoaded() && !show_frame){
hasVideo = true;
playerCurrentFrame = videoPlayer.getCurrentFrame();
std::cout << playerCurrentFrame << std::endl;
videoPlayer.update();
videoPlayer.play();
aspect_ratio = videoPlayer.getWidth() / videoPlayer.getHeight();
}
if (fbo_aspect_ratio > aspect_ratio) {
// FBO is wider; scale by width to fill the FBO
new_width = fbo_1_target_width;
new_height = new_width / aspect_ratio; // Scale height to maintain aspect ratio
} else {
// FBO is taller; scale by height to fill the FBO
new_height = fbo_1_target_height;
new_width = new_height * aspect_ratio; // Scale width to maintain aspect ratio
}
// Center the video to ensure it fills the FBO and is cropped if necessary
x_pos = (ofGetWindowWidth() * 0.25) - (new_width / 2);
y_pos = (ofGetWindowHeight() - new_height) / 2; // Center vertically
if(show_frame && frame.isAllocated()){
set_frame = true;
temp.begin();
frame.draw(x_pos, y_pos, new_width, new_height);
temp.end();
}
if(videoPlayer.isLoaded() && !show_frame){
temp.begin();
videoPlayer.draw(x_pos, y_pos, new_width, new_height);
temp.end();
}
ofPixels pixels;
temp.readToPixels(pixels);
img.setFromPixels(pixels);
}
void Player::Draw(){
@@ -41,10 +83,49 @@ ofPixels Player::GetVideoPixels(){
/* Loads the video from path:
(1) Sets the frame
(2) Allocates the fbo dims for final render */
void Player::SetVideo(std::string path, ofFbo &fbo){
void Player::SetVideoOnAwake(std::string path, ofFbo &fbo){
videoPlayer.load(path);
videoPlayer.play();
videoPlayer.setFrame(800);
fbo.allocate(videoPlayer.getWidth(), videoPlayer.getHeight(), GL_RGB);
/* pause video initially */
fbo.allocate(ofGetWindowWidth() / 2, ofGetWindowHeight(), GL_RGB);
}
void Player::SetVideo(std::string path, std::string frame, bool is_active){
if(path.empty()){
path = "videos/output.mp4";
}
/* convert str frame -> int (remove _x)*/
int f_number = 0;
try {
size_t pos = frame.find("_");
std::string frame_number = frame.substr(0, pos);
f_number = std::stoi(frame_number);
} catch (exception e){
std::cout << "No underscore found in frame name" << std::endl;
}
if(!is_active){
if(videoPlayer.getMoviePath() != path){
videoPlayer.loadAsync(path);
videoPlayer.play();
}
if(set_frame){
if(videoPlayer.isPlaying() && playerCurrentFrame != f_number){
videoPlayer.setFrame(f_number);
if(playerCurrentFrame == f_number)
set_frame = false;
}
}
}
}
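Note: find("_") returning npos does not throw here; substr(0, npos) just yields the whole string, and it is std::stoi on a non-numeric name that raises. Catching std::exception by value also slices. A tighter sketch of the same parse:
// sketch: parse "123_x" -> 123, defaulting to 0 on any malformed name
int f_number = 0;
size_t pos = frame.find('_');
try {
    f_number = std::stoi(pos != std::string::npos ? frame.substr(0, pos) : frame);
} catch (const std::exception& e) {
    ofLogWarning() << "Unparseable frame name: " << frame;
}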
/* Loads a frame from path */
void Player::SetFrame(std::string path){
frame.load(path);
}
// Sets a random frame in the active video
@@ -52,4 +133,26 @@ void Player::SetRandomFrame(){
int randomFrame = ofRandom(0, videoPlayer.getTotalNumFrames());
std::cout << "setting frame: " << randomFrame << std::endl;
videoPlayer.setFrame(randomFrame);
}
void Player::SetVideoPosition(ofFbo& output_fbo){
int playerW = videoPlayer.getWidth();
int playerH = videoPlayer.getHeight();
// Calculate the scaling to fit the 2/3 width and full height area
float targetWidth = output_fbo.getWidth();
float targetHeight = output_fbo.getHeight();
float scaleX = targetWidth / playerW;
float scaleY = targetHeight / playerH;
// Use the larger scaling factor to ensure coverage
float scale = std::max(scaleX, scaleY);
// Calculate scaled dimensions
int scaledWidth = playerW * scale;
int scaledHeight = playerH * scale;
// Center the video within the FBO
centerPosition = glm::vec2((targetWidth - scaledWidth) / 2, (targetHeight - scaledHeight) / 2);
}

src/Player.h (19 changes)

@@ -8,11 +8,13 @@ class Player {
public:
void Setup();
void Update(ofImage &img);
void Update(ofImage &img, bool show_frame);
void Draw();
void SetVideo(std::string path, ofFbo &fbo);
void SetVideoOnAwake(std::string path, ofFbo &fbo);
void SetVideo(std::string path, std::string frame, bool show_frame);
void SetFrame(std::string path);
ofPixels GetVideoPixels();
void SetVideoPosition();
void SetVideoPosition(ofFbo& output_fbo);
void SetRandomFrame();
void SetupGUI();
void UpdateGUI();
@@ -31,9 +33,18 @@ class Player {
glm::vec2 centerPosition;
ofFbo fbo;
ofFbo temp;
float x_pos;
float y_pos;
float new_width;
float new_height;
bool set_frame;
ofImage frame;
Player();
};
#endif

src/QuadSource/QuadSource.cpp (21 changes)

@@ -0,0 +1,21 @@
#include "QuadSource.h"
void QuadSource::setup(){
// name
name = "test";
// allocate size
allocate(1512, 1080);
}
void QuadSource::update(){
}
void QuadSource::draw(){
// Fill FBO with our quads
ofClear(0);
//ofBackground(255, 120, 10);
fbo_in->draw(0, 0);
}

src/QuadSource/QuadSource.h (17 changes)

@@ -0,0 +1,17 @@
#ifndef _QUADSOURCE
#define _QUADSOURCE
#include "ofMain.h"
#include "FboSource.h"
class QuadSource : public ofx::piMapper::FboSource{
public:
void setup();
void update();
void draw();
std::vector<ofRectangle> quads;
std::vector<float> quad_speeds;
};
#endif

src/Request.cpp (40 changes)

@@ -0,0 +1,40 @@
#include "Request.h"
/* setup http server */
void Request::setup(std::string ip, int port, std::string page){
std::cout << "Initialising HTTP Server" << std::endl;
req.method = ofHttpRequest::POST;
req.url = "http://" + ip + ":" + ofToString(port) + "/" + page;
req.headers["Content-Type"] = "application/json";
}
/* send a request to vp_server & return frame/video/folder */
VPResp Request::query(Vector7D& in){
VPResp vp_resp;
try {
req.body = "{\"vector\": [" +
ofToString(in.angry) + "," +
ofToString(in.disgust) + "," +
ofToString(in.fear) + "," +
ofToString(in.happy) + "," +
ofToString(in.sad) + "," +
ofToString(in.surprise) + "," +
ofToString(in.neutral) + "]}";
auto resp = http.handleRequest(req);
json_resp = ofJson::parse(resp.data.getText());
vp_resp.folder = json_resp["folder"];
vp_resp.image = json_resp["image"];
vp_resp.video = json_resp["video"];
vp_resp.frame = json_resp["frame"];
vp_resp.lost = json_resp["lost"];
past_vp_resp = vp_resp;
return vp_resp;
} catch (exception e) {
// Some issue happening here when plugging in controllers, or when they initially connect
return past_vp_resp;
}
}
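Note: since ofJson (nlohmann::json underneath) already parses the response, it could also build the request body and handle the quoting (a sketch):
// sketch: let ofJson format the request body instead of manual concatenation
ofJson body;
body["vector"] = { in.angry, in.disgust, in.fear, in.happy,
                   in.sad, in.surprise, in.neutral };
req.body = body.dump();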

src/Request.h (46 changes)

@@ -0,0 +1,46 @@
#ifndef _REQUEST
#define _REQUEST
#include "ofMain.h"
/* emotional embedding */
struct Vector7D {
float angry;
float disgust;
float fear;
float happy;
float sad;
float surprise;
float neutral;
bool operator!=(const Vector7D &other) const {
return angry != other.angry ||
disgust != other.disgust ||
fear != other.fear ||
happy != other.happy ||
sad != other.sad ||
surprise != other.surprise ||
neutral != other.neutral;
}
};
/* Vantage point query structure */
struct VPResp{
std::string folder;
std::string video;
std::string image;
std::string frame;
int lost;
};
class Request{
public:
void setup(std::string ip, int port, std::string page);
VPResp query(Vector7D& in);
ofHttpRequest req;
ofURLFileLoader http;
ofJson json_resp;
VPResp past_vp_resp;
};
#endif

src/Server.cpp (159 changes)

@@ -0,0 +1,159 @@
#include "Server.h"
void Server::start(){
std::cout << "Initialising TCP sever" << std::endl;
server.setup(port);
osc_sender.setup(OSC_HOST, OSC_PORT);
http.setup(http_ip, http_port, http_page);
is_active = true;
previous_embedding = embedding;
last_change_time = std::chrono::steady_clock::now();
}
void Server::update(){
for ( int i = 0; i < server.getLastID(); i++){
if (server.isClientConnected(i)) {
const int buffer_size = 8;
char buffer[buffer_size];
int bytes_recieved = server.receiveRawBytes(i, buffer, buffer_size);
if (bytes_recieved == buffer_size){
float value;
int id;
memcpy(&value, buffer, sizeof(float));
memcpy(&id, buffer + sizeof(float), sizeof(int));
std::string ip_address = server.getClientIP(i);
addOrUpdateClient(id, value, ip_address);
}
}
}
updateEmbedding();
checkActivity();
sendOSCMessage();
if(debug){
printClients();
}
}
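Note: each packet is read as a raw 4-byte float followed by a 4-byte int, so the controllers must pack the same layout on a same-endian host. A matching send from an ofxTCPClient would look like this (a sketch; client, value and id are hypothetical names):
// sketch: pack one float + one int into the 8-byte frame Server::update() expects
char buffer[8];
memcpy(buffer, &value, sizeof(float));
memcpy(buffer + sizeof(float), &id, sizeof(int));
client.sendRawBytes(buffer, 8);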
void Server::addOrUpdateClient(int client_id, float value, const std::string& ip_address){
ClientInfo client;
client.ip_address = ip_address;
client.value = value;
clients[client_id] = client;
}
void Server::updateEmbedding(){
for(const auto& c : clients){
const ClientInfo& info = c.second;
float val = std::round(info.value * 100.0f) / 100.0f;
switch(c.first){
case 0:
embedding.angry = val;
break;
case 1:
embedding.disgust = val;
break;
case 2:
embedding.fear = val;
break;
case 3:
embedding.happy = val;
break;
case 4:
embedding.sad = val;
break;
case 5:
embedding.surprise = val;
break;
case 6:
embedding.neutral = val;
embedding.fear = ofRandom(0.1, 0.6);
embedding.angry = ofRandom(0.01, 0.99);
embedding.happy = ofRandom(0.01, 0.99);
break;
}
}
}
void Server::printClients(){
for( const auto& c : clients){
int id = c.first;
const ClientInfo& info = c.second;
std::cout << "id: " << id
<< ", value: " << info.value
<< ", IP: " << info.ip_address << std::endl;
}
std::cout << is_active << std::endl;
}
/* check if the controllers are in use */
void Server::checkActivity(){
if (previous_embedding.neutral != embedding.neutral) { // Check if embedding has changed
last_change_time = std::chrono::steady_clock::now(); // Reset the timer if there is a change
previous_embedding = embedding; // Update the previous embedding to the current one
is_active = true;
sendHttpRequest();
} else {
// Calculate the time since the last change
auto now = std::chrono::steady_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::seconds>(now - last_change_time).count();
if (duration >= 2) {
is_active = false;
}
}
}
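Note: the change check watches only .neutral, even though Vector7D defines a struct-wide operator!= in Request.h. If movement on any of the seven channels should reset the timer, the comparison is a one-line change (a sketch of that assumption):
// sketch: use the struct-wide comparison so any channel counts as activity
if (previous_embedding != embedding) { /* ...reset timer as above... */ }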
/* send osc msg; check if the audio file exists and is different from the past audio file */
void Server::sendOSCMessage(){
std::vector<ofxOscMessage> messages;
ofxOscMessage me_0;
ofxOscMessage me_1;
ofxOscMessage me_2;
ofxOscMessage me_3;
ofxOscMessage me_file;
std::string audio_file = vp_resp.folder;
// Check if file exists in a given dir
ofFile file("/home/cailean/Desktop/rave/all_wav_files/" + audio_file + ".wav");
if(!is_active && (audio_file != past_audio_file) && file.exists()){
me_file.setAddress("/emote/filename");
me_file.addStringArg(audio_file + ".wav");
messages.push_back(me_file);
past_audio_file = audio_file;
}
me_0.setAddress("/emote/0");
me_0.addFloatArg(embedding.neutral);
messages.push_back(me_0);
me_1.setAddress("/emote/1");
me_1.addFloatArg(embedding.neutral);
messages.push_back(me_1);
me_2.setAddress("/emote/2");
me_2.addFloatArg(embedding.neutral);
messages.push_back(me_2);
me_3.setAddress("/emote/3");
me_3.addFloatArg(embedding.neutral);
messages.push_back(me_3);
for (auto& msg : messages){
osc_sender.sendMessage(msg, false);
}
}
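Note: all four /emote/N messages currently carry embedding.neutral. If each address is meant to carry its own channel, a table-driven loop (a sketch of that assumption; the channel mapping here is hypothetical) keeps address and argument in sync:
// sketch: one message per channel, assuming distinct values are intended
float channels[] = { embedding.neutral, embedding.happy, embedding.sad, embedding.fear };
for (int i = 0; i < 4; i++){
    ofxOscMessage m;
    m.setAddress("/emote/" + ofToString(i));
    m.addFloatArg(channels[i]);
    osc_sender.sendMessage(m, false);
}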
/* sends request to http server if is_active = true */
void Server::sendHttpRequest(){
vp_resp = http.query(embedding);
}

src/Server.h (54 changes)

@@ -0,0 +1,54 @@
#ifndef _SERVER
#define _SERVER
#include "ofMain.h"
#include "ofxNetwork.h"
#include "Request.h"
#include <unordered_map>
#include <chrono>
#include "ofxOsc.h"
#define OSC_HOST "127.0.0.1"
#define OSC_PORT 9002
struct ClientInfo {
float value;
std::string ip_address;
};
class Server{
public:
Server(int port, Vector7D& _embedding, bool debug, std::string _http_ip, int _http_port, std::string _http_page)
: port(port), embedding(_embedding), debug(debug), http_ip(_http_ip), http_port(_http_port), http_page(_http_page) {}
void start();
void update();
void addOrUpdateClient(int client_id, float value, const std::string& ip_address);
void printClients();
void updateEmbedding();
void checkActivity();
void sendHttpRequest();
void sendOSCMessage();
int port;
ofxTCPServer server;
std::unordered_map<int, ClientInfo> clients;
bool debug;
bool is_active;
Vector7D& embedding;
Request http;
std::string http_ip;
int http_port;
std::string http_page;
VPResp vp_resp;
private:
Vector7D previous_embedding;
std::chrono::time_point<std::chrono::steady_clock> last_change_time;
ofxOscSender osc_sender;
std::string past_audio_file;
};
#endif

src/Yolo.cpp (6 changes)

@@ -47,14 +47,16 @@ void Yolo::ParseOutput(float* &output_tensors, std::vector<types::BoxfWithLandma
void Yolo::DrawBox(std::vector<types::BoxfWithLandmarks> &detected_faces){
for (const auto &face : detected_faces) {
ofNoFill();
ofDrawRectangle(face.box.x1, face.box.y1, face.box.x2 - face.box.x1, face.box.y2 - face.box.y1);
float w = ofGetWindowWidth() / 2;
ofDrawRectangle(face.box.x1 + w, face.box.y1, ((face.box.x2 + w) - (face.box.x1 + w)), face.box.y2 - face.box.y1);
}
}
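Note: both corners carry the same half-width offset, so it cancels in the width term; an equivalent call is:
// equivalent: the +w offsets cancel in the width term
ofDrawRectangle(face.box.x1 + w, face.box.y1, face.box.x2 - face.box.x1, face.box.y2 - face.box.y1);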
// Simple helper to draw boxes at the center of the detected face.
void Yolo::DrawCenter(std::vector<types::BoxfWithLandmarks> &detected_faces){
ofNoFill();
ofDrawCircle(detected_faces[0].box.center, 5);
glm::vec2 position = detected_faces[0].box.center;
ofDrawCircle(position.x + ofGetWindowWidth() / 2, position.y, 5);
}

src/main.cpp (4 changes)

@@ -6,7 +6,9 @@ int main( ){
//Use ofGLFWWindowSettings for more options like multi-monitor fullscreen
ofGLWindowSettings settings;
settings.setSize(1600, 800);
//1512x1080
settings.setSize(1980, 1080);
settings.setGLVersion(3, 2);
settings.windowMode = OF_WINDOW; //can also be OF_FULLSCREEN
auto window = ofCreateWindow(settings);

src/ofApp.cpp (240 changes)

@@ -2,109 +2,163 @@
//--------------------------------------------------------------
void ofApp::setup(){
ofSetFrameRate(24);
/* ofSettings */
ofDisableArbTex();
ofSetVerticalSync(true);
window_width = 1512;
window_height = ofGetWindowHeight();
player.Setup();
player.SetVideo("videos/demo.mp4", fbo);
/* setup pi_mapper */
ofx::piMapper::VideoSource::enableAudio = false;
ofx::piMapper::VideoSource::useHDMIForAudio = false;
projection_mapper.registerFboSource(quad_src);
projection_mapper.setup();
emoteImage.allocate(260, 260);
tempImage.allocate(emoteImage.getWidth(), emoteImage.getHeight(), OF_IMAGE_COLOR);
/* load font */
tf.load("data/fonts/jetbrainsmono-regular.ttf", 20);
/* load shader */
depthToColourShader.load("data/shader/rampShader.vert", "data/shader/rampShader.frag");
/* setup map */
map.Setup();
/* setup video */
player.Setup();
player.SetVideoOnAwake("dataset/20230601_webexclusi-irelandsey_cl11536538/20230601_webexclusi-irelandsey_cl11536538.mp4", model_output_fbo_1);
player.SetFrame("demo.jpg");
/* setup models (modelPath, log, useCuda) */
ORTCHAR_T* modelPath = "/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/apps/myApps/onnx-test/bin/data/depth_anything_v2_vitb.onnx";
ORTCHAR_T* modelPath2 = "/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/apps/myApps/onnx-test/bin/data/yolov5s-face.onnx";
ORTCHAR_T* modelPath3 = "/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/apps/myApps/onnx-test/bin/data/rgb_emotion.onnx";
ORTCHAR_T* modelPath4 = "/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/apps/myApps/onnx-test/bin/data/depth_anything_v2_vits.onnx";
//yolo.Setup(modelPath2, false, true);
depth.Setup(modelPath, true, true);
depth_small.Setup(modelPath, true, true);
//emotion.Setup(modelPath3, false, true);
/* Depth output fbo */
model_output_fbo.allocate(window_width / 2, window_height, GL_RGB);
/* Shader output */
rampedFbo.allocate(window_width, window_height);
/* pi_mapper set output fbo_src */
quad_src.setFbo(&rampedFbo);
emoteImage.allocate(260, 260);
tempImage.allocate(emoteImage.getWidth(), emoteImage.getHeight(), OF_IMAGE_COLOR);
screen_fbo.allocate(window_width, window_height, GL_RGB);
threadMap.setup(&map.fboImage, &model_output_fbo, &depth);
threadVideo.setup(&img, &model_output_fbo_1, &depth_small);
//threadYolo.setupYolo(&img, &detected_faces, &yolo, &faceDetector);
/*
Create a dummy initial input of batch_size = 5, as
when initialising the model, it will attempt to create a space in memory for this array.
If the batch_size does change it will completely slow down inference, due to how the cudnn_search_algo is set.
None of the other search algorithms bar EXHAUSTIVE will work.. no idea why.
/* Setup Models (modelPath, log, useCuda) */
yolo.Setup(modelPath2, false, true);
depth.Setup(modelPath, false, true);
emotion.Setup(modelPath3, false, true);
for(int i = 0; i < emotionImageMaxBatchSize; i++){
tempImage.setFromPixels(emoteImage.getPixels());
croppedFaces.push_back(tempImage);
}
*/
/* setup server (http & tcp) */
server = std::make_unique<Server>(12345, embedding, false, "192.168.0.253", 2000, "search");
server->start();
}
//--------------------------------------------------------------
void ofApp::update(){
/* update server - check for clients and values */
server->update();
/* Check whether the application has moved past the first frame,
as the models need to load first and the first inference is quite slow */
if(ofGetFrameNum() > 0)
if(ofGetFrameNum() < 2){
firstRun = false;
threadMap.start();
threadVideo.start();
//threadYolo.start();
}
/* Clear detected face list */
detected_faces.clear();
/* Setup model input using ofImage */
player.Update(img);
img.setFromPixels(player.GetVideoPixels());
/* tsnemap fbo - first run to allocate fbo */
map.Update(server->vp_resp.image, server->is_active);
if(firstRun){
map.Draw();
}
/* Run Models */
/* Set up model input using ofImage and the allocated fbo; checks whether a frame or video frame should be drawn to the screen */
displayFrame();
/* Run Models, and set pixels */
try{
auto output_tensors = depth.Run(img);
float* output_ptr = output_tensors.front().GetTensorMutableData<float>();
size_t num_elements = output_tensors.front().GetTensorTypeAndShapeInfo().GetElementCount();
float min_value = depth.ReduceMin(output_ptr, num_elements);
float max_value = depth.ReduceMax(output_ptr, num_elements);
depth.Normalize(output_ptr, num_elements, min_value, max_value);
depth.DataToFbo(output_ptr, 518, 518, fbo);
auto output_tensors_face = yolo.Run(img);
auto output_faces = output_tensors_face.front().GetTensorTypeAndShapeInfo().GetShape();
unsigned int num_anchors = output_faces[1]; // Number of anchors
threadMap.update();
threadVideo.update();
//threadYolo.update();
float* output_face_ptr = output_tensors_face.front().GetTensorMutableData<float>();
faceDetector.ParseOutput(output_face_ptr, detected_faces, num_anchors);
faceDetector.ConvertBoxCoordsToOriginalSize(detected_faces, fbo.getWidth(), fbo.getHeight());
depth.SetPixels(model_output_fbo);
depth_small.SetPixels(model_output_fbo_1);
//faceDetector.ConvertBoxCoordsToOriginalSize(detected_faces, model_output_fbo_1.getWidth(), model_output_fbo_1.getHeight());
/* As no input is generated for the emotion recognition model, run a dummy vector through the model
So it can load */
if(firstRun){
/*
Create a dummy initial input of batch_size = 5, as
when initialising the model, it will attempt to create a space in memory for this array.
If the batch_size does change it will completely slow down inference, due to how the cudnn_search_algo is set.
None of the other search algorithms bar EXHAUSTIVE will work.. no idea why.
*/
for(int i = 0; i < emotionImageMaxBatchSize; i++){
tempImage.setFromPixels(emoteImage.getPixels());
croppedFaces.push_back(tempImage);
}
// Run model to warmup
auto emotion_output_tensor = emotion.RunBatch(croppedFaces);
} else {
//inferEmotionalState();
}
/* Run emotion inference */
//inferEmotionalState();
So it can load.
auto emotion_output_tensor = emotion.RunBatch(croppedFaces);
*/
} catch (exception e){
std::cout << "Model did not run" << std::endl;
}
projection_mapper.update();
}
//--------------------------------------------------------------
void ofApp::draw(){
fbo.draw(0, 0);
map.Draw();
screen_fbo.begin();
model_output_fbo.draw(0, 0);
model_output_fbo_1.draw(window_width / 2, 0);
screen_fbo.end();
renderDepthMap();
// if(!firstRun && detected_faces.size() != 0){
// faceDetector.DrawBox(detected_faces);
// faceDetector.DrawCenter(detected_faces);
// }
ofPushMatrix();
ofSetColor(255);
ofSetBackgroundColor(0);
tf.drawString(std::to_string(ofGetFrameRate()), 10, 30);
ofPopMatrix();
// printEmotions();
// send final fbo (ramp.fbo) to pi_mapper
//projection_mapper.draw();
if(!firstRun){
faceDetector.DrawBox(detected_faces);
faceDetector.DrawCenter(detected_faces);
}
// emoteImage.draw(640, 0);
// for(auto& face : detected_faces){
// ofDrawBitmapString(std::to_string(face.box.emotional_state.emotions[0]), 700, 300);
@@ -114,7 +168,6 @@ void ofApp::draw(){
//--------------------------------------------------------------
void ofApp::inferEmotionalState(){
/*
Max faces to process with the model (5)
*/
@@ -135,13 +188,13 @@ void ofApp::inferEmotionalState(){
for each image -> set emotional state in detected_faces array
*/
auto emotion_output_tensor = emotion.RunBatch(croppedFaces);
auto& output_tensor = emotion_output_tensor.front();
auto output_shap = output_tensor.GetTensorTypeAndShapeInfo().GetShape();
size_t batch_size = output_shap[0]; // Number of images in the batch
size_t num_classes = output_shap[1]; // Number of emotion classes
size_t num_classes = 7; //output_shap[1]; // Number of emotion classes
float* emotional_data = output_tensor.GetTensorMutableData<float>();
emotional_data = output_tensor.GetTensorMutableData<float>();
for (size_t i = 0; i < max_faces_to_process; i++){
@@ -153,36 +206,71 @@
}
}
/*
Depth Map Shader Pass
*/
void ofApp::renderDepthMap(){
rampedFbo.begin();
// depthToColourShader.begin();
// depthToColourShader.setUniformTexture("tex0", screen_fbo.getTexture(), 0);
// depthToColourShader.setUniform1f("texW", rampedFbo.getWidth());
// depthToColourShader.setUniform1f("texH", rampedFbo.getHeight());
screen_fbo.draw(0, 0);
// depthToColourShader.end();
rampedFbo.end();
rampedFbo.draw(0,0);
}
void ofApp::printEmotions(){
ofPushMatrix();
ofSetColor(255);
ofSetBackgroundColor(0);
char buffer[50]; // Create a buffer to hold the formatted string
std::sprintf(buffer, "Value: %.3f", embedding.neutral); //
tf.drawString(buffer, 10, 90);
ofPopMatrix();
}
/* check vp_resp, set frame to video_player fbo (centered + scaled) */
void ofApp::displayFrame(){
/* set frame from recent vantage-point tree response */
player.SetFrame(server->vp_resp.image);
/* set video from recent vantage-point tree resp */
player.SetVideo(server->vp_resp.video, server->vp_resp.frame, server->is_active);
/* set image for model input, checks if the clients are active -> yes-frame no-video */
player.Update(img, server->is_active);
}
//--------------------------------------------------------------
void ofApp::keyPressed(int key){
if (key == OF_KEY_LEFT){
player.SetRandomFrame();
}
projection_mapper.keyPressed(key);
map.buttonPressed(key);
}
//--------------------------------------------------------------
void ofApp::keyReleased(int key){
projection_mapper.keyReleased(key);
}
//--------------------------------------------------------------
void ofApp::mouseMoved(int x, int y ){
}
//--------------------------------------------------------------
void ofApp::mouseDragged(int x, int y, int button){
projection_mapper.mouseDragged(x, y, button);
}
//--------------------------------------------------------------
void ofApp::mousePressed(int x, int y, int button){
projection_mapper.mousePressed(x, y, button);
}
//--------------------------------------------------------------
void ofApp::mouseReleased(int x, int y, int button){
projection_mapper.mouseReleased(x, y, button);
}
//--------------------------------------------------------------

src/ofApp.h (45 changes)

@@ -7,6 +7,14 @@
#include "Yolo.h"
#include <vector>
#include "Player.h"
#include "Map.h"
#include <chrono>
#include <iostream>
#include "ModelThread.h"
#include "Server.h"
#include "Request.h"
#include "ofxPiMapper.h"
#include "QuadSource/QuadSource.h"
class ofApp : public ofBaseApp{
@@ -27,15 +35,22 @@ class ofApp : public ofBaseApp{
void dragEvent(ofDragInfo dragInfo);
void gotMessage(ofMessage msg);
void inferEmotionalState();
void renderDepthMap();
void printEmotions();
void displayFrame();
float window_height;
float window_width;
ofImage img;
ofFbo fbo;
cv::Mat cvImg;
ofVideoGrabber webcam;
Player player;
bool firstRun = true;
Onnx depth;
Onnx depth_small;
Onnx yolo;
Onnx emotion;
ofxCvColorImage emoteImage;
@@ -45,5 +60,33 @@ class ofApp : public ofBaseApp{
Emotef emo;
Yolo faceDetector;
float* emotional_data;
std::vector<types::BoxfWithLandmarks> detected_faces;
Map map;
ofShader depthToColourShader;
ofFbo rampedFbo;
ofTrueTypeFont tf;
ofFbo video_player_fbo;
ofFbo model_output_fbo;
ofFbo model_output_fbo_1;
ofFbo model_output_fbo_frame;
ofFbo screen_fbo;
ModelThread threadMap;
ModelThread threadVideo;
ModelThread threadYolo;
ModelThread threadEmotion;
std::unique_ptr<Server> server;
Vector7D embedding;
int app_h = 1080;
int app_w = 1512;
ofxPiMapper projection_mapper;
QuadSource quad_src;
};
