|
|
@ -10,6 +10,8 @@ void ofApp::setup(){ |
|
|
|
portrait_fbo.allocate((ofGetWindowWidth() / 2), map_h, GL_RGBA); // this should be full width
|
|
|
|
portrait_pre_fbo.allocate((ofGetWindowWidth() / 3) * 1, map_h, GL_RGB); // this should be full width, and same below
|
|
|
|
portrait_pre_fbo_alpha.allocate((ofGetWindowWidth() / 3) * 1, map_h, GL_RGBA); |
|
|
|
portrait_cropped.allocate((ofGetWindowWidth() / 3) * 1, map_h, GL_RGB); |
|
|
|
portrait_cropped_alpha.allocate((ofGetWindowWidth() / 3) * 1, map_h, GL_RGBA); |
|
|
|
comp_fbo.allocate(ofGetWindowWidth(), map_h, GL_RGBA); |
|
|
|
|
|
|
|
model_outptut_fbo.allocate(map_fbo.getWidth(), map_h, GL_RGB); |
|
|
@ -24,8 +26,10 @@ void ofApp::setup(){ |
|
|
|
model_image_esp.allocate(128 * 2, 168 * 2, OF_IMAGE_COLOR); |
|
|
|
model_image.allocate(map_fbo.getWidth(), map_h, OF_IMAGE_COLOR); |
|
|
|
model_image_portrait.allocate(portrait_pre_fbo_alpha.getWidth(), portrait_pre_fbo_alpha.getHeight(), OF_IMAGE_COLOR); |
|
|
|
model_image_portrait_cropped.allocate(portrait_pre_fbo_alpha.getWidth(), portrait_pre_fbo_alpha.getHeight(), OF_IMAGE_COLOR); |
|
|
|
|
|
|
|
map_pixels.allocate(map_fbo.getWidth(), map_h, OF_PIXELS_RGB); |
|
|
|
alpha_demo.allocate(portrait_pre_fbo_alpha.getWidth(), portrait_pre_fbo_alpha.getHeight(), OF_IMAGE_COLOR_ALPHA); |
|
|
|
|
|
|
|
/* allocated ofImages in esp_images */ |
|
|
|
for(int i = 0; i < 4; i++){ |
|
|
@ -52,6 +56,8 @@ void ofApp::setup(){ |
|
|
|
|
|
|
|
ORTCHAR_T* modelPath = "/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/apps/myApps/image-to-mesh/bin/data/models/depth_anything_v2_vits.onnx"; |
|
|
|
ORTCHAR_T* modelPath_Small = "/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/apps/myApps/image-to-mesh/bin/data/models/depth_anything_v2_vitb.onnx"; |
|
|
|
ORTCHAR_T* modelPath_Yolo = "/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/apps/myApps/image-to-mesh/bin/data/models/yolov5s-face.onnx"; |
|
|
|
ORTCHAR_T* modelPath_Indoor_Dynamic = "/home/cailean/Desktop/openframeworks/of_v0.12.0_linux64gcc6_release/apps/myApps/image-to-mesh/bin/data/models/depth_anything_v2_vitl.onnx"; |
|
|
|
|
|
|
|
/* bullet setup */ |
|
|
|
bullet.setup(nodes); |
|
|
@ -64,12 +70,14 @@ void ofApp::setup(){ |
|
|
|
/* onnx setup */ |
|
|
|
depth_onnx.Setup(modelPath, false, true); |
|
|
|
depth_onnx_esp.Setup(modelPath, false, true); |
|
|
|
depth_onnx_portrait.Setup(modelPath_Small, false, true); |
|
|
|
depth_onnx_portrait.Setup(modelPath, false, true); |
|
|
|
yolo_onnx.Setup(modelPath_Yolo, false, true); |
|
|
|
|
|
|
|
/* multi-thread setup */ |
|
|
|
depth_thread.setup(&model_image, &model_outptut_fbo, &depth_onnx); |
|
|
|
depth_esp.setup(&model_image_esp, &model_esp_out_fbo, &depth_onnx_esp); |
|
|
|
depth_portrait.setup(&model_image_portrait, &model_portrait_out_fbo, &depth_onnx_portrait); |
|
|
|
depth_portrait.setup(&model_image_portrait_cropped, &model_portrait_out_fbo, &depth_onnx_portrait); |
|
|
|
yolo.setupYolo(&model_image_portrait, &detected_faces, &yolo_onnx, &face_detector); |
|
|
|
|
|
|
|
/* camera settings for portrait */ |
|
|
|
CameraPosition cp; |
|
|
@ -86,9 +94,9 @@ void ofApp::setup(){ |
|
|
|
cam_positions.push_back(cp); |
|
|
|
|
|
|
|
/* settings */ |
|
|
|
// portrait_camera.enableOrtho();
|
|
|
|
// portrait_camera.setNearClip(-10000);
|
|
|
|
// portrait_camera.setFarClip(10000);
|
|
|
|
portrait_camera.enableOrtho(); |
|
|
|
portrait_camera.setNearClip(-10000); |
|
|
|
portrait_camera.setFarClip(10000); |
|
|
|
portrait_camera.setPosition(cam_positions[0].position); |
|
|
|
portrait_camera.setOrientation(cam_positions[0].rotation); |
|
|
|
portrait_camera.setScale(0.5); |
|
|
@ -111,7 +119,6 @@ void ofApp::setup(){ |
|
|
|
|
|
|
|
//--------------------------------------------------------------
|
|
|
|
void ofApp::update(){ |
|
|
|
|
|
|
|
server->update(esp_comp_fbo); |
|
|
|
|
|
|
|
float current_time = ofGetElapsedTimef(); |
|
|
@ -141,6 +148,7 @@ void ofApp::update(){ |
|
|
|
depth_thread.start(); |
|
|
|
depth_esp.start(); |
|
|
|
depth_portrait.start(); |
|
|
|
yolo.start(); |
|
|
|
} |
|
|
|
|
|
|
|
/* write pixels to model input image */ |
|
|
@ -156,11 +164,9 @@ void ofApp::update(){ |
|
|
|
try{ |
|
|
|
depth_thread.update(); |
|
|
|
|
|
|
|
|
|
|
|
/* set output to fbo's */ |
|
|
|
depth_onnx.SetPixels(model_outptut_fbo); |
|
|
|
|
|
|
|
|
|
|
|
} catch (exception e){ |
|
|
|
std::cout << "Model did not run" << std::endl; |
|
|
|
} |
|
|
@ -168,12 +174,12 @@ void ofApp::update(){ |
|
|
|
//mapper.update();
|
|
|
|
bullet.update(server->is_active, server->getChosenNode()); |
|
|
|
|
|
|
|
|
|
|
|
//std::cout << portrait_camera.getPosition() << " : " <<portrait_camera.getOrientationQuat()<< std::endl;
|
|
|
|
} |
|
|
|
|
|
|
|
//--------------------------------------------------------------
|
|
|
|
void ofApp::draw(){ |
|
|
|
|
|
|
|
ofPushStyle(); |
|
|
|
map_fbo_alpha.begin(); |
|
|
|
ofClear(0); |
|
|
@ -229,23 +235,21 @@ void ofApp::draw(){ |
|
|
|
map_fbo_post.draw(0, 0); |
|
|
|
/* actual map */ |
|
|
|
map_fbo_alpha.draw(0,0); |
|
|
|
//model_portrait_out_fbo.draw(0,0);
|
|
|
|
//portrait_pre_fbo_alpha.draw(model_portrait_out_fbo.getWidth(),0);
|
|
|
|
shaders.end(); |
|
|
|
comp_fbo.end(); |
|
|
|
|
|
|
|
comp_fbo.getTexture().setTextureMinMagFilter(GL_NEAREST, GL_NEAREST); |
|
|
|
|
|
|
|
//mapper.draw();
|
|
|
|
comp_fbo.draw(0,60); |
|
|
|
|
|
|
|
ofTranslate(0, 60); |
|
|
|
comp_fbo.draw(0, 0); |
|
|
|
|
|
|
|
//server->print();
|
|
|
|
} |
|
|
|
|
|
|
|
void ofApp::drawPortrait(){ |
|
|
|
|
|
|
|
float p_scale = 1 + ( ((1 + ofNoise(ofGetElapsedTimef() / 5)) / 2) * 0.5); |
|
|
|
|
|
|
|
portrait_pre_fbo.begin(); |
|
|
|
ofClear(0, 0, 0, 0); |
|
|
|
|
|
|
@ -261,53 +265,112 @@ void ofApp::drawPortrait(){ |
|
|
|
ofPushMatrix(); |
|
|
|
// Move to center of FBO
|
|
|
|
ofTranslate(portrait_pre_fbo.getWidth()/2, portrait_pre_fbo.getHeight()); // 0.8
|
|
|
|
// Apply scale
|
|
|
|
ofScale(1); |
|
|
|
// Move back by half the scaled image dimensions
|
|
|
|
ofTranslate(-scaledWidth/2, -scaledHeight); |
|
|
|
// Draw at 0,0 since we've already translated
|
|
|
|
last_chosen_node.img.draw(0, 0, scaledWidth, scaledHeight); |
|
|
|
ofPopMatrix(); |
|
|
|
|
|
|
|
portrait_pre_fbo.end(); |
|
|
|
|
|
|
|
portrait_pre_fbo_alpha.begin(); |
|
|
|
ofPushMatrix(); |
|
|
|
ofClear(0, 0, 0, 0); |
|
|
|
ofTranslate(portrait_pre_fbo.getWidth()/2, portrait_pre_fbo.getHeight()); |
|
|
|
// Apply scale
|
|
|
|
ofScale(1); |
|
|
|
// Move back by half the scaled image dimensions
|
|
|
|
ofTranslate(-scaledWidth/2, -scaledHeight); |
|
|
|
last_chosen_node.img.draw(0, 0, scaledWidth, scaledHeight); |
|
|
|
ofPopMatrix(); |
|
|
|
portrait_pre_fbo_alpha.end(); |
|
|
|
|
|
|
|
portrait_pre_fbo_alpha.readToPixels(alpha_pixels); |
|
|
|
alpha_demo.setFromPixels(alpha_pixels); |
|
|
|
|
|
|
|
float current_time = ofGetElapsedTimef(); |
|
|
|
|
|
|
|
if(portrait_needs_update || past_plane_size != plane_size) { |
|
|
|
past_plane_size = plane_size; |
|
|
|
depth_ready = false; |
|
|
|
mesh_ready = false; |
|
|
|
last_process_time = current_time; |
|
|
|
// we have full portrait & alpha portrait.
|
|
|
|
|
|
|
|
if(portrait_needs_update) { |
|
|
|
yolo.resetInferenceFlag(); |
|
|
|
|
|
|
|
ofPixels pix; |
|
|
|
portrait_pre_fbo.readToPixels(pix); |
|
|
|
model_image_portrait.setFromPixels(pix); |
|
|
|
|
|
|
|
// Queue the depth processing
|
|
|
|
depth_portrait.update(); |
|
|
|
detected_faces.clear(); |
|
|
|
yolo.update(); // This triggers the thread to run
|
|
|
|
|
|
|
|
portrait_needs_update = false; |
|
|
|
} |
|
|
|
|
|
|
|
// Give the depth processing some time to complete (adjust timeout as needed)
|
|
|
|
const float PROCESS_TIMEOUT = 0.1; // 100 ms timeout |
|
|
|
|
if(!depth_ready && (current_time - last_process_time) > PROCESS_TIMEOUT) { |
|
|
|
depth_onnx_portrait.SetPixels(model_portrait_out_fbo); |
|
|
|
depth_ready = true; |
|
|
|
if(yolo.checkInferenceComplete()) { |
|
|
|
/* check if face is detected, and crop portrait */ |
|
|
|
if(detected_faces.size() > 0){ |
|
|
|
for(auto& f : detected_faces){ |
|
|
|
float crop_width = f.box.x2 - f.box.x1; |
|
|
|
float crop_height = f.box.y2 - f.box.y1; |
|
|
|
|
|
|
|
// Calculate scaling to fit in FBO while maintaining aspect ratio
|
|
|
|
float scale = min( |
|
|
|
(float)portrait_cropped.getWidth() / crop_width, |
|
|
|
(float)portrait_cropped.getHeight() / crop_height |
|
|
|
); |
|
|
|
|
|
|
|
float scaledWidth = crop_width * scale; |
|
|
|
float scaledHeight = crop_height * scale; |
|
|
|
|
|
|
|
// Calculate position to center in FBO
|
|
|
|
float x = (portrait_cropped.getWidth() - scaledWidth) / 2; |
|
|
|
float y = (portrait_cropped.getHeight() - scaledHeight) / 2; |
|
|
|
|
|
|
|
portrait_cropped.begin(); |
|
|
|
ofClear(ofColor::black); |
|
|
|
ofPushMatrix(); |
|
|
|
// Move to center position
|
|
|
|
ofTranslate(x, y); |
|
|
|
// Draw the cropped section at the calculated size
|
|
|
|
model_image_portrait.drawSubsection( |
|
|
|
0, 0, // Draw at translated position
|
|
|
|
scaledWidth, scaledHeight, // Draw at scaled size
|
|
|
|
f.box.x1, f.box.y1, // Start crop from face detection
|
|
|
|
crop_width, crop_height // Amount to crop
|
|
|
|
); |
|
|
|
ofPopMatrix(); |
|
|
|
portrait_cropped.end(); |
|
|
|
|
|
|
|
portrait_cropped_alpha.begin(); |
|
|
|
ofClear(0); |
|
|
|
ofPushMatrix(); |
|
|
|
// Move to center position
|
|
|
|
ofTranslate(x, y); |
|
|
|
// Draw the cropped section at the calculated size
|
|
|
|
alpha_demo.drawSubsection( |
|
|
|
0, 0, // Draw at translated position
|
|
|
|
scaledWidth, scaledHeight, // Draw at scaled size
|
|
|
|
f.box.x1, f.box.y1, // Start crop from face detection
|
|
|
|
crop_width, crop_height // Amount to crop
|
|
|
|
); |
|
|
|
ofPopMatrix(); |
|
|
|
portrait_cropped_alpha.end(); |
|
|
|
|
|
|
|
ofPixels c_pix; |
|
|
|
portrait_cropped.readToPixels(c_pix); |
|
|
|
model_image_portrait_cropped.setFromPixels(c_pix); |
|
|
|
} |
|
|
|
} else { |
|
|
|
/* don't use cropped image for depth model */ |
|
|
|
ofPixels c_pix; |
|
|
|
portrait_pre_fbo.readToPixels(c_pix); |
|
|
|
model_image_portrait_cropped.setFromPixels(c_pix); |
|
|
|
} |
|
|
|
|
|
|
|
// Only generate mesh once depth is ready
|
|
|
|
if(depth_ready && !mesh_ready) { |
|
|
|
/* run depth model on cropped portrait */ |
|
|
|
depth_portrait.update(); |
|
|
|
yolo.resetInferenceFlag(); |
|
|
|
} |
|
|
|
|
|
|
|
if(depth_portrait.checkInferenceComplete()){ |
|
|
|
|
|
|
|
depth_onnx_portrait.SetPixels(model_portrait_out_fbo); |
|
|
|
float planeScale = 1.0; |
|
|
|
int planeWidth = portrait_pre_fbo_alpha.getWidth() * planeScale; |
|
|
|
int planeHeight = portrait_pre_fbo_alpha.getHeight() * planeScale; |
|
|
@ -316,21 +379,28 @@ void ofApp::drawPortrait(){ |
|
|
|
int planeRows = planeHeight / planeGridSize; |
|
|
|
|
|
|
|
plane.set(planeWidth, planeHeight, planeColumns, planeRows, OF_PRIMITIVE_TRIANGLES); |
|
|
|
plane.mapTexCoords(0, 0, planeWidth, planeHeight); |
|
|
|
custom_mesh = createCustomPlane(planeWidth, planeHeight, planeColumns, planeRows); |
|
|
|
|
|
|
|
//ofLog() << "Generated new mesh at time: " << current_time;
|
|
|
|
mesh_ready = true; |
|
|
|
|
|
|
|
/* send osc message when new node selected*/ |
|
|
|
server->sendOSCMessage(); |
|
|
|
depth_portrait.resetInferenceFlag(); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
void ofApp::drawPortraitZ(){ |
|
|
|
ofBackgroundGradient(ofColor::white, ofColor::orange, OF_GRADIENT_LINEAR); |
|
|
|
|
|
|
|
ofTexture tex_color = portrait_pre_fbo_alpha.getTexture(); |
|
|
|
/* if no faces found use full portrait texture */ |
|
|
|
ofTexture tex_color; |
|
|
|
|
|
|
|
if(detected_faces.size() > 0){ |
|
|
|
tex_color = portrait_cropped_alpha.getTexture(); |
|
|
|
} else { |
|
|
|
tex_color = portrait_pre_fbo_alpha.getTexture(); |
|
|
|
} |
|
|
|
|
|
|
|
ofTexture tex_depth = model_portrait_out_fbo.getTexture(); |
|
|
|
|
|
|
|
float minDepth = std::numeric_limits<float>::max(); |
|
|
@ -366,7 +436,7 @@ void ofApp::drawPortraitZ(){ |
|
|
|
p_depth.setUniformTexture("tex1", tex_depth, 1); |
|
|
|
ofFill(); |
|
|
|
ofTranslate(p_noise_x, p_noise_y, 0); |
|
|
|
custom_mesh.draw(); |
|
|
|
plane.draw(); |
|
|
|
p_depth.end(); |
|
|
|
ofPopMatrix(); |
|
|
|
ofDisableDepthTest(); |
|
|
|