// Webcam face-mask sketch (p5.js + ml5 faceApi).
// Detects faces in the live video feed, cuts each face out along its
// landmark outline, and stamps finished face images onto a static buffer.
// --- Sketch state ---
let detections = []; // latest faceApi detection results
let fps;             // unused in this view — presumably an FPS readout; TODO confirm
let video;           // webcam capture (480x360), hidden; read via video.pixels
let boxDimensions;   // unused in this view — TODO confirm still needed
let boxScale;        // unused in this view — TODO confirm still needed
let canvas;          // main p5 canvas
let face;            // most recently constructed Face instance
let newImg;          // scratch image receiving the masked video pixels
let lm;              // per-detection graphics mask (face-outline polygon)
let staticImage;     // offscreen buffer where finished faces are stamped

// Fix: these two were previously assigned without any declaration
// (implicit globals) in setup() and updateNewImage().
let faceapi;         // ml5 faceApi model handle
let roi;             // cropped region of interest (face bounding box)

let faces = [];      // Face instances currently animating
// Options passed to ml5.faceApi in setup().
const detectionOptions = {
  withLandmarks: true,    // 68-point face landmarks (used for the mask outline)
  withExpressions: true,  // per-emotion probabilities (used by Face.emotionCheck)
  withDescriptors: true,  // 128-d face descriptor vectors
  minConfidence: 0.5,     // drop detections below 50% confidence
  // Load every face-api model from the local "models" folder
  // instead of fetching from a CDN.
  Mobilenetv1Model: "models",
  FaceLandmarkModel: "models",
  FaceRecognitionModel: "models",
  FaceExpressionModel: "models",
};
// p5.js entry point: wires up the webcam, offscreen buffers, and the
// ml5 faceApi model.
function setup() {

  frameRate(60);
  pixelDensity(1); // 1:1 pixels so pixel-array indexing matches canvas size

  canvas = createCanvas(windowWidth, windowHeight);

  // Webcam feed, hidden — frames are only read via video.pixels.
  video = createCapture(VIDEO);
  video.size(480, 360);
  video.hide();

  // Scratch image that receives the masked video pixels (same size as video).
  newImg = createImage(480, 360);

  // Persistent buffer where finished Face images get stamped.
  staticImage = createGraphics(width, height);
  staticImage.background(0, 0, 0);

  // Starts loading the model; faceReady() fires when it is available.
  faceapi = ml5.faceApi(video, detectionOptions, faceReady);
}
// Per-frame render loop: repaint accumulated faces, animate the live
// ones, retire finished ones, and request a fresh detection.
function draw() {

  // Restore the accumulated "finished" faces.
  image(staticImage, 0, 0);

  // NOTE(review): background() after image() would normally overwrite the
  // canvas; with alpha 0 it appears to do nothing visible — confirm whether
  // this call (or its ordering) is intentional.
  background(0, 0, 0, 0);

  drawFaces();
  removeFinishedImages();

  // Kick off a detection each frame; results arrive async in gotFaces().
  faceapi.detect(gotFaces);

}
// ml5 callback: fires once the faceApi model has loaded; issues the
// first detection (subsequent ones are issued from draw()).
function faceReady() {
  faceapi.detect(gotFaces);
}
/**
 * ml5 detection callback.
 * Stores the results in the global `detections` and, when at least one
 * face was found, builds the landmark masks for this frame.
 *
 * @param {Error|null} error  - detection failure, if any
 * @param {Array|undefined} result - faceApi detection objects
 */
function gotFaces(error, result) {
  // Guard clause instead of the original doubly-nested ifs.
  if (error) {
    console.log(error);
    return;
  }

  detections = result;

  if (detections && detections.length > 0) {
    drawLandmarkMask(detections);
  }
}
/**
 * For each detection: build the face-outline mask (global `lm`), copy
 * the masked video pixels cropped to the face box (global `roi`), and
 * enqueue a new Face when its expression passes emotionCheck().
 *
 * Fix: the loop index was an undeclared implicit global (`for (f = 0; ...)`).
 * The redundant outer length check was dropped — an empty array simply
 * skips the loop.
 *
 * @param {Array} detections - faceApi detection objects
 */
function drawLandmarkMask(detections) {
  for (let f = 0; f < detections.length; f++) {
    let { _x, _y, _width, _height } = detections[f].alignedRect._box;
    let points = detections[f].landmarks.positions;

    updateGraphicsObject(points);            // builds global mask `lm`
    updateNewImage(_x, _y, _width, _height); // sets global `roi`

    face = new Face(roi, detections[f].expressions);

    if (face.emotionCheck()) {
      faces.push(face);
    }
  }
}
// Cross-checks with mask, to see what pixels are transparent; if they are
// not, use those pixels in the final image.
//
// Walks every RGBA quad (stride 4). Assumes lm, video and newImg are all
// 480x360 so the three pixel arrays line up index-for-index.
// Afterwards crops the result to the face bounding box into global `roi`.
function updateNewImage(_x, _y, _width, _height) {

  lm.loadPixels();
  video.loadPixels();
  newImg.loadPixels();

  for (let i = 0; i < newImg.pixels.length; i += 4) {
    if (lm.pixels[i + 3] == 255) {
      // Inside the face mask: copy the video pixel, semi-transparent.
      newImg.pixels[i + 0] = video.pixels[i + 0];
      newImg.pixels[i + 1] = video.pixels[i + 1];
      newImg.pixels[i + 2] = video.pixels[i + 2];
      newImg.pixels[i + 3] = 180;
    } else {
      // Outside the mask: fully transparent black.
      newImg.pixels[i + 0] = 0
      newImg.pixels[i + 1] = 0
      newImg.pixels[i + 2] = 0
      newImg.pixels[i + 3] = 0
    }
  }

  // The mask graphics is created fresh per detection in
  // updateGraphicsObject(); release it so each frame doesn't leak a canvas.
  lm.remove();

  newImg.updatePixels();

  // Crop the masked frame down to the face's bounding box.
  roi = newImg.get(_x, _y, _width, _height);
}
/**
 * Rebuilds the face-mask graphics (global `lm`, consumed and disposed by
 * updateNewImage): a closed filled polygon through left brow point 19,
 * the jawline points 0-16, and right brow point 24.
 *
 * @param {Array<{_x:number,_y:number}>} points - 68 faceApi landmark positions
 */
function updateGraphicsObject(points) {
  lm = createGraphics(480, 360);
  lm.background(0, 0, 0, 0);
  lm.fill(0);

  // Same vertex order as before: brow 19, jawline 0-16, brow 24.
  const outline = [points[19], ...points.slice(0, 17), points[24]];

  lm.beginShape();
  for (const p of outline) {
    lm.vertex(p._x, p._y);
  }
  lm.endShape(CLOSE);
}
// Renders every in-flight Face (global `faces`) onto the main canvas.
function drawFaces() {
  for (const liveFace of faces) {
    liveFace.drawImage();
  }
}
// Stamps each finished Face permanently onto staticImage and removes it
// from the live list. Iterates backwards so splice() does not shift the
// indices of elements still to be visited.
function removeFinishedImages() {
  for (let i = faces.length - 1; i >= 0; i--) {
    if (faces[i].isFinished) {
      staticImage.image(faces[i].image, faces[i].position.x, faces[i].position.y);
      faces.splice(i, 1);
    }
  }
}
/**
 * Debug helper: outlines the aligned bounding box of every detected face.
 * (Not called anywhere in this view of the file.)
 *
 * Fix: the loop index was an undeclared implicit global (`for (f = 0; ...)`).
 *
 * @param {Array} detections - faceApi detection objects
 */
function drawBoxs(detections) {
  for (let f = 0; f < detections.length; f++) {
    const { _x, _y, _width, _height } = detections[f].alignedRect._box;
    stroke(44, 169, 225);
    strokeWeight(1);
    noFill();
    rect(_x, _y, _width, _height);
  }
}
/**
 * Debug helper: plots all 68 landmark points of every detected face.
 *
 * Fixes: the loop index was an undeclared implicit global
 * (`for (f = 0; ...)`), and the stroke settings — identical for every
 * point — were redundantly reset inside the innermost loop.
 *
 * @param {Array} detections - faceApi detection objects
 */
function drawLandmarks(detections) {
  for (let f = 0; f < detections.length; f++) {
    const points = detections[f].landmarks.positions;
    stroke(44, 169, 225);
    strokeWeight(3);
    for (let i = 0; i < points.length; i++) {
      point(points[i]._x, points[i]._y);
    }
  }
}
/**
 * Prints the expression probabilities of the first detected face as a
 * text column starting at (x, y), one row per emotion.
 *
 * Fix: the no-face branch previously drew its text without the
 * font/size/fill styling the detection branch sets, so it inherited
 * whatever styles were active from earlier drawing. The styling is now
 * applied before the branch so both paths render identically.
 *
 * @param {Array}  detections - faceApi detection objects
 * @param {number} x          - left edge of the text column
 * @param {number} y          - baseline of the first row
 * @param {number} textYSpace - vertical spacing between rows
 */
function drawExpressions(detections, x, y, textYSpace) {
  textFont("Helvetica Neue");
  textSize(14);
  noStroke();
  fill(44, 169, 225);

  // Row order matches the original output.
  const labels = [
    "neutral: ",
    "happiness: ",
    "anger: ",
    "sad: ",
    "disgusted: ",
    "surprised: ",
    "fear: ",
  ];

  if (detections.length > 0) {
    // If at least 1 face is detected: label + percentage per emotion.
    const { neutral, happy, angry, sad, disgusted, surprised, fearful } =
      detections[0].expressions;
    const values = [neutral, happy, angry, sad, disgusted, surprised, fearful];

    for (let i = 0; i < labels.length; i++) {
      text(labels[i] + nf(values[i] * 100, 2, 2) + "%", x, y + textYSpace * i);
    }
  } else {
    // If no face is detected: bare labels only.
    for (let i = 0; i < labels.length; i++) {
      text(labels[i], x, y + textYSpace * i);
    }
  }
}