Open In App

How to Implement Face Detection with ml5JS?

Last Updated : 17 Sep, 2024
Comments
Improve
Suggest changes
Like Article
Like
Report

Face detection is a fundamental technology in computer vision, used to detect and locate human faces in digital images or videos. It serves as a stepping stone for many other applications, including facial recognition, emotion detection, and augmented reality. In this article, we will explore how to implement face detection in JavaScript using the ml5.js library. ml5.js simplifies the integration of machine learning models in web applications, offering an easy-to-use API for developers.

Prerequisites

The following approaches can be used to implement face detection with ml5.js:

Using Pre-trained Models (FaceAPI)

The easiest approach to implement face detection with ml5.js is by using the pre-trained FaceAPI. This API simplifies the process of detecting faces in real-time video streams, including adding bounding boxes around the detected faces.

Syntax:

ml5.faceApi(video, options, modelReady); 

Example: In this approach, we have used the pre-trained FaceAPI model from ml5.js to detect faces in real time through the webcam feed. The FaceAPI model is robust and efficient, which allows for quick detection of faces in both static images and video streams.

HTML
<!DOCTYPE html>
<html>

<head>
    <meta charset="UTF-8" />
    <title>face-api</title>
    <style>
        body {
            display: flex;
            justify-content: center;
            align-items: center;
            flex-direction: column;
            font-family: Arial, sans-serif;
            background-color: #f0f0f0;
        }

        h1 {
            margin-bottom: 20px;
        }

        /* Canvas overlays the video: same centering, higher z-index. */
        #canvas {
            position: absolute;
            top: 50%;
            left: 50%;
            transform: translate(-50%, -50%);
            z-index: 1;
        }

        #video {
            position: absolute;
            top: 50%;
            left: 50%;
            transform: translate(-50%, -50%);
            z-index: 0;
            border-radius: 10px;
        }

        /* Animated placeholder shown while the model loads. */
        #loader {
            height: 50px;
            aspect-ratio: 2;
            border: 10px solid #000;
            box-sizing: border-box;
            background:
                radial-gradient(farthest-side, #fff 98%, #0000) left/20px 20px,
                radial-gradient(farthest-side, #fff 98%, #0000) left/20px 20px,
                radial-gradient(farthest-side, #fff 98%, #0000) center/20px 20px,
                radial-gradient(farthest-side, #fff 98%, #0000) right/20px 20px,
                #000;
            background-repeat: no-repeat;
            filter: blur(4px) contrast(10);
            animation: l14 1s infinite;
        }

        @keyframes l14 {
            100% {
                background-position: right, left, center, right
            }
        }
    </style>
    <!-- p5.js and ml5.js must load before app.js so their globals exist. -->
    <script src=
"https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.1.9/p5.js"></script>
    <script src=
"https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.1.9/addons/p5.sound.js"></script>
    <script src=
"https://cdn.jsdelivr.net/gh/ml5js/Intro-ML-Arts-IMA@ml5-build-10-7-19/ml5_build/ml5.min.js"></script>
</head>

<body>
    <!-- Visible content belongs in <body>, not <head>. -->
    <h1>Face Detection with ml5.js</h1>
    <div id="loader"></div>
    <video id="video" width="640" height="480" autoplay></video>
    <canvas id="canvas" width="640" height="480"></canvas>
    <script src="app.js"></script>
</body>

</html>
JavaScript
// ml5 FaceAPI model handle, assigned in setup() once the model is created.
let faceapi;
// Latest detection results from faceapi.detect(); read by drawDetections().
let detections = [];

// Hidden p5 webcam capture element; its frames are drawn onto the canvas.
let video;
// p5.js entry point: wire up the webcam feed, the drawing surface, and the
// ml5 FaceAPI model. faceReady fires once the model has finished loading.
function setup() {
    // Capture the webcam stream; hide the raw <video> element because the
    // frames are rendered onto the p5 canvas instead.
    video = createCapture(VIDEO);
    video.size(640, 480);
    video.hide();

    // Drawing surface sized to match the video feed.
    createCanvas(640, 480);

    // Create the FaceAPI model with landmarks, expressions and descriptors
    // enabled, keeping only detections above 50% confidence.
    faceapi = ml5.faceApi(
        video,
        {
            withLandmarks: true,
            withExpressions: true,
            withDescriptors: true,
            minConfidence: 0.5,
        },
        faceReady
    );
}

// Model-loaded callback: kick off the first detection pass. gotFaces
// re-invokes detect() itself, forming a continuous detection loop.
function faceReady() {
    faceapi.detect(gotFaces);
}
// Detection callback: cache the results, redraw the overlay, and schedule
// the next detection pass so the loop runs continuously.
function gotFaces(err, results) {
    // Bail out early on failure so the loop does not continue on bad data.
    if (err) {
        console.log(err);
        return;
    }

    detections = results;       // cache the latest detections globally
    clear();                    // wipe the previous frame's overlay
    drawDetections(detections); // draw the bounding boxes
    faceapi.detect(gotFaces);   // queue detection for the next frame
}
// Draw the current video frame and a green bounding box around each face.
//
// Fix: the original declared no parameter even though its caller passes
// `detections` (the argument was silently ignored and the function relied on
// the global instead). It also logged to the console on every frame. The
// parameter now defaults to the global, so both call styles keep working.
//
// @param {Array} dets - FaceAPI detection results; each entry carries an
//   `alignedRect._box` with the face bounds in canvas pixels.
function drawDetections(dets = detections) {
    // Paint the latest webcam frame as the backdrop.
    image(video, 0, 0, width, height);

    for (const detection of dets) {
        const { _x, _y, _width, _height } = detection.alignedRect._box;
        noFill();
        stroke(0, 255, 0); // green outline
        strokeWeight(2);
        rect(_x, _y, _width, _height);
    }
}

Output:

Screenshot-2024-09-13-224859

Customizing Detection with Landmarks and Descriptors

This method not only detects faces but also identifies key landmarks like eyes, nose, and mouth, enabling advanced features such as emotion recognition and gesture tracking. It also includes unique descriptors for face identification, making it perfect for detailed applications in augmented reality and security.

Syntax:

ml5.faceApi(video, { withLandmarks: true, withDescriptors: true }, modelReady); 

Example: This example enhances the previous approach by adding facial landmarks with red circles, offering more detailed face analysis.

HTML
<!DOCTYPE html>
<html>

<head>
    <meta charset="UTF-8" />
    <title>face-api</title>
    <style>
        /* Canvas overlays the video: same centering, higher z-index. */
        #canvas {
            position: absolute;
            top: 50%;
            left: 50%;
            transform: translate(-50%, -50%);
            z-index: 1;
        }

        #video {
            position: absolute;
            top: 50%;
            left: 50%;
            transform: translate(-50%, -50%);
            z-index: 0;
            border: 3px #fff solid;
            border-radius: 10px;
        }
    </style>
    <!-- p5.js and ml5.js must load before app.js so their globals exist. -->
    <script src=
"https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.1.9/p5.js">
    </script>
    <script src=
"https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.1.9/addons/p5.sound.js">
    </script>
    <script src=
"https://cdn.jsdelivr.net/gh/ml5js/Intro-ML-Arts-IMA@ml5-build-10-7-19/ml5_build/ml5.min.js">
    </script>
</head>

<body>
    <!-- Visible content belongs in <body>, not <head>. -->
    <h1>Face Detection with ml5.js</h1>
    <video id="video" width="640" height="480" autoplay>
    </video>
    <canvas id="canvas" width="640" height="480">
    </canvas>
    <script src="app.js"></script>
</body>

</html>
JavaScript
// ml5 FaceAPI model handle, assigned in setup() once the model is created.
let faceapi;
// Latest detection results from faceapi.detect(); shared across callbacks.
let detections = [];
// Hidden p5 webcam capture element; its frames feed the detector.
let video;

// p5.js entry point: set up the webcam capture, the canvas overlay, and the
// ml5 FaceAPI model. faceReady runs once the model has loaded.
function setup() {
    // Start the webcam and hide the raw element; the canvas shows the feed.
    video = createCapture(VIDEO);
    video.size(640, 480);
    video.hide();

    // Overlay canvas matching the video dimensions.
    createCanvas(640, 480);

    // FaceAPI configuration: landmarks, expressions and descriptors on,
    // detections below 50% confidence discarded.
    const options = {
        withLandmarks: true,
        withExpressions: true,
        withDescriptors: true,
        minConfidence: 0.5,
    };

    faceapi = ml5.faceApi(video, options, faceReady);
}

// Model-loaded callback: start the first detection pass. gotFaces calls
// detect() again itself, so detection keeps running frame after frame.
function faceReady() {
    faceapi.detect(gotFaces);
}
// Detection callback: store the results, redraw the landmark overlay, and
// schedule the next detection pass.
//
// Bug fix: the original never assigned `result` to the global `detections`,
// so drawLandmarks always received the initial empty array and no points
// were ever drawn.
function gotFaces(error, result) {
    if (error) {
        console.log(error);
        return;
    }

    detections = result; // store the latest results (was missing)
    clear(); // transparent background so stale points are erased
    drawLandmarks(detections); // draw all the face landmark points
    faceapi.detect(gotFaces); // keep the detection loop running
}


// Draw each facial landmark position as a small green dot for every
// detected face.
//
// Fixes: the original loop counter `f` was undeclared, creating an implicit
// global (a ReferenceError in strict mode), and it logged to the console on
// every frame.
//
// @param {Array} detections - FaceAPI results; each entry exposes
//   `landmarks.positions`, an array of points with `_x`/`_y` coordinates.
function drawLandmarks(detections) {
    for (const face of detections) {
        for (const pt of face.landmarks.positions) {
            stroke(47, 255, 0); // point color
            strokeWeight(5); // point size
            point(pt._x, pt._y);
        }
    }
}

Output:


Next Article

Similar Reads