Face detection, background blur and eye gaze correction example
eehakkin committed Feb 5, 2022
1 parent 6cee839 commit 2f69d95
Showing 1 changed file with 112 additions and 0 deletions.
index.html

@@ -844,6 +844,118 @@ <h3>Examples</h3>
.pipeTo(e.data.videoWritable);
}
</pre>
<pre class="example">
// main.js:
// Open camera.
const stream = await navigator.mediaDevices.getUserMedia({video: true});
const [videoTrack] = stream.getVideoTracks();

// Apply constraints.
let detectFaces = false;
let faceDetectionMode;
const videoCapabilities = videoTrack.getCapabilities();
if (videoCapabilities.backgroundBlur) {
// The platform supports background blurring.
// Let's apply the background blur constraint here.
await videoTrack.applyConstraints({
advanced: [{backgroundBlur: videoCapabilities.backgroundBlur.max}]
});
} else if ((videoCapabilities.faceDetectionMode || []).includes('contour')) {
// The platform supports face contour detection but not background blurring.
// Let's apply the face contour detection constraint here so that
// the worker has the detected face contours available while it blurs
// the background.
faceDetectionMode ||= 'contour';
await videoTrack.applyConstraints({
advanced: [{faceDetectionMode}]
});
} else {
// The platform supports neither background blurring nor face contour
// detection. Therefore, the worker has to detect face contours before it
// blurs the background.
detectFaces = true;
}
if (videoCapabilities.eyeGazeCorrection) {
// The platform supports eye gaze correction.
// Let's apply the eye gaze correction constraint here.
await videoTrack.applyConstraints({
advanced: [{eyeGazeCorrection: true}]
});
} else if ((videoCapabilities.faceDetectionLandmarks || []).includes(true)) {
// The platform supports face landmark detection but not eye gaze correction.
// Let's apply the face landmark detection constraints here so that
// the worker has the detected face landmarks available while it corrects
// the eye gaze.
faceDetectionMode ||= 'presence';
await videoTrack.applyConstraints({
advanced: [{
faceDetectionLandmarks: true,
faceDetectionMode
}]
});
} else {
// The platform supports neither eye gaze correction nor face landmark
// detection. Therefore, the worker has to detect face landmarks before it
// corrects the eye gaze.
detectFaces = true;
}

// Use a video worker and show the result to the user.
const videoElement = document.querySelector("video");
const videoGenerator = new MediaStreamTrackGenerator({kind: 'video'});
const videoProcessor = new MediaStreamTrackProcessor({track: videoTrack});
const videoSettings = videoTrack.getSettings();
const videoWorker = new Worker('video-worker.js');
videoWorker.postMessage({
options: {
blurBackground: !videoSettings.backgroundBlur,
correctEyeGaze: !videoSettings.eyeGazeCorrection,
detectFaces
},
videoReadable: videoProcessor.readable,
videoWritable: videoGenerator.writable
}, [videoProcessor.readable, videoGenerator.writable]);
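// Render the processed track produced by the generator in the video element.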
videoElement.onloadedmetadata = e => videoElement.play();
videoElement.srcObject = new MediaStream([videoGenerator]);

// video-worker.js:
self.onmessage = async function(e) {
// Load custom libraries which may utilize TensorFlow and/or WASM.
const requiredScripts = [].concat(
e.data.options.blurBackground ? 'background.js' : [],
e.data.options.correctEyeGaze ? 'eye-gaze.js' : [],
e.data.options.detectFaces ? 'face.js' : []
);
importScripts(...requiredScripts);
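// These scripts are assumed to provide the detectFaces(), blurBackground()
// and correctEyeGaze() helpers used by the transformer below.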
// Create a video transformer.
const videoTransformer = new TransformStream({
async transform(videoFrame, controller) {
// Detect faces or retrieve detected faces.
const detectedFaces =
e.data.options.detectFaces
? await detectFaces(videoFrame)
: videoFrame.detectedFaces;
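// (When the platform performs face detection, the detected faces are
// expected to arrive attached to the frame as videoFrame.detectedFaces.)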
// Blur the background if needed.
if (e.data.options.blurBackground) {
const newVideoFrame = await blurBackground(videoFrame, detectedFaces);
videoFrame.close();
videoFrame = newVideoFrame;
}
// Correct the eye gaze if needed.
if (e.data.options.correctEyeGaze &amp;&amp; (detectedFaces || []).length &gt; 0) {
const newVideoFrame = await correctEyeGaze(videoFrame, detectedFaces);
videoFrame.close();
videoFrame = newVideoFrame;
}
controller.enqueue(videoFrame);
}
});
// Insert the video transformer into the video pipeline.
e.data.videoReadable
.pipeThrough(videoTransformer)
.pipeTo(e.data.videoWritable);
};
</pre>
</section>
</section>
</body>