Added displaying of message when loading models
simonguest committed Nov 14, 2023
1 parent fa1a735 commit bce6ccc
Showing 8 changed files with 124 additions and 92 deletions.
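The commit threads two UI callbacks, displayMessage and clearMessage, from the CV facade through ObjectDetection, ImageSegmentation, PoseEstimation, and Webcam down into the MediaPipe model-loading code, so a status overlay is shown only while a model is actually being loaded (or the camera is starting) and is cleared once the work completes. The following is a minimal sketch of that callback pattern, not code from this repository; MessageFn, modelCache, and loadModelWithStatus are hypothetical names used only for illustration.

// Sketch (assumed names): a loader receives displayMessage/clearMessage so the
// UI shows a status overlay only while a slow load is actually in progress.

type MessageFn = (message: string) => void | Promise<void>;

// Hypothetical cache, standing in for MediaPipe.getModel/cacheModel in this repo.
const modelCache = new Map<string, unknown>();

async function loadModelWithStatus<T>(
  key: string,
  loader: () => Promise<T>,
  displayMessage: MessageFn,
  clearMessage: () => void | Promise<void>,
): Promise<T> {
  let model = modelCache.get(key) as T | undefined;
  if (!model) {
    // Show the overlay only when a real (slow) load is about to happen.
    await displayMessage("Loading model...");
    model = await loader();
    modelCache.set(key, model);
    await clearMessage();
  }
  return model;
}

A caller would pass its own overlay functions, for example keying the cache on `${model.path}_${delegate}` as the diffs below do.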
1 change: 1 addition & 0 deletions index.html
@@ -38,6 +38,7 @@
 <div id="run-area">
 <div id="canvas-container">
 <canvas id="image-canvas"></canvas>
+<canvas id="message-canvas" width="512" height="512"></canvas>
 </div>
 <video id="hidden-video" width="1024" height="1024" autoplay></video>
 <canvas id="hidden-video-canvas" width="1024" height="1024"></canvas>
4 changes: 2 additions & 2 deletions src/cv/imageSegmentation.ts
@@ -59,8 +59,8 @@ export class ImageSegmentation {
 
 }
 
-public async segment(mp: MediaPipe, image: ImageData, model: ModelData, delegate: string) {
+public async segment(mp: MediaPipe, image: ImageData, model: ModelData, delegate: string, displayMessage: any, clearMessage: any) {
 Debug.write("Segment Image");
-return await mp.segment(image, model, delegate);
+return await mp.segment(image, model, delegate, displayMessage, clearMessage);
 }
 }
43 changes: 39 additions & 4 deletions src/cv/index.ts
@@ -38,6 +38,8 @@ export class CV {
 *************************/
 
 private imageCanvas = document.getElementById("image-canvas") as HTMLCanvasElement;
+private messageCanvas = document.getElementById("message-canvas") as HTMLCanvasElement;
+
 private hiddenImageCanvas = document.getElementById("hidden-image-canvas") as HTMLCanvasElement;
 private boundingBoxCanvas = document.getElementById("bounding-box-canvas") as HTMLCanvasElement;
 private segmentationMaskCanvas = document.getElementById("segmentation-mask-canvas") as HTMLCanvasElement;
@@ -57,7 +59,7 @@
 
 public async startWebcam(deviceId: string) {
 try {
-await this.webcam.start(deviceId, this.imageCanvas, this.width, this.height);
+await this.webcam.start(deviceId, this.imageCanvas, this.width, this.height, this.displayMessage, this.clearMessage);
 } catch (err) {
 Debug.write(err);
 }
@@ -121,7 +123,7 @@
 *************************/
 
 public async detectObjects(image: ImageData, model: ModelData, delegate: string) {
-return this.objectDetection.detectObjects(this.mp, image, model, delegate);
+return this.objectDetection.detectObjects(this.mp, image, model, delegate, this.displayMessage, this.clearMessage);
 }
 
 public async drawBoundingBoxes(result: ObjectDetectorResult) {
@@ -137,7 +139,7 @@
 *************************/
 
 public async segment(image: ImageData, model: ModelData, delegate: string) {
-return this.imageSegmentation.segment(this.mp, image, model, delegate);
+return this.imageSegmentation.segment(this.mp, image, model, delegate, this.displayMessage, this.clearMessage);
 }
 
 public async colorSegment(data: { result: ImageSegmenterResult, category: number }, rgb: number[]) {
@@ -156,7 +158,7 @@
 *************************/
 
 public async detectPose(image: ImageData, model: ModelData, delegate: string) {
-return await this.poseEstimation.detectPose(this.mp, image, model, delegate);
+return await this.poseEstimation.detectPose(this.mp, image, model, delegate, this.displayMessage, this.clearMessage);
 }
 
 public async drawPose(pose: PoseLandmarkerResult) {
@@ -215,15 +217,47 @@
 public clearCanvasCollection() {
 Debug.write("Clearing canvas collection");
 this.clearCanvas(this.imageCanvas);
+this.clearCanvas(this.messageCanvas);
 this.clearCanvas(this.boundingBoxCanvas);
 this.clearCanvas(this.segmentationMaskCanvas);
 this.clearCanvas(this.poseCanvas);
 this.clearCanvas(this.userCanvas);
 }
 
+public async displayMessage(message: string) {
+const wait = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));
+const messageCanvas = document.getElementById("message-canvas") as HTMLCanvasElement;
+const ctx = messageCanvas.getContext("2d");
+
+Debug.write("Displaying message");
+ctx.clearRect(0, 0, ctx.canvas.clientWidth, ctx.canvas.clientHeight);
+// Create a black background with 50% transparency
+ctx.fillStyle = "rgba(0, 0, 0, 0.5)";
+ctx.fillRect(0, 0, ctx.canvas.clientWidth, ctx.canvas.clientHeight);
+// Display the message
+ctx.font = "30px Arial";
+ctx.fillStyle = "white";
+ctx.textAlign = "center";
+ctx.fillText(message, ctx.canvas.clientWidth / 2, ctx.canvas.clientHeight / 2);
+
+// Wait for 1ms to allow the message to be displayed
+await wait(1);
+}
+
+public async clearMessage(){
+const wait = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));
+const messageCanvas = document.getElementById("message-canvas") as HTMLCanvasElement;
+const ctx = messageCanvas.getContext("2d");
+
+Debug.write("Clearing message");
+ctx.clearRect(0, 0, ctx.canvas.clientWidth, ctx.canvas.clientHeight);
+await wait(1);
+}
+
 public async init() {
 Debug.write("Initializing CV");
 Debug.write("Detecting Webcam devices");
+await this.displayMessage("Searching for cameras");
 try {
 let currentStream = await navigator.mediaDevices.getUserMedia({ video: true });
 let devices = await navigator.mediaDevices.enumerateDevices();
@@ -235,6 +269,7 @@
 } catch (err) {
 Debug.write("Error detecting webcam devices: " + err);
 }
+await this.clearMessage();
 
 Debug.write("Initializing mediapipe")
 await this.mp.init();
12 changes: 9 additions & 3 deletions src/cv/mediapipe.ts
@@ -29,9 +29,10 @@ export class MediaPipe {
 this.models[path] = model;
 }
 
-public async detectObjects(image: ImageData, model: ModelData, delegate) {
+public async detectObjects(image: ImageData, model: ModelData, delegate, displayMessage: any, clearMessage: any) {
 let detector = this.getModel(`${model.path}_${delegate}`);
 if (!detector) {
+displayMessage("Loading model...");
 detector = await ObjectDetector.createFromOptions(this.vision, {
 baseOptions: {
 modelAssetPath: model.path,
@@ -41,15 +42,17 @@
 runningMode: "IMAGE",
 });
 this.cacheModel(`${model.path}_${delegate}`, detector);
+clearMessage();
 }
 
 return detector.detect(image);
 }
 
-public async segment(image: ImageData, model: ModelData, delegate: string) {
+public async segment(image: ImageData, model: ModelData, delegate: string, displayMessage: any, clearMessage: any) {
 if (delegate !== 'GPU' && delegate !== 'CPU') return;
 let segmenter = this.getModel(`${model.path}_${delegate}`);
 if (!segmenter) {
+displayMessage("Loading model...")
 segmenter = await ImageSegmenter.createFromOptions(this.vision, {
 baseOptions: {
 modelAssetPath: model.path,
@@ -60,15 +63,17 @@
 outputConfidenceMasks: false
 });
 this.cacheModel(`${model.path}_${delegate}`, segmenter);
+clearMessage();
 }
 
 return { result: segmenter.segment(image), category: model.category };
 }
 
-public async detectPose(image: ImageData, model: ModelData, delegate: string) {
+public async detectPose(image: ImageData, model: ModelData, delegate: string, displayMessage: any, clearMessage: any) {
 if (delegate !== 'GPU' && delegate !== 'CPU') return;
 let poseLandmarker = this.getModel(`${model.path}_${delegate}`);
 if (!poseLandmarker) {
+displayMessage("Loading model...")
 poseLandmarker = await PoseLandmarker.createFromOptions(this.vision, {
 baseOptions: {
 modelAssetPath: model.path,
@@ -78,6 +83,7 @@
 numPoses: 1,
 });
 this.cacheModel(`${model.path}_${delegate}`, poseLandmarker);
+clearMessage();
 }
 return poseLandmarker.detect(image);
 }
10 changes: 5 additions & 5 deletions src/cv/objectDetection.ts
@@ -4,13 +4,13 @@ import { ObjectDetectorResult } from "@mediapipe/tasks-vision";
 
 export class ObjectDetection {
 
-private BOUNDING_BOX_COLOR : string = "#777777";
-private BOUNDING_BOX_FONT : string = "14px Arial";
-private BOUNDING_BOX_FONT_COLOR : string = "#ffffff";
+private BOUNDING_BOX_COLOR: string = "#777777";
+private BOUNDING_BOX_FONT: string = "14px Arial";
+private BOUNDING_BOX_FONT_COLOR: string = "#ffffff";
 
-public async detectObjects(mediapipe: MediaPipe, image: ImageData, model: ModelData, delegate: string) {
+public async detectObjects(mediapipe: MediaPipe, image: ImageData, model: ModelData, delegate: string, displayMessage: any, clearMessage: any) {
 Debug.write("Detecting objects");
-return await mediapipe.detectObjects(image, model, delegate);
+return await mediapipe.detectObjects(image, model, delegate, displayMessage, clearMessage);
 }
 
 public async displayBoundingBoxes(canvas: HTMLCanvasElement, result: ObjectDetectorResult) {
4 changes: 2 additions & 2 deletions src/cv/poseEstimation.ts
@@ -4,9 +4,9 @@ import { PoseLandmarkerResult } from "@mediapipe/tasks-vision";
 
 export class PoseEstimation {
 
-public async detectPose(mp: MediaPipe, image: ImageData, model: ModelData, delegate: string) {
+public async detectPose(mp: MediaPipe, image: ImageData, model: ModelData, delegate: string, displayMessage: any, clearMessage: any) {
 Debug.write("Detecting pose");
-return await mp.detectPose(image, model, delegate);
+return await mp.detectPose(image, model, delegate, displayMessage, clearMessage);
 }
 
 public async displayPose(canvas: HTMLCanvasElement, pose: PoseLandmarkerResult, width: number, height: number) {
22 changes: 4 additions & 18 deletions src/cv/webcam.ts
@@ -12,24 +12,7 @@ export class Webcam {
 
 private wait = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));
 
-private displayStartingWebCamImage(canvas: HTMLCanvasElement, width: number, height: number) {
-canvas.width = width;
-canvas.height = height;
-const ctx = canvas.getContext("2d");
-ctx.fillRect(0, 0, width, height);
-ctx.font = '28px sans-serif'; // You can adjust the size and font-family to your liking
-ctx.fillStyle = 'white';
-ctx.textAlign = 'center';
-ctx.textBaseline = 'middle';
-
-// Draw the text in the center of the canvas
-ctx.fillText('Starting Webcam', canvas.width / 2, canvas.height / 2);
-}
-
-public async start(deviceId: string, canvas: HTMLCanvasElement, width: number, height: number) {
-Debug.write("Displaying starting webcam image");
-this.displayStartingWebCamImage(canvas, width, height);
-
+public async start(deviceId: string, canvas: HTMLCanvasElement, width: number, height: number, displayMessage: any, clearMessage: any) {
 Debug.write(`Starting webcam`);
 return new Promise(async (resolve, reject) => {
 if (this.currentStream) {
@@ -38,17 +21,20 @@
 }
 let that = this;
 if (deviceId === "no-webcam") return reject("No webcam detected");
+displayMessage("Starting camera...");
 navigator.mediaDevices.getUserMedia({video: {deviceId: deviceId}})
 .then(async function (stream) {
 that.currentStream = stream;
 that.videoElement.srcObject = stream;
 that.videoElement.addEventListener("canplay", that.canPlayCallback, false);
 await that.videoElement.play();
 await that.wait(500); // Wait for the webcam to receive enough light after startup before capturing an image
+clearMessage();
 resolve(that.currentStream);
 })
 .catch(function (err) {
 Debug.write("An error occurred: " + err);
+clearMessage();
 reject("An error occurred: " + err);
 });
 });
(Diff for 1 more changed file not shown.)
