How to return a WebGLTexture as Texture to use it into the cables patch #951

Open

duckt14 opened this issue Jan 3, 2025 · 2 comments

duckt14 commented Jan 3, 2025

Hi,

I'm coding a new op for face recognition using Haar cascades, built on the jsfeat library. I've already created other ops with jsfeat in cables successfully.

The op takes a Texture as input and converts it to a canvas, as I did for the other ops; I'm not concerned with performance, since this is for didactic purposes. The code then processes the canvas data to run the face detection, and afterwards draws rectangles around the detected faces, again working on the canvas's image data. Finally, I create a variable called 'outputTexture' containing the canvas data converted to a WebGLTexture, using gl.texImage2D().

While debugging, outputTexture contains correct data, but when I output it by setting the outTexture pin with outTexture.setRef(outputTexture), in the patch I always get a blank texture with all info values shown as Null.

Am I making a mistake somewhere?
Is there a way to output the data as a texture so it can be used in cables?
I've pasted my entire code below.

Thanks!
Enrico

const jsfeat = op.require('jsfeat');

// Inputs
const inUpdate = op.inTrigger("Input Trigger");
const textureIn = op.inTexture("Input Texture");
const inResize = op.inFloat("Input Resize Video", 1.0);
const inScaleFactor = op.inFloat("Input Scale Factor", -0.5);

// Outputs
const outUpdate = op.outTrigger("Output Trigger");
const outPosition = op.outObject("Output Object");
const outTexture = op.outTexture("Output Texture");

// Offscreen canvas for CPU-side processing, plus the patch's GL context
const canvas = document.createElement('canvas');
const ctx = canvas.getContext('2d');
const gl = op.patch.cgl.gl;

const frontalfaceClassifier = {…}; // Haar classifier data (elided)

// Function to detect faces and draw rectangles
function doHaarCascadesDetection() {
    const texImg = textureIn.get();

    if (!texImg) {
        return;
    }

    const framebuffer = gl.createFramebuffer();
    gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer);

    // Bind the texture to the framebuffer
    gl.framebufferTexture2D(
        gl.FRAMEBUFFER,
        gl.COLOR_ATTACHMENT0,
        gl.TEXTURE_2D,
        texImg.tex,
        0
    );

    // Verify if the framebuffer is complete
    const status = gl.checkFramebufferStatus(gl.FRAMEBUFFER);
    if (status !== gl.FRAMEBUFFER_COMPLETE) {
        console.error("Framebuffer incomplete. Status:", status);
        gl.bindFramebuffer(gl.FRAMEBUFFER, null);
        gl.deleteFramebuffer(framebuffer);
        return;
    }

    // Read the pixels from the texture
    const pixels = new Uint8Array(texImg.width * texImg.height * 4); // RGBA = 4 bytes per pixel
    gl.readPixels(0, 0, texImg.width, texImg.height, gl.RGBA, gl.UNSIGNED_BYTE, pixels);

    // Unbind and delete the framebuffer
    gl.bindFramebuffer(gl.FRAMEBUFFER, null);
    gl.deleteFramebuffer(framebuffer);

    canvas.width = texImg.width;
    canvas.height = texImg.height;
    const w = canvas.width;
    const h = canvas.height;

    // Create an ImageData and transfer the data
    const imageData = ctx.createImageData(texImg.width, texImg.height);
    imageData.data.set(pixels);

    // Convert the image to grayscale
    const gray = new jsfeat.matrix_t(canvas.width, canvas.height, jsfeat.U8_t | jsfeat.C1_t);
    const ii_sum = new Int32Array((w+1)*(h+1));
    const ii_sqsum = new Int32Array((w+1)*(h+1));
    const ii_tilted = new Int32Array((w+1)*(h+1));

    // Process the image to prepare it for face recognition
    jsfeat.imgproc.grayscale(imageData.data, canvas.width, canvas.height, gray);
    jsfeat.imgproc.equalize_histogram(gray, gray);
    jsfeat.imgproc.compute_integral_image(gray, ii_sum, ii_sqsum, ii_tilted);

    // Detect faces
    const rects = jsfeat.haar.detect_multi_scale(
        ii_sum,
        ii_sqsum,
        ii_tilted,
        null,
        gray.cols,
        gray.rows,
        frontalfaceClassifier,
        1.15,
        2
    );

    // Group the rectangles
    const bestRect = jsfeat.haar.group_rectangles(rects, 1);

    // Draw the rectangles on the image
    ctx.putImageData(imageData, 0, 0);
    ctx.strokeStyle = "red";
    ctx.lineWidth = 2;

    for (const rect of bestRect) {
        ctx.strokeRect(rect.x, rect.y, rect.width, rect.height);
    }

    // Create a new texture with the result
    const outputTexture = gl.createTexture();
    gl.bindTexture(gl.TEXTURE_2D, outputTexture);

    // Get the image data from the canvas
    const imageDataToText = ctx.getImageData(0, 0, canvas.width, canvas.height).data;
    console.log(imageDataToText);
    console.log("Array length:", imageDataToText.length);
    console.log("Expected length:", canvas.width * canvas.height * 4);

    // Load the pixel data into the texture
    const level = 0;
    const internalFormat = gl.RGBA;
    const format = gl.RGBA;
    const type = gl.UNSIGNED_BYTE;

    // Upload the data to the texture
    gl.texImage2D(gl.TEXTURE_2D, level, internalFormat, canvas.width, canvas.height, 0, format, type, imageDataToText);

    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);

    // Now the texture should be correctly updated
    gl.bindTexture(gl.TEXTURE_2D, null);

    if (gl.isTexture(outputTexture)) {
        console.log("Texture created successfully!");
    } else {
        console.log("Error creating the texture");
    }

    const error = gl.getError();
    if (error !== gl.NO_ERROR) {
        console.error("WebGL Error:", error);
    }

    // Return the best rectangle (optional)
    if (bestRect.length > 0) {
        outPosition.setRef(bestRect);
    } else {
        outPosition.setRef(null);
    }

    //* Debugging of outputTexture
    // Set the framebuffer to read pixels from the output texture
    const framebufferOut = gl.createFramebuffer();
    gl.bindFramebuffer(gl.FRAMEBUFFER, framebufferOut);

    // Bind the texture to the framebuffer
    gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, outputTexture, 0);

    // Check if the framebuffer is valid
    if (gl.checkFramebufferStatus(gl.FRAMEBUFFER) !== gl.FRAMEBUFFER_COMPLETE) {
        console.error('Framebuffer is not complete!');
    } else {
        // Read the pixels from the texture (RGB + A = 4 values per pixel)
        const pixelsOut = new Uint8Array(canvas.width * canvas.height * 4);  // RGBA
        gl.readPixels(0, 0, canvas.width, canvas.height, gl.RGBA, gl.UNSIGNED_BYTE, pixelsOut);

        // Now pixels contain the texture data, you can examine them
        console.log(pixelsOut);
    }

    // Clean up and free the debug framebuffer
    gl.bindFramebuffer(gl.FRAMEBUFFER, null);
    gl.deleteFramebuffer(framebufferOut);

    // Set the output texture
    outTexture.setRef(outputTexture);

    // Trigger the output update
    outUpdate.trigger();
}

inUpdate.onTriggered = () => {
    if (inResize.get() >= 0) { doHaarCascadesDetection(); }
}
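
A hedged aside on the symptom: the input port delivers an object with .tex, .width and .height (that is how texImg is read above), while setRef here receives the bare WebGLTexture. The sketch below only illustrates that shape mismatch with a hypothetical wrapper object; whether a plain object satisfies a cables texture port is untested, and the fix the thread settles on uses the CanvasToTexture op instead:

// Hypothetical wrapper mirroring the shape the input port delivered;
// the property names come from how texImg is used above, not from cables docs
const wrappedTexture = {
    tex: outputTexture,
    width: canvas.width,
    height: canvas.height
};
outTexture.setRef(wrappedTexture);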
duckt14 changed the title from "How to return a WebGLTexture as Texture to use into the cables patch" to "How to return a WebGLTexture as Texture to use it into the cables patch" on Jan 3, 2025
pandrr (Member) commented Jan 6, 2025

sounds cool! would love to see this!

you could output the canvas element and then use the op CanvasToTexture

(or use the code from this op internally)
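
A minimal sketch of that first suggestion, for anyone following along: expose the canvas on an extra object output and let a CanvasToTexture op in the patch do the upload (the port name "Canvas Out" is illustrative, not from the original op):

const outCanvas = op.outObject("Canvas Out");

// ...inside doHaarCascadesDetection(), after drawing the rectangles:
// hand the canvas element itself to the patch; CanvasToTexture turns it into a texture
outCanvas.set(canvas);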

pandrr self-assigned this on Jan 6, 2025
duckt14 (Author) commented Jan 10, 2025

Thanks, I solved it using the op CanvasToTexture! I hadn't seen that it was there. 😬

These are some ops I'm preparing for my students, to introduce them to computer vision. Once the work is finished, I'll be very happy to share all the materials made with Cables!

Below are the corrected code for the op and a screenshot of the patch, in case they're useful to someone.

Thanks again! 😄

Enrico

const jsfeat = op.require('jsfeat');

// Inputs
const inUpdate = op.inTrigger("Input Trigger");
const textureIn = op.inTexture("Input Texture");
const inResize = op.inFloat("Input Resize Video", 1.0);
const inScaleFactor = op.inFloat("Input Scale Factor", -0.5);
const inEdgesDensity = op.inFloat("Input Edges Density", 0.15);
const inGaussianBlur = op.inFloat("Input Gaussian Blur Factor", 1);
const inCannyLow = op.inInt("Input Canny Low Thresh", 10);
const inCannyHigh = op.inInt("Input Canny High Thresh", 50);

// Outputs
const outUpdate = op.outTrigger("Output Trigger");
const outPosition = op.outObject("Output Object");
const outURL = op.outString("Output URL Texture");
const outTexture = op.outObject("Output Texture");

// Offscreen canvas for CPU-side processing, plus the patch's GL context
const canvas = document.createElement('canvas');
const ctx = canvas.getContext('2d');
const gl = op.patch.cgl.gl;

// Haar file for face detection
const frontalfaceClassifier = {…};

// Function to detect faces and output an <img> element with the result drawn on it
function doHaarCascadesDetection() {
    const texImg = textureIn.get();  // Get the input texture

    if (!texImg) {
        return;  // If no texture is available, do nothing
    }

    // Create a temporary framebuffer
    const framebuffer = gl.createFramebuffer();
    gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer);

    gl.framebufferTexture2D(
        gl.FRAMEBUFFER,
        gl.COLOR_ATTACHMENT0,
        gl.TEXTURE_2D,
        texImg.tex,
        0
    );

    const status = gl.checkFramebufferStatus(gl.FRAMEBUFFER);
    if (status !== gl.FRAMEBUFFER_COMPLETE) {
        console.error("Incomplete framebuffer. Status:", status);
        gl.bindFramebuffer(gl.FRAMEBUFFER, null);
        gl.deleteFramebuffer(framebuffer);
        return;
    }

    // Read pixels from the texture
    const pixels = new Uint8Array(texImg.width * texImg.height * 4);  // RGBA = 4 bytes per pixel
    gl.readPixels(0, 0, texImg.width, texImg.height, gl.RGBA, gl.UNSIGNED_BYTE, pixels);

    // Clean up the framebuffer
    gl.bindFramebuffer(gl.FRAMEBUFFER, null);
    gl.deleteFramebuffer(framebuffer);

    // Create a canvas to draw the data
    canvas.width = texImg.width;
    canvas.height = texImg.height;

    const imageData = ctx.createImageData(texImg.width, texImg.height);
    imageData.data.set(pixels);

    // Convert to grayscale
    const gray = new jsfeat.matrix_t(canvas.width, canvas.height, jsfeat.U8_t | jsfeat.C1_t);
    const ii_sum = new Int32Array((canvas.width + 1) * (canvas.height + 1));
    const ii_sqsum = new Int32Array((canvas.width + 1) * (canvas.height + 1));
    const ii_tilted = new Int32Array((canvas.width + 1) * (canvas.height + 1));
    const ii_canny = new Int32Array((canvas.width + 1) * (canvas.height + 1));

    jsfeat.imgproc.grayscale(imageData.data, canvas.width, canvas.height, gray);
    jsfeat.imgproc.equalize_histogram(gray, gray);
    jsfeat.imgproc.gaussian_blur(gray, gray, 2);
    jsfeat.imgproc.compute_integral_image(gray, ii_sum, ii_sqsum, frontalfaceClassifier.tilted ? ii_tilted : null);
    jsfeat.imgproc.canny(gray, gray, inCannyLow.get(), inCannyHigh.get());
    jsfeat.imgproc.compute_integral_image(gray, ii_canny, null, null);

    jsfeat.haar.edges_density = inEdgesDensity.get();

    // Face detection
    const rects = jsfeat.haar.detect_multi_scale(
        ii_sum,
        ii_sqsum,
        ii_tilted,
        ii_canny,
        gray.cols,
        gray.rows,
        frontalfaceClassifier,
        1.15,
        2
    );

    const bestRect = jsfeat.haar.group_rectangles(rects, 1);

    // Draw rectangles on the canvas
    ctx.putImageData(imageData, 0, 0);
    ctx.strokeStyle = "red";
    ctx.lineWidth = 2;

    for (const rect of bestRect) {
        ctx.strokeRect(rect.x, rect.y, rect.width, rect.height);
    }

    // Create an image and set the src
    const img = document.createElement("img");
    const dataURL = canvas.toDataURL("image/png");
    img.src = dataURL;

    outTexture.set(img); // the CanvasToTexture op downstream converts this into a texture
}

// Run detection only while all numeric parameters are non-negative
function paramsValid() {
    return inResize.get() >= 0 &&
        inEdgesDensity.get() >= 0 &&
        inGaussianBlur.get() >= 0 &&
        inCannyLow.get() >= 0 &&
        inCannyHigh.get() >= 0;
}

inUpdate.onTriggered = () => { if (paramsValid()) doHaarCascadesDetection(); };

inEdgesDensity.onChange = () => { if (paramsValid()) doHaarCascadesDetection(); };
inGaussianBlur.onChange = () => { if (paramsValid()) doHaarCascadesDetection(); };
inCannyLow.onChange = () => { if (paramsValid()) doHaarCascadesDetection(); };
inCannyHigh.onChange = () => { if (paramsValid()) doHaarCascadesDetection(); };
[Screenshot of the patch, 2025-01-10]
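
Side note for anyone adapting this: the dataURL round-trip works, but WebGL's texImage2D can also consume a canvas element directly, which is essentially what a canvas-to-texture conversion boils down to. A rough sketch of such an upload (a hypothetical standalone helper, not the actual CanvasToTexture op source):

// Hypothetical helper: upload a canvas directly into a new WebGL texture.
// texImage2D accepts a TexImageSource (canvas, img, video), so no
// getImageData()/toDataURL round-trip is required.
function uploadCanvasToTexture(gl, sourceCanvas) {
    const tex = gl.createTexture();
    gl.bindTexture(gl.TEXTURE_2D, tex);
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, sourceCanvas);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
    gl.bindTexture(gl.TEXTURE_2D, null);
    return tex;
}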

pandrr removed their assignment on Jan 13, 2025