From ca38b7c7f4578167415009648a8bad57eef64496 Mon Sep 17 00:00:00 2001
From: Somdev
Date: Tue, 4 Feb 2025 20:04:05 +0530
Subject: [PATCH] bounding box implemented

---
 .../RealtimeFaceDetection.tsx | 61 ++++++++++++++-----
 1 file changed, 47 insertions(+), 14 deletions(-)

diff --git a/components/realtimeFaceDetection/RealtimeFaceDetection.tsx b/components/realtimeFaceDetection/RealtimeFaceDetection.tsx
index 0439566..d9a4697 100644
--- a/components/realtimeFaceDetection/RealtimeFaceDetection.tsx
+++ b/components/realtimeFaceDetection/RealtimeFaceDetection.tsx
@@ -36,32 +36,65 @@ const RealtimeFaceDetection = () => {
   }, [toast]);
 
   const detectFace = async () => {
-    if (!webcamRef.current || !webcamRef.current.video) return;
+    if (!webcamRef.current || !webcamRef.current.video || !canvasRef.current)
+      return;
 
     const video = webcamRef.current.video;
-    const canvas = document.createElement("canvas");
+    const canvas = canvasRef.current;
     const context = canvas.getContext("2d");
     if (!context) return;
 
+    // Set canvas size to match video
     canvas.width = video.videoWidth;
     canvas.height = video.videoHeight;
 
-    context.drawImage(video, 0, 0, canvas.width, canvas.height);
+    context.clearRect(0, 0, canvas.width, canvas.height); // Clear previous drawings
 
-    // Convert the canvas to a Blob (image file)
-    canvas.toBlob(async (blob) => {
-      if (!blob) return;
+    // Mirror the canvas context to match the mirrored video
+    context.translate(canvas.width, 0); // Move the origin to the right side of the canvas
+    context.scale(-1, 1); // Flip the context horizontally
 
-      // Detect face
-      const detections = await faceapi
-        .detectSingleFace(video, new faceapi.TinyFaceDetectorOptions())
-        .withFaceLandmarks()
-        .withFaceDescriptor();
+    // Detect face
+    const detections = await faceapi
+      .detectSingleFace(video, new faceapi.TinyFaceDetectorOptions())
+      .withFaceLandmarks()
+      .withFaceDescriptor();
 
-      if (detections) {
-        sendFaceDataToAPI(blob);
+    if (detections) {
+      // Draw bounding box
+      const { x, y, width, height } = detections.detection.box;
+      context.strokeStyle = "red"; // Box color
+      context.lineWidth = 3;
+      context.strokeRect(x, y, width, height);
+
+      // Capture the face as an image
+      const imageCanvas = document.createElement("canvas");
+      const imageContext = imageCanvas.getContext("2d");
+
+      if (imageContext) {
+        imageCanvas.width = video.videoWidth;
+        imageCanvas.height = video.videoHeight;
+
+        // Mirror the image context as well
+        imageContext.translate(imageCanvas.width, 0);
+        imageContext.scale(-1, 1);
+
+        imageContext.drawImage(
+          video,
+          0,
+          0,
+          imageCanvas.width,
+          imageCanvas.height
+        );
+
+        // Convert to Blob and send
+        imageCanvas.toBlob((blob) => {
+          if (blob) {
+            sendFaceDataToAPI(blob);
+          }
+        }, "image/jpeg");
       }
-    }, "image/jpeg"); // Save image as JPEG
+    }
   };
 
   const sendFaceDataToAPI = async (imageBlob: Blob) => {