Add multiple-face detection and recognition

This commit is contained in:
Somdev Das 2025-02-05 13:36:40 +05:30
parent ca38b7c7f4
commit d50ac4ed64

View File

@ -8,6 +8,7 @@ import { Camera } from "lucide-react";
import { useToast } from "@/hooks/use-toast"; import { useToast } from "@/hooks/use-toast";
const MODEL_URL = "https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model"; const MODEL_URL = "https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model";
const PADDING = 50; // Padding around face in pixels
const RealtimeFaceDetection = () => { const RealtimeFaceDetection = () => {
const webcamRef = useRef<Webcam>(null); const webcamRef = useRef<Webcam>(null);
@ -35,9 +36,42 @@ const RealtimeFaceDetection = () => {
loadModels(); loadModels();
}, [toast]); }, [toast]);
/**
 * Crops the detected face region out of the live video frame onto an
 * offscreen canvas, expanding the detection box by PADDING pixels on every
 * side while clamping the crop rectangle to the video bounds.
 *
 * @param video - The webcam <video> element currently playing the stream.
 * @param box - The face bounding box reported by face-api.
 * @returns A detached canvas containing only the padded face crop.
 */
const extractFaceWithPadding = (
  video: HTMLVideoElement,
  box: faceapi.Box
): HTMLCanvasElement => {
  // Clamp the padded origin so it never leaves the top/left edge of the frame.
  const left = Math.max(0, box.x - PADDING);
  const top = Math.max(0, box.y - PADDING);
  // Shrink the padded size if it would spill past the right/bottom edge.
  const cropWidth = Math.min(video.videoWidth - left, box.width + 2 * PADDING);
  const cropHeight = Math.min(video.videoHeight - top, box.height + 2 * PADDING);

  const faceCanvas = document.createElement("canvas");
  faceCanvas.width = cropWidth;
  faceCanvas.height = cropHeight;

  const ctx = faceCanvas.getContext("2d");
  if (ctx) {
    // 9-arg drawImage: copy the padded source rect 1:1 into the crop canvas.
    ctx.drawImage(
      video,
      left,
      top,
      cropWidth,
      cropHeight,
      0,
      0,
      cropWidth,
      cropHeight
    );
  }
  return faceCanvas;
};
const detectFace = async () => { const detectFace = async () => {
if (!webcamRef.current || !webcamRef.current.video || !canvasRef.current) if (!webcamRef.current?.video || !canvasRef.current) return;
return;
const video = webcamRef.current.video; const video = webcamRef.current.video;
const canvas = canvasRef.current; const canvas = canvasRef.current;
@ -45,7 +79,6 @@ const RealtimeFaceDetection = () => {
if (!context) return; if (!context) return;
// Set canvas size to match video
canvas.width = video.videoWidth; canvas.width = video.videoWidth;
canvas.height = video.videoHeight; canvas.height = video.videoHeight;
context.clearRect(0, 0, canvas.width, canvas.height); // Clear previous drawings context.clearRect(0, 0, canvas.width, canvas.height); // Clear previous drawings
@ -54,46 +87,28 @@ const RealtimeFaceDetection = () => {
context.translate(canvas.width, 0); // Move the origin to the right side of the canvas context.translate(canvas.width, 0); // Move the origin to the right side of the canvas
context.scale(-1, 1); // Flip the context horizontally context.scale(-1, 1); // Flip the context horizontally
// Detect face // Detect all faces
const detections = await faceapi const detections = await faceapi
.detectSingleFace(video, new faceapi.TinyFaceDetectorOptions()) .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
.withFaceLandmarks() .withFaceLandmarks()
.withFaceDescriptor(); .withFaceDescriptors();
if (detections) { for (const detection of detections) {
// Draw bounding box // Draw box for visualization
const { x, y, width, height } = detections.detection.box; const { box } = detection.detection;
context.strokeStyle = "red"; // Box color context.strokeStyle = "#00FF00";
context.lineWidth = 3; context.lineWidth = 2;
context.strokeRect(x, y, width, height); context.strokeRect(box.x, box.y, box.width, box.height);
// Capture the face as an image // Extract face with padding and send to API
const imageCanvas = document.createElement("canvas"); const faceCanvas = extractFaceWithPadding(video, box);
const imageContext = imageCanvas.getContext("2d"); faceCanvas.toBlob(
(blob) => {
if (imageContext) { if (blob) sendFaceDataToAPI(blob);
imageCanvas.width = video.videoWidth; },
imageCanvas.height = video.videoHeight; "image/jpeg",
0.95
// Mirror the image context as well
imageContext.translate(imageCanvas.width, 0);
imageContext.scale(-1, 1);
imageContext.drawImage(
video,
0,
0,
imageCanvas.width,
imageCanvas.height
); );
// Convert to Blob and send
imageCanvas.toBlob((blob) => {
if (blob) {
sendFaceDataToAPI(blob);
}
}, "image/jpeg");
}
} }
}; };
@ -106,7 +121,7 @@ const RealtimeFaceDetection = () => {
`${process.env.NEXT_PUBLIC_BASE_URL}/search`, `${process.env.NEXT_PUBLIC_BASE_URL}/search`,
{ {
method: "POST", method: "POST",
body: formData, // Send multipart/form-data body: formData,
} }
); );
@ -125,13 +140,8 @@ const RealtimeFaceDetection = () => {
const startDetection = () => { const startDetection = () => {
if (!isModelLoaded) return; if (!isModelLoaded) return;
setIsDetecting(true); setIsDetecting(true);
const interval = setInterval(detectFace, 1000); setInterval(detectFace, 300);
setTimeout(() => {
clearInterval(interval);
setIsDetecting(false);
}, 100000); // Stops detection after 10 seconds
}; };
return ( return (
<div className="max-w-3xl mx-auto"> <div className="max-w-3xl mx-auto">
<div className="relative"> <div className="relative">