Add multiple face detection and recognition
parent ca38b7c7f4 · commit d50ac4ed64
@@ -8,6 +8,7 @@ import { Camera } from "lucide-react";
 import { useToast } from "@/hooks/use-toast";
 
 const MODEL_URL = "https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model";
+const PADDING = 50; // Padding around face in pixels
 
 const RealtimeFaceDetection = () => {
   const webcamRef = useRef<Webcam>(null);
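The chain added below needs three nets served from MODEL_URL. The diff only ever calls loadModels(), so this is a hedged sketch of what that loader plausibly contains, inferred from the detectAllFaces().withFaceLandmarks().withFaceDescriptors() chain further down, not the commit's actual code:

```ts
import * as faceapi from "@vladmandic/face-api";

const MODEL_URL = "https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model";

// Hypothetical loader: one net per stage of the detection chain used below.
const loadModels = async () => {
  await Promise.all([
    faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL), // detectAllFaces
    faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL), // withFaceLandmarks
    faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL), // withFaceDescriptors
  ]);
};
```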
@@ -35,9 +36,42 @@ const RealtimeFaceDetection = () => {
     loadModels();
   }, [toast]);
 
+  const extractFaceWithPadding = (
+    video: HTMLVideoElement,
+    box: faceapi.Box
+  ): HTMLCanvasElement => {
+    const canvas = document.createElement("canvas");
+    const context = canvas.getContext("2d");
+
+    // Calculate padded dimensions, clamped to the video bounds
+    const x = Math.max(0, box.x - PADDING);
+    const y = Math.max(0, box.y - PADDING);
+    const width = Math.min(video.videoWidth - x, box.width + 2 * PADDING);
+    const height = Math.min(video.videoHeight - y, box.height + 2 * PADDING);
+
+    canvas.width = width;
+    canvas.height = height;
+
+    if (context) {
+      // Extract face region with padding
+      context.drawImage(
+        video,
+        x,
+        y,
+        width,
+        height, // Source coordinates
+        0,
+        0,
+        width,
+        height // Destination coordinates
+      );
+    }
+
+    return canvas;
+  };
+
   const detectFace = async () => {
-    if (!webcamRef.current || !webcamRef.current.video || !canvasRef.current)
-      return;
+    if (!webcamRef.current?.video || !canvasRef.current) return;
 
     const video = webcamRef.current.video;
     const canvas = canvasRef.current;
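Note the asymmetry in the clamping above: at the right and bottom edges the crop shrinks to fit the frame, while at the left and top the origin is pinned to 0 and the full padded size is kept. A self-contained sketch of the same arithmetic on plain numbers (the 640x480 frame and the box are made up for illustration):

```ts
const PADDING = 50;

interface Box { x: number; y: number; width: number; height: number; }

// Same math as extractFaceWithPadding, minus the canvas work.
function paddedRegion(frameW: number, frameH: number, box: Box): Box {
  const x = Math.max(0, box.x - PADDING);
  const y = Math.max(0, box.y - PADDING);
  return {
    x,
    y,
    width: Math.min(frameW - x, box.width + 2 * PADDING),
    height: Math.min(frameH - y, box.height + 2 * PADDING),
  };
}

// Face near the bottom-right corner: the padded crop is cut off at the frame edge.
console.log(paddedRegion(640, 480, { x: 560, y: 400, width: 60, height: 60 }));
// -> { x: 510, y: 350, width: 130, height: 130 }
```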
@@ -45,7 +79,6 @@ const RealtimeFaceDetection = () => {
-
     if (!context) return;
 
     // Set canvas size to match video
     canvas.width = video.videoWidth;
     canvas.height = video.videoHeight;
     context.clearRect(0, 0, canvas.width, canvas.height); // Clear previous drawings
@@ -54,46 +87,28 @@ const RealtimeFaceDetection = () => {
     context.translate(canvas.width, 0); // Move the origin to the right side of the canvas
     context.scale(-1, 1); // Flip the context horizontally
 
-    // Detect face
+    // Detect all faces
     const detections = await faceapi
-      .detectSingleFace(video, new faceapi.TinyFaceDetectorOptions())
+      .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
       .withFaceLandmarks()
-      .withFaceDescriptor();
+      .withFaceDescriptors();
 
-    if (detections) {
-      // Draw bounding box
-      const { x, y, width, height } = detections.detection.box;
-      context.strokeStyle = "red"; // Box color
-      context.lineWidth = 3;
-      context.strokeRect(x, y, width, height);
+    for (const detection of detections) {
+      // Draw box for visualization
+      const { box } = detection.detection;
+      context.strokeStyle = "#00FF00";
+      context.lineWidth = 2;
+      context.strokeRect(box.x, box.y, box.width, box.height);
 
-      // Capture the face as an image
-      const imageCanvas = document.createElement("canvas");
-      const imageContext = imageCanvas.getContext("2d");
-
-      if (imageContext) {
-        imageCanvas.width = video.videoWidth;
-        imageCanvas.height = video.videoHeight;
-
-        // Mirror the image context as well
-        imageContext.translate(imageCanvas.width, 0);
-        imageContext.scale(-1, 1);
-
-        imageContext.drawImage(
-          video,
-          0,
-          0,
-          imageCanvas.width,
-          imageCanvas.height
-        );
-
-        // Convert to Blob and send
-        imageCanvas.toBlob((blob) => {
-          if (blob) {
-            sendFaceDataToAPI(blob);
-          }
-        }, "image/jpeg");
-      }
+      // Extract face with padding and send to API
+      const faceCanvas = extractFaceWithPadding(video, box);
+      faceCanvas.toBlob(
+        (blob) => {
+          if (blob) sendFaceDataToAPI(blob);
+        },
+        "image/jpeg",
+        0.95
+      );
 
     }
   };
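detectAllFaces returns an array (possibly empty), which is why the old truthiness check becomes a plain loop, and withFaceDescriptors attaches a 128-dimension descriptor to each result. The commit does its matching server-side via sendFaceDataToAPI; purely as a contrast, here is a hedged sketch of matching those descriptors client-side with face-api's FaceMatcher (video and knownDescriptor are stand-ins, not part of the diff):

```ts
import * as faceapi from "@vladmandic/face-api";

// Stand-ins for the webcam element and a previously enrolled descriptor.
declare const video: HTMLVideoElement;
declare const knownDescriptor: Float32Array;

async function labelFaces(): Promise<void> {
  const matcher = new faceapi.FaceMatcher(
    [new faceapi.LabeledFaceDescriptors("alice", [knownDescriptor])],
    0.6 // max euclidean distance that still counts as a match
  );

  const detections = await faceapi
    .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
    .withFaceLandmarks()
    .withFaceDescriptors();

  // An empty frame simply yields an empty array; no null check needed.
  for (const { descriptor } of detections) {
    console.log(matcher.findBestMatch(descriptor).toString()); // e.g. "alice (0.42)" or "unknown"
  }
}
```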
@@ -106,7 +121,7 @@ const RealtimeFaceDetection = () => {
       `${process.env.NEXT_PUBLIC_BASE_URL}/search`,
       {
         method: "POST",
-        body: formData, // Send multipart/form-data
+        body: formData,
       }
     );
 
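The dropped comment was redundant: when a FormData instance is passed as body, fetch sets the multipart/form-data content type (including the boundary) itself. The diff never shows how formData is built, so the field name in this sketch of the sender is an assumption:

```ts
// Hypothetical sender matching the hunk above; "file" is a guessed field name.
const sendFaceDataToAPI = async (blob: Blob): Promise<unknown> => {
  const formData = new FormData();
  formData.append("file", blob, "face.jpg");

  const response = await fetch(`${process.env.NEXT_PUBLIC_BASE_URL}/search`, {
    method: "POST",
    body: formData, // fetch adds the multipart boundary header automatically
  });
  return response.json();
};
```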
@@ -125,13 +140,8 @@ const RealtimeFaceDetection = () => {
   const startDetection = () => {
     if (!isModelLoaded) return;
     setIsDetecting(true);
-    const interval = setInterval(detectFace, 1000);
-    setTimeout(() => {
-      clearInterval(interval);
-      setIsDetecting(false);
-    }, 100000); // Stops detection after 10 seconds
+    setInterval(detectFace, 300);
   };
-
   return (
     <div className="max-w-3xl mx-auto">
       <div className="relative">
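Worth noting: the new setInterval(detectFace, 300) discards its id, so detection can never be stopped, setIsDetecting(false) is now unreachable, and repeated clicks stack intervals. If stopping still matters, one sketch inside the same component (intervalRef is a new, hypothetical ref; isModelLoaded and setIsDetecting are the existing state):

```ts
// Assumes the component's existing isModelLoaded / setIsDetecting state.
const intervalRef = useRef<ReturnType<typeof setInterval> | null>(null);

const startDetection = () => {
  if (!isModelLoaded || intervalRef.current) return; // don't stack intervals
  setIsDetecting(true);
  intervalRef.current = setInterval(detectFace, 300);
};

const stopDetection = () => {
  if (intervalRef.current) clearInterval(intervalRef.current);
  intervalRef.current = null;
  setIsDetecting(false);
};
```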