"use client"; import { useEffect, useRef, useState } from "react"; import Webcam from "react-webcam"; import * as faceapi from "face-api.js"; import { Button } from "@/components/ui/button"; import { Camera } from "lucide-react"; import { useToast } from "@/hooks/use-toast"; const MODEL_URL = "https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model"; const PADDING = 50; // Padding around face in pixels const RealtimeFaceDetection = () => { const webcamRef = useRef(null); const canvasRef = useRef(null); const [isModelLoaded, setIsModelLoaded] = useState(false); const [isDetecting, setIsDetecting] = useState(false); const { toast } = useToast(); useEffect(() => { const loadModels = async () => { try { await faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL); await faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL); await faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL); setIsModelLoaded(true); } catch (error) { console.error("Error loading models:", error); toast({ title: "Error", description: "Failed to load face detection models.", variant: "destructive", }); } }; loadModels(); }, [toast]); const extractFaceWithPadding = ( video: HTMLVideoElement, box: faceapi.Box ): HTMLCanvasElement => { const canvas = document.createElement("canvas"); const context = canvas.getContext("2d"); // Calculate padded dimensions const x = Math.max(0, box.x - PADDING); const y = Math.max(0, box.y - PADDING); const width = Math.min(video.videoWidth - x, box.width + 2 * PADDING); const height = Math.min(video.videoHeight - y, box.height + 2 * PADDING); canvas.width = width; canvas.height = height; if (context) { // Extract face region with padding context.drawImage( video, x, y, width, height, // Source coordinates 0, 0, width, height // Destination coordinates ); } return canvas; }; const detectFace = async () => { if (!webcamRef.current?.video || !canvasRef.current) return; const video = webcamRef.current.video; const canvas = canvasRef.current; const context = canvas.getContext("2d"); if (!context) return; canvas.width = video.videoWidth; canvas.height = video.videoHeight; context.clearRect(0, 0, canvas.width, canvas.height); // Clear previous drawings // Mirror the canvas context to match the mirrored video context.translate(canvas.width, 0); // Move the origin to the right side of the canvas context.scale(-1, 1); // Flip the context horizontally // Detect all faces const detections = await faceapi .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions()) .withFaceLandmarks() .withFaceDescriptors(); for (const detection of detections) { // Draw box for visualization const { box } = detection.detection; context.strokeStyle = "#00FF00"; context.lineWidth = 2; context.strokeRect(box.x, box.y, box.width, box.height); // Extract face with padding and send to API const faceCanvas = extractFaceWithPadding(video, box); faceCanvas.toBlob( (blob) => { if (blob) sendFaceDataToAPI(blob); }, "image/jpeg", 0.95 ); } }; const sendFaceDataToAPI = async (imageBlob: Blob) => { try { const formData = new FormData(); formData.append("image", imageBlob, "face.jpg"); const response = await fetch( `${process.env.NEXT_PUBLIC_BASE_URL}/search`, { method: "POST", body: formData, } ); const data = await response.json(); toast({ title: data?.name, description: data.message }); } catch (error) { console.error("Error sending face data:", error); toast({ title: "Error", description: "Failed to send face data.", variant: "destructive", }); } }; const startDetection = () => { if (!isModelLoaded) return; setIsDetecting(true); 
    // Store the interval ID so the detection loop can be stopped later
    detectionIntervalRef.current = setInterval(detectFace, 300);
  };

  const stopDetection = () => {
    if (detectionIntervalRef.current) {
      clearInterval(detectionIntervalRef.current);
      detectionIntervalRef.current = null;
    }
    setIsDetecting(false);
  };

  // Stop the detection loop if the component unmounts while detecting
  useEffect(() => {
    return () => {
      if (detectionIntervalRef.current) clearInterval(detectionIntervalRef.current);
    };
  }, []);

  return (
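    // NOTE: the original snippet omitted the JSX markup. The layout below is a
    // minimal sketch inferred from the imports and refs above (a mirrored
    // react-webcam feed, an overlay canvas for detection boxes, and a
    // start/stop button); the class names are illustrative, not from the source.
    <div className="relative flex flex-col items-center gap-4">
      <div className="relative">
        <Webcam
          ref={webcamRef}
          audio={false}
          mirrored
          screenshotFormat="image/jpeg"
          className="rounded-lg"
        />
        {/* Overlay canvas for drawing detection boxes */}
        <canvas
          ref={canvasRef}
          className="absolute left-0 top-0 h-full w-full"
        />
      </div>
      <Button
        onClick={isDetecting ? stopDetection : startDetection}
        disabled={!isModelLoaded}
      >
        <Camera className="mr-2 h-4 w-4" />
        {isDetecting ? "Stop Detection" : "Start Detection"}
      </Button>
    </div>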
  );
};

export default RealtimeFaceDetection;
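
// Usage sketch (assumed): render the client component from any page or layout.
// The page path and import path below are illustrative, not from the source.
//
//   // app/attendance/page.tsx
//   import RealtimeFaceDetection from "@/components/RealtimeFaceDetection";
//
//   export default function AttendancePage() {
//     return <RealtimeFaceDetection />;
//   }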