"use client"; import { useEffect, useRef, useState } from "react"; import Webcam from "react-webcam"; import * as faceapi from "face-api.js"; import { Button } from "@/components/ui/button"; import { Camera } from "lucide-react"; import { useToast } from "@/hooks/use-toast"; const MODEL_URL = "https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model"; const PADDING = 60; const WebcamDetection = () => { const webcamRef = useRef(null); const canvasRef = useRef(null); const [isModelLoaded, setIsModelLoaded] = useState(false); const [isDetecting, setIsDetecting] = useState(false); const { toast } = useToast(); useEffect(() => { const loadModels = async () => { try { await faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL); await faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL); await faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL); setIsModelLoaded(true); } catch (error) { console.error("Error loading models:", error); toast({ title: "Error", description: "Failed to load face detection models.", variant: "destructive", }); } }; loadModels(); }, [toast]); const extractFaceWithPadding = ( video: HTMLVideoElement, box: faceapi.Box ): HTMLCanvasElement => { const canvas = document.createElement("canvas"); const context = canvas.getContext("2d"); // Calculate padded dimensions const x = Math.max(0, box.x - PADDING); const y = Math.max(0, box.y - PADDING); const width = Math.min(video.videoWidth - x, box.width + 2 * PADDING); const height = Math.min(video.videoHeight - y, box.height + 2 * PADDING); canvas.width = width; canvas.height = height; if (context) { context.drawImage(video, x, y, width, height, 0, 0, width, height); } return canvas; }; const detectFace = async () => { if (!webcamRef.current?.video || !canvasRef.current) return; const video = webcamRef.current.video; const canvas = canvasRef.current; const context = canvas.getContext("2d"); if (!context) return; canvas.width = video.videoWidth; canvas.height = video.videoHeight; context.clearRect(0, 0, canvas.width, canvas.height); context.translate(canvas.width, 0); context.scale(-1, 1); const detections = await faceapi .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions()) .withFaceLandmarks() .withFaceDescriptors(); if (detections.length > 0) { const highConfidenceDetections = detections.filter( (detection) => detection.detection.score > 0.5 ); for (const detection of highConfidenceDetections) { const { box } = detection.detection; context.strokeStyle = "#00FF00"; context.lineWidth = 2; context.strokeRect(box.x, box.y, box.width, box.height); context.save(); context.scale(-1, 1); context.fillStyle = "#00FF00"; context.font = "16px Arial"; context.fillText( `Confidence: ${Math.round(detection.detection.score * 100)}%`, -box.x - box.width, box.y - 5 ); context.restore(); const faceCanvas = extractFaceWithPadding(video, box); faceCanvas.toBlob( (blob) => { if (blob) sendFaceDataToAPI(blob); }, "image/jpeg", 0.95 ); } } }; const sendFaceDataToAPI = async (imageBlob: Blob) => { try { const formData = new FormData(); formData.append("image", imageBlob, "face.jpg"); const response = await fetch( `${process.env.NEXT_PUBLIC_BASE_URL}/search`, { method: "POST", body: formData, } ); const data = await response.json(); toast({ title: data?.name, description: data.message }); } catch (error) { console.error("Error sending face data:", error); toast({ title: "Error", description: "Failed to send face data.", variant: "destructive", }); } }; const startDetection = () => { if (!isModelLoaded) return; setIsDetecting(true); 
    intervalRef.current = setInterval(detectFace, 1000);
  };

  return (
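    // A minimal markup sketch: assumes a mirrored preview (matching the
    // mirrored overlay drawing in detectFace) with the canvas stacked over
    // the video. Class names and layout here are illustrative, not original.
    <div className="flex flex-col items-start gap-2">
      <div className="relative">
        <Webcam ref={webcamRef} audio={false} mirrored />
        <canvas
          ref={canvasRef}
          className="absolute left-0 top-0 h-full w-full"
        />
      </div>
      <Button
        onClick={startDetection}
        disabled={!isModelLoaded || isDetecting}
      >
        <Camera className="mr-2 h-4 w-4" />
        {isDetecting ? "Detecting..." : "Start Detection"}
      </Button>
    </div>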
  );
};

export default WebcamDetection;