"use client";
|
|
|
|
import { useEffect, useRef, useState } from "react";
|
|
import Webcam from "react-webcam";
|
|
import * as faceapi from "face-api.js";
|
|
import { Button } from "@/components/ui/button";
|
|
import { Camera } from "lucide-react";
|
|
import { useToast } from "@/hooks/use-toast";
|
|
|
|
const MODEL_URL = "https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model";
|
|
|
|
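
// The detector, landmark, and recognition weights load from the published
// @vladmandic/face-api CDN build above. They could also be self-hosted, e.g.
// by copying the model files into public/models and pointing MODEL_URL at
// "/models" (a hypothetical local path, not something set up here).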

const RealtimeFaceDetection = () => {
  const webcamRef = useRef<Webcam>(null);
  const canvasRef = useRef<HTMLCanvasElement>(null);
  const [isModelLoaded, setIsModelLoaded] = useState(false);
  const [isDetecting, setIsDetecting] = useState(false);
  const { toast } = useToast();

  useEffect(() => {
    const loadModels = async () => {
      try {
        await faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL);
        await faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL);
        await faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL);
        setIsModelLoaded(true);
      } catch (error) {
        console.error("Error loading models:", error);
        toast({
          title: "Error",
          description: "Failed to load face detection models.",
          variant: "destructive",
        });
      }
    };
    loadModels();
  }, [toast]);
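
  // The three loadFromUri calls above are independent downloads, so they
  // could also run concurrently. A minimal sketch (same nets, same URL,
  // only the awaiting strategy changes):
  //
  //   await Promise.all([
  //     faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL),
  //     faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL),
  //     faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL),
  //   ]);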

  const detectFace = async () => {
    if (!webcamRef.current || !webcamRef.current.video || !canvasRef.current)
      return;

    const video = webcamRef.current.video;
    const canvas = canvasRef.current;
    const context = canvas.getContext("2d");

    if (!context) return;

    // Set canvas size to match the video's intrinsic resolution
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    context.clearRect(0, 0, canvas.width, canvas.height); // Clear previous drawings

    // Mirror the canvas context to match the mirrored video
    context.translate(canvas.width, 0); // Move the origin to the right side of the canvas
    context.scale(-1, 1); // Flip the context horizontally

    // Detect a single face with landmarks and a descriptor
    const detections = await faceapi
      .detectSingleFace(video, new faceapi.TinyFaceDetectorOptions())
      .withFaceLandmarks()
      .withFaceDescriptor();
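
    // face-api.js also ships drawing helpers (faceapi.draw.drawDetections,
    // faceapi.resizeResults) that can render boxes automatically; the manual
    // strokeRect below is used instead so drawing goes through the mirrored
    // context set up above.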

    if (detections) {
      // Draw the bounding box
      const { x, y, width, height } = detections.detection.box;
      context.strokeStyle = "red"; // Box color
      context.lineWidth = 3;
      context.strokeRect(x, y, width, height);

      // Capture the current video frame as an image
      const imageCanvas = document.createElement("canvas");
      const imageContext = imageCanvas.getContext("2d");

      if (imageContext) {
        imageCanvas.width = video.videoWidth;
        imageCanvas.height = video.videoHeight;

        // Mirror the image context as well, so the capture matches the preview
        imageContext.translate(imageCanvas.width, 0);
        imageContext.scale(-1, 1);

        imageContext.drawImage(
          video,
          0,
          0,
          imageCanvas.width,
          imageCanvas.height
        );

        // Convert to a Blob and send it to the search API
        imageCanvas.toBlob((blob) => {
          if (blob) {
            sendFaceDataToAPI(blob);
          }
        }, "image/jpeg");
      }
    }
  };
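
  // The /search endpoint's response shape is not defined in this file. From
  // how `data` is used below, it is presumably something like the following
  // (an assumption about the backend, not a documented contract):
  //
  //   interface SearchResponse {
  //     name?: string;   // matched person's name, if any
  //     message: string; // human-readable status from the server
  //   }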

  const sendFaceDataToAPI = async (imageBlob: Blob) => {
    try {
      const formData = new FormData();
      formData.append("image", imageBlob, "face.jpg");

      const response = await fetch(
        `${process.env.NEXT_PUBLIC_BASE_URL}/search`,
        {
          method: "POST",
          body: formData, // Sent as multipart/form-data
        }
      );

      if (!response.ok) {
        throw new Error(`Search request failed with status ${response.status}`);
      }

      const data = await response.json();
      toast({ title: data?.name, description: data?.message });
    } catch (error) {
      console.error("Error sending face data:", error);
      toast({
        title: "Error",
        description: "Failed to send face data.",
        variant: "destructive",
      });
    }
  };

  const startDetection = () => {
    if (!isModelLoaded) return;
    setIsDetecting(true);
    const interval = setInterval(detectFace, 1000); // Run detection once per second
    setTimeout(() => {
      clearInterval(interval);
      setIsDetecting(false);
    }, 10000); // Stops detection after 10 seconds
  };
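
  // Caveat: if the component unmounts mid-run, the interval and timeout above
  // keep firing. A minimal fix (a sketch, not wired in here) would keep the
  // interval id in a ref and clear it in a useEffect cleanup:
  //
  //   const intervalRef = useRef<ReturnType<typeof setInterval> | null>(null);
  //   useEffect(() => {
  //     return () => {
  //       if (intervalRef.current) clearInterval(intervalRef.current);
  //     };
  //   }, []);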

  return (
    <div className="max-w-3xl mx-auto">
      <div className="relative">
        <Webcam ref={webcamRef} mirrored className="w-full rounded-lg" />
        <canvas
          ref={canvasRef}
          className="absolute top-0 left-0 w-full h-full"
        />
      </div>
      <div className="mt-6 flex justify-center">
        <Button
          onClick={startDetection}
          disabled={!isModelLoaded || isDetecting}
        >
          <Camera className="mr-2 h-4 w-4" />
          {isDetecting ? "Detecting..." : "Start Realtime Detection"}
        </Button>
      </div>
    </div>
  );
};

export default RealtimeFaceDetection;
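
// Usage sketch: mounting the component from a Next.js page. The import path
// is hypothetical; adjust it to wherever this file actually lives.
//
//   // app/detect/page.tsx
//   import RealtimeFaceDetection from "@/components/RealtimeFaceDetection";
//
//   export default function DetectPage() {
//     return <RealtimeFaceDetection />;
//   }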