// Source exported 2025-02-07 19:41:48 +05:30 — 113 lines, 3.3 KiB, TypeScript.

"use client";
import { useEffect, useRef, useState } from "react";
import Webcam from "react-webcam";
import * as faceapi from "face-api.js";
import { Button } from "@/components/ui/button";
import { AlertTriangle, Camera } from "lucide-react";
import { useToast } from "@/hooks/use-toast";
// CDN location of the face-api.js model weights (tiny face detector + 68-point landmarks).
const MODEL_URL = "https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model";
// Polling period, in milliseconds, between face-detection samples.
const CHECK_INTERVAL = 500;
/**
 * Webcam-based "moving toward the camera" detector.
 *
 * Loads face-api.js models from a CDN, then (once started) samples the webcam
 * every CHECK_INTERVAL ms. The face's bounding-box area is compared against the
 * previous sample; a large increase is interpreted as the subject approaching,
 * which shows a warning banner.
 */
const FaceMovementDetection = () => {
  const webcamRef = useRef<Webcam>(null);
  const [isModelLoaded, setIsModelLoaded] = useState(false);
  const [isDetecting, setIsDetecting] = useState(false);
  // Bounding-box area (px²) from the previous sample; null until the first detection.
  const prevBoxSizeRef = useRef<number | null>(null);
  // Handle of the polling timer so it can be cleared on unmount
  // (the original code leaked the interval — it was never stored or cleared).
  const intervalRef = useRef<ReturnType<typeof setInterval> | null>(null);
  const [movingForward, setMovingForward] = useState(false);
  const { toast } = useToast();

  // Load the detector and landmark models once on mount.
  useEffect(() => {
    const loadModels = async () => {
      try {
        await Promise.all([
          faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL),
          faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL),
        ]);
        setIsModelLoaded(true);
        toast({
          title: "Models Loaded",
          description: "Face detection models ready.",
        });
      } catch (error) {
        console.error("Error loading models:", error);
        toast({
          title: "Error",
          description: "Failed to load models.",
          variant: "destructive",
        });
      }
    };
    loadModels();
  }, [toast]);

  // Stop the detection loop when the component unmounts.
  useEffect(() => {
    return () => {
      if (intervalRef.current !== null) {
        clearInterval(intervalRef.current);
        intervalRef.current = null;
      }
    };
  }, []);

  // Sample the webcam once: detect a single face and compare its bounding-box
  // area against the previous sample to decide whether the subject moved closer.
  const detectMovement = async () => {
    const video = webcamRef.current?.video;
    // Skip until the video element exists and has frame data to read.
    if (!video || video.readyState < 2) return;
    const detection = await faceapi.detectSingleFace(
      video,
      new faceapi.TinyFaceDetectorOptions()
    );
    if (!detection) return; // No face this frame; keep the previous baseline.
    const { width, height } = detection.box;
    const currentBoxSize = width * height;
    if (prevBoxSizeRef.current !== null) {
      const sizeIncrease = currentBoxSize - prevBoxSizeRef.current;
      // 3000 px² of growth between consecutive samples counts as "moving forward".
      setMovingForward(sizeIncrease > 3000);
    }
    prevBoxSizeRef.current = currentBoxSize;
  };

  // Begin polling. Idempotent: a second call while already running is a no-op,
  // so at most one interval exists at a time.
  const startDetection = () => {
    if (!isModelLoaded || intervalRef.current !== null) return;
    setIsDetecting(true);
    intervalRef.current = setInterval(detectMovement, CHECK_INTERVAL);
  };

  return (
    <div className="max-w-3xl mx-auto">
      <div className="relative">
        <Webcam ref={webcamRef} mirrored className="w-full rounded-lg" />
      </div>
      {movingForward && (
        <div className="mt-4 p-3 bg-yellow-300 text-black rounded-md flex items-center">
          <AlertTriangle className="mr-2 h-5 w-5" />
          Person is moving forward!
        </div>
      )}
      <div className="mt-6 flex justify-center">
        <Button
          onClick={startDetection}
          disabled={!isModelLoaded || isDetecting}
        >
          <Camera className="mr-2 h-4 w-4" />
          {isDetecting ? "Detecting..." : "Start Movement Detection"}
        </Button>
      </div>
    </div>
  );
};
export default FaceMovementDetection;