"use client";

import { useState, useRef, useEffect } from "react";
import Webcam from "react-webcam";
import { Card, CardContent } from "@/components/ui/card";
import { Loader2 } from "lucide-react";
import { useToast } from "@/hooks/use-toast";
import * as faceapi from "face-api.js";
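
// Client-side face liveness check: detects faces from the webcam with
// face-api.js and flags a face as live when its expression probabilities
// change between frames or it blinks.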
export default function FaceLiveness() {
  const webcamRef = useRef<Webcam>(null);
  const [isModelLoading, setIsModelLoading] = useState(true);
  // Refs rather than state for values only the detection loop touches:
  // updating them must not re-render the component or restart the effect
  // that drives the loop.
  const isProcessingRef = useRef(false);
  const previousExpressionsRef = useRef<faceapi.FaceExpressions | null>(null);
  const processingTimeoutRef = useRef<NodeJS.Timeout | null>(null);
  const { toast } = useToast();
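
  // Load the detector, landmark, and expression models once on mount.
  // The weights are fetched from the face-api.js GitHub Pages demo;
  // production apps usually self-host them instead.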
  useEffect(() => {
    const loadModels = async () => {
      try {
        const MODEL_URL =
          "https://justadudewhohacks.github.io/face-api.js/models";
        await Promise.all([
          faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL),
          faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL),
          faceapi.nets.faceExpressionNet.loadFromUri(MODEL_URL),
        ]);
        setIsModelLoading(false);
      } catch (error) {
        console.error("Error loading models:", error);
        toast({
          title: "Error",
          description:
            "Failed to load face detection models. Please refresh the page.",
          variant: "destructive",
        });
      }
    };
    loadModels();
  }, [toast]);
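
  // Heuristic liveness check: a live face shows frame-to-frame changes in
  // expression probabilities or an occasional blink, while a static spoof
  // (e.g. a printed photo) shows neither.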
  const checkLiveness = (
    expressions: faceapi.FaceExpressions,
    landmarks: faceapi.FaceLandmarks68
  ) => {
    const previous = previousExpressionsRef.current;
    previousExpressionsRef.current = expressions;
    if (!previous) {
      // First frame: nothing to compare against yet.
      return false;
    }

    // Check for expression changes between consecutive frames; the class
    // instances are compared as plain label -> probability records.
    const expressionThreshold = 0.1;
    let hasExpressionChange = false;
    const current = expressions as unknown as Record<string, number>;
    const prior = previous as unknown as Record<string, number>;
    for (const expression of Object.keys(current)) {
      const diff = Math.abs(current[expression] - prior[expression]);
      if (diff > expressionThreshold) {
        hasExpressionChange = true;
        break;
      }
    }

    // Check for natural facial movement using landmarks.
    const eyeBlinkDetected = detectEyeBlink(landmarks);

    return hasExpressionChange || eyeBlinkDetected;
  };

  const detectEyeBlink = (landmarks: faceapi.FaceLandmarks68) => {
    const leftEye = landmarks.getLeftEye();
    const rightEye = landmarks.getRightEye();

    // Calculate the eye aspect ratio (EAR) for each eye.
    const leftEAR = getEyeAspectRatio(leftEye);
    const rightEAR = getEyeAspectRatio(rightEye);

    // If either eye is closed (low aspect ratio), consider it a blink.
    const blinkThreshold = 0.2;
    return leftEAR < blinkThreshold || rightEAR < blinkThreshold;
  };
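
  // Eye aspect ratio over the six eye landmarks p0..p5, as commonly used
  // for blink detection:
  //   EAR = (|p1 - p5| + |p2 - p4|) / (2 * |p0 - p3|)
  // The ratio stays roughly constant while the eye is open and drops
  // sharply when it closes.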
  const getEyeAspectRatio = (eye: faceapi.Point[]) => {
    const height1 = distance(eye[1], eye[5]);
    const height2 = distance(eye[2], eye[4]);
    const width = distance(eye[0], eye[3]);
    return (height1 + height2) / (2.0 * width);
  };

  // Euclidean distance between two landmark points.
  const distance = (point1: faceapi.Point, point2: faceapi.Point) => {
    return Math.sqrt(
      Math.pow(point1.x - point2.x, 2) + Math.pow(point1.y - point2.y, 2)
    );
  };
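
  // Detection loop: grab a webcam frame, run detection, then schedule the
  // next pass. setTimeout chaining (rather than setInterval) guarantees one
  // pass finishes before the next begins.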
  useEffect(() => {
    if (isModelLoading) return;

    const processFrame = async () => {
      if (!webcamRef.current || isProcessingRef.current) return;

      isProcessingRef.current = true;
      try {
        const imageSrc = webcamRef.current.getScreenshot();
        if (!imageSrc) return;

        // Decode the screenshot into an image element face-api.js can read.
        const img = new Image();
        img.src = imageSrc;
        await new Promise((resolve) => (img.onload = resolve));

        const detections = await faceapi
          .detectAllFaces(img, new faceapi.TinyFaceDetectorOptions())
          .withFaceLandmarks()
          .withFaceExpressions();

        // Process each detected face with high confidence.
        detections
          .filter((detection) => detection.detection.score > 0.7)
          .forEach((detection) => {
            const isLive = checkLiveness(
              detection.expressions,
              detection.landmarks
            );
            if (isLive) {
              toast({
                title: "Liveness Detected",
                description: "Real face detected with natural movements",
              });
            } else {
              toast({
                title: "Liveness Check",
                description: "Please move or blink naturally",
                variant: "destructive",
              });
            }
          });
      } catch (error) {
        console.error("Processing error:", error);
      } finally {
        isProcessingRef.current = false;
        // Schedule the next pass; a one-second cadence keeps CPU usage low.
        processingTimeoutRef.current = setTimeout(processFrame, 1000);
      }
    };

    processFrame();

    return () => {
      if (processingTimeoutRef.current) {
        clearTimeout(processingTimeoutRef.current);
      }
    };
    // checkLiveness and detectEyeBlink only read refs, so they are safe to
    // omit from the dependency list.
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [isModelLoading, toast]);

  if (isModelLoading) {
    return (
      <Card>
        <CardContent className="p-6 text-center">
          <Loader2 className="h-8 w-8 animate-spin mx-auto" />
          <p className="mt-2">Loading face detection models...</p>
        </CardContent>
      </Card>
    );
  }

  return (
    <Card>
      <CardContent className="p-6">
        <div className="space-y-6">
          <div className="relative rounded-lg overflow-hidden bg-black">
            <Webcam
              ref={webcamRef}
              screenshotFormat="image/jpeg"
              className="w-full"
              videoConstraints={{
                width: 640,
                height: 480,
                facingMode: "user",
              }}
            />
          </div>
          <p className="text-center text-sm text-muted-foreground">
            Move your face naturally or blink to verify liveness
          </p>
        </div>
      </CardContent>
    </Card>
  );
}
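
// Usage sketch (hypothetical page path; adjust the import to wherever this
// component actually lives):
//
//   import FaceLiveness from "@/components/face-liveness";
//
//   export default function LivenessPage() {
//     return <FaceLiveness />;
//   }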