new components added

This commit is contained in:
Somdev Das 2025-02-07 19:41:48 +05:30
parent a0d6b7a73a
commit 75c8c99481
10 changed files with 954 additions and 2 deletions

View File

@ -4,10 +4,18 @@ import Register from "./register/Register";
import Search from "./search/Search"; import Search from "./search/Search";
import "./MainForm.css"; import "./MainForm.css";
import RealtimeFaceDetection from "./realtimeFaceDetection/RealtimeFaceDetection"; import RealtimeFaceDetection from "./realtimeFaceDetection/RealtimeFaceDetection";
import FaceLiveness from "./faceLivelinessCheck/FaceLivelinessCheck";
import FaceMovementDetection from "./faceMovementDetection/FaceMovementDetection";
import RealtimeCount from "./realtimeCount/RealtimeCount";
const MainForm: React.FC = () => { const MainForm: React.FC = () => {
const [activeTab, setActiveTab] = useState< const [activeTab, setActiveTab] = useState<
"register" | "search" | "realtime" | "register"
| "search"
| "realtime"
| "liveliness"
| "realtime-count"
| "facemovement"
>("register"); >("register");
return ( return (
@ -31,11 +39,36 @@ const MainForm: React.FC = () => {
> >
Realtime Detection Realtime Detection
</button> </button>
{/* <button
className={`tab-button ${activeTab === "liveliness" ? "active" : ""}`}
onClick={() => setActiveTab("liveliness")}
>
Liveliness Detection
</button> */}
<button
className={`tab-button ${
activeTab === "realtime-count" ? "active" : ""
}`}
onClick={() => setActiveTab("realtime-count")}
>
Realtime Count
</button>
<button
className={`tab-button ${
activeTab === "facemovement" ? "active" : ""
}`}
onClick={() => setActiveTab("facemovement")}
>
Face Movement Detection
</button>
</div> </div>
<div className="tab-content"> <div className="tab-content">
{activeTab === "register" && <Register />} {activeTab === "register" && <Register />}
{activeTab === "search" && <Search />} {activeTab === "search" && <Search />}
{activeTab === "realtime" && <RealtimeFaceDetection />} {activeTab === "realtime" && <RealtimeFaceDetection />}
{activeTab === "liveliness" && <FaceLiveness />}
{activeTab === "realtime-count" && <RealtimeCount />}
{activeTab === "facemovement" && <FaceMovementDetection />}
</div> </div>
</div> </div>
); );

View File

@ -0,0 +1,192 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
"use client";
import { useState, useRef, useEffect } from "react";
import Webcam from "react-webcam";
import { Card, CardContent } from "@/components/ui/card";
import { Loader2 } from "lucide-react";
import { useToast } from "@/hooks/use-toast";
import * as faceapi from "face-api.js";
export default function FaceLiveness() {
  const webcamRef = useRef<Webcam>(null);
  const [isModelLoading, setIsModelLoading] = useState(true);
  // Per-frame mutable data lives in refs rather than state: calling setState
  // once per processed frame re-renders the component, and with that state in
  // the processing effect's deps the loop was torn down and restarted on
  // every single frame.
  const previousExpressionsRef = useRef<any>(null);
  const isProcessingRef = useRef(false);
  const processingTimeoutRef = useRef<NodeJS.Timeout | null>(null);
  const { toast } = useToast();

  // Load the face-api.js models once on mount.
  useEffect(() => {
    const loadModels = async () => {
      try {
        const MODEL_URL =
          "https://justadudewhohacks.github.io/face-api.js/models";
        await Promise.all([
          faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL),
          faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL),
          faceapi.nets.faceExpressionNet.loadFromUri(MODEL_URL),
        ]);
        setIsModelLoading(false);
      } catch (error) {
        console.error("Error loading models:", error);
        toast({
          title: "Error",
          description:
            "Failed to load face detection models. Please refresh the page.",
          variant: "destructive",
        });
      }
    };
    loadModels();
  }, [toast]);

  // Euclidean distance between two landmark points.
  const distance = (point1: any, point2: any) =>
    Math.sqrt(
      Math.pow(point1.x - point2.x, 2) + Math.pow(point1.y - point2.y, 2)
    );

  // Eye aspect ratio from the six eye landmark points; a low value means the
  // eye is (nearly) closed.
  const getEyeAspectRatio = (eye: any) => {
    const height1 = distance(eye[1], eye[5]);
    const height2 = distance(eye[2], eye[4]);
    const width = distance(eye[0], eye[3]);
    return (height1 + height2) / (2.0 * width);
  };

  // A blink is assumed when either eye's aspect ratio drops below threshold.
  const detectEyeBlink = (landmarks: any) => {
    const blinkThreshold = 0.2;
    return (
      getEyeAspectRatio(landmarks.getLeftEye()) < blinkThreshold ||
      getEyeAspectRatio(landmarks.getRightEye()) < blinkThreshold
    );
  };

  // A face counts as "live" when its expression scores changed noticeably
  // since the previous frame, or a blink was detected. The first frame only
  // seeds the baseline, so it always reports false.
  const checkLiveness = (expressions: any, landmarks: any) => {
    const previous = previousExpressionsRef.current;
    previousExpressionsRef.current = expressions;
    if (!previous) return false;

    const expressionThreshold = 0.1;
    let hasExpressionChange = false;
    for (const expression in expressions) {
      const diff = Math.abs(expressions[expression] - previous[expression]);
      if (diff > expressionThreshold) {
        hasExpressionChange = true;
        break;
      }
    }
    return hasExpressionChange || detectEyeBlink(landmarks);
  };

  // Grab a webcam frame roughly once a second, detect faces, and run the
  // liveness heuristics on every high-confidence detection.
  useEffect(() => {
    let cancelled = false;

    const processFrame = async () => {
      if (cancelled || !webcamRef.current || isProcessingRef.current) return;
      isProcessingRef.current = true;
      try {
        const imageSrc = webcamRef.current.getScreenshot();
        if (!imageSrc) return; // finally still schedules the next frame
        const img = new Image();
        img.src = imageSrc;
        await new Promise((resolve) => (img.onload = resolve));
        const detections = await faceapi
          .detectAllFaces(img, new faceapi.TinyFaceDetectorOptions())
          .withFaceLandmarks()
          .withFaceExpressions();
        detections
          .filter((detection) => detection.detection.score > 0.7)
          .forEach((detection) => {
            const isLive = checkLiveness(
              detection.expressions,
              detection.landmarks
            );
            if (isLive) {
              toast({
                title: "Liveness Detected",
                description: "Real face detected with natural movements",
              });
            } else {
              toast({
                title: "Liveness Check",
                description: "Please move or blink naturally",
                variant: "destructive",
              });
            }
          });
      } catch (error) {
        console.error("Processing error:", error);
      } finally {
        isProcessingRef.current = false;
        if (!cancelled) {
          // Process every second
          processingTimeoutRef.current = setTimeout(processFrame, 1000);
        }
      }
    };

    if (!isModelLoading) {
      processFrame();
    }
    return () => {
      cancelled = true;
      if (processingTimeoutRef.current) {
        clearTimeout(processingTimeoutRef.current);
      }
    };
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [isModelLoading, toast]);

  if (isModelLoading) {
    return (
      <Card>
        <CardContent className="p-6 text-center">
          <Loader2 className="h-8 w-8 animate-spin mx-auto" />
          <p className="mt-2">Loading face detection models...</p>
        </CardContent>
      </Card>
    );
  }
  return (
    <Card>
      <CardContent className="p-6">
        <div className="space-y-6">
          <div className="relative rounded-lg overflow-hidden bg-black">
            <Webcam
              ref={webcamRef}
              screenshotFormat="image/jpeg"
              className="w-full"
              videoConstraints={{
                width: 640,
                height: 480,
                facingMode: "user",
              }}
            />
          </div>
          <p className="text-center text-sm text-muted-foreground">
            Move your face naturally or blink to verify liveness
          </p>
        </div>
      </CardContent>
    </Card>
  );
}

View File

@ -0,0 +1,28 @@
/* Styles for a webcam view with a warning banner.
   NOTE(review): consumer component not visible in this chunk — the nearby
   TSX components use utility classes, so confirm which component imports
   this stylesheet. */

/* Wrapper that centers the video feed and caps its width. */
.video-container {
  position: relative;
  width: 100%;
  max-width: 768px;
  margin: 0 auto;
}
/* The webcam element fills its wrapper. */
.webcam {
  width: 100%;
  border-radius: 8px;
}
/* Yellow alert banner shown beneath the video. */
.alert-warning {
  display: flex;
  align-items: center;
  background-color: #facc15; /* Yellow */
  color: #000;
  padding: 12px;
  border-radius: 6px;
  margin-top: 16px;
  font-weight: bold;
}
/* Centers the action button under the video. */
.button-container {
  margin-top: 24px;
  display: flex;
  justify-content: center;
}

View File

@ -0,0 +1,112 @@
"use client";
import { useEffect, useRef, useState } from "react";
import Webcam from "react-webcam";
import * as faceapi from "face-api.js";
import { Button } from "@/components/ui/button";
import { AlertTriangle, Camera } from "lucide-react";
import { useToast } from "@/hooks/use-toast";
const MODEL_URL = "https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model";
const CHECK_INTERVAL = 500; // ms between movement checks

/**
 * Shows the webcam and, once started, polls face-api.js every CHECK_INTERVAL
 * milliseconds. If the detected face's bounding-box area grows sharply
 * between two consecutive checks, the person is assumed to be moving toward
 * the camera and a warning banner is rendered.
 */
const FaceMovementDetection = () => {
  const webcamRef = useRef<Webcam>(null);
  const [isModelLoaded, setIsModelLoaded] = useState(false);
  const [isDetecting, setIsDetecting] = useState(false);
  const prevBoxSizeRef = useRef<number | null>(null);
  // Interval id kept in a ref so it can be cleared on unmount — the original
  // setInterval id was discarded and the timer leaked past the component's
  // lifetime.
  const intervalRef = useRef<ReturnType<typeof setInterval> | null>(null);
  const [movingForward, setMovingForward] = useState(false);
  const { toast } = useToast();

  // Load detection models once on mount.
  useEffect(() => {
    const loadModels = async () => {
      try {
        await Promise.all([
          faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL),
          faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL),
        ]);
        setIsModelLoaded(true);
        toast({
          title: "Models Loaded",
          description: "Face detection models ready.",
        });
      } catch (error) {
        console.error("Error loading models:", error);
        toast({
          title: "Error",
          description: "Failed to load models.",
          variant: "destructive",
        });
      }
    };
    loadModels();
  }, [toast]);

  // Stop polling when the component unmounts.
  useEffect(() => {
    return () => {
      if (intervalRef.current) clearInterval(intervalRef.current);
    };
  }, []);

  // Compare the current face bounding-box area against the previous sample;
  // a jump larger than the threshold is treated as forward movement.
  const detectMovement = async () => {
    if (!webcamRef.current?.video) return;
    const video = webcamRef.current.video;
    const detections = await faceapi.detectSingleFace(
      video,
      new faceapi.TinyFaceDetectorOptions()
    );
    console.log("prevBoxSizeRef:", prevBoxSizeRef.current); // debug logging
    if (detections) {
      const { width, height } = detections.box;
      const currentBoxSize = width * height;
      console.log("currentBoxSize:", currentBoxSize);
      if (prevBoxSizeRef.current !== null) {
        const sizeIncrease = currentBoxSize - prevBoxSizeRef.current;
        console.log("Size Increase:", sizeIncrease);
        // Threshold in squared pixels; tuned empirically, not normalized to
        // the video resolution.
        setMovingForward(sizeIncrease > 3000);
      }
      prevBoxSizeRef.current = currentBoxSize; // Update ref directly
    }
  };

  const startDetection = () => {
    // Guard against missing models and against stacking multiple intervals.
    if (!isModelLoaded || intervalRef.current) return;
    setIsDetecting(true);
    intervalRef.current = setInterval(detectMovement, CHECK_INTERVAL);
  };

  return (
    <div className="max-w-3xl mx-auto">
      <div className="relative">
        <Webcam ref={webcamRef} mirrored className="w-full rounded-lg" />
      </div>
      {movingForward && (
        <div className="mt-4 p-3 bg-yellow-300 text-black rounded-md flex items-center">
          <AlertTriangle className="mr-2 h-5 w-5" />
          Person is moving forward!
        </div>
      )}
      <div className="mt-6 flex justify-center">
        <Button
          onClick={startDetection}
          disabled={!isModelLoaded || isDetecting}
        >
          <Camera className="mr-2 h-4 w-4" />
          {isDetecting ? "Detecting..." : "Start Movement Detection"}
        </Button>
      </div>
    </div>
  );
};
export default FaceMovementDetection;

View File

@ -0,0 +1,39 @@
"use client";
import React, { useState } from "react";
import RegisterFaceCount from "./registerFaceCount/RegisterFaceCount";
import HeadCount from "./headCount/HeadCount";
const RealtimeCount: React.FC = () => {
const [activeTab, setActiveTab] = useState<
"register-face-count" | "get-face-count"
>("register-face-count");
return (
<div className="main-container">
<div className="tabs">
<button
className={`tab-button ${
activeTab === "register-face-count" ? "active" : ""
}`}
onClick={() => setActiveTab("register-face-count")}
>
Register Face Count
</button>
<button
className={`tab-button ${
activeTab === "get-face-count" ? "active" : ""
}`}
onClick={() => setActiveTab("get-face-count")}
>
Get Face Count
</button>
</div>
<div className="tab-content">
{activeTab === "register-face-count" && <RegisterFaceCount />}
{activeTab === "get-face-count" && <HeadCount />}
</div>
</div>
);
};
export default RealtimeCount;

View File

@ -0,0 +1,120 @@
/* HeadCount form card: centered column with labelled date/time inputs,
   a submit button, and result lists. */
.headcount-container {
  max-width: 500px;
  margin: 0 auto;
  padding: 20px;
  font-family: Arial, sans-serif;
  background-color: #f9f9f9;
  border-radius: 8px;
  box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
}
.heading {
  text-align: center;
  color: #333;
  margin-bottom: 20px;
}
/* Vertical form layout with uniform spacing between field groups. */
.form {
  display: flex;
  flex-direction: column;
  gap: 15px;
}
.input-group {
  display: flex;
  flex-direction: column;
  gap: 5px;
}
.label {
  font-size: 14px;
  color: #555;
}
.input {
  padding: 10px;
  font-size: 16px;
  border-radius: 4px;
  border: 1px solid #ccc;
  outline: none;
}
.button {
  padding: 10px;
  font-size: 16px;
  background-color: #007bff;
  color: #fff;
  border: none;
  border-radius: 4px;
  cursor: pointer;
  transition: background-color 0.3s ease;
}
.button:disabled {
  background-color: #ccc;
  cursor: not-allowed;
}
.error {
  color: red;
  text-align: center;
  margin-top: 10px;
}
.names-container {
  margin-top: 20px;
}
/* NOTE(review): .sub-heading, .list and .list-item are re-declared further
   down; with equal specificity the later declarations win per-property
   (e.g. font-size, padding), so these earlier rules are partially
   overridden. Consider merging the duplicates. */
.sub-heading {
  color: #333;
  margin-bottom: 10px;
}
.list {
  list-style: none;
  padding: 0;
}
.list-item {
  padding: 10px;
  background-color: #fff;
  border: 1px solid #ddd;
  border-radius: 4px;
  margin-bottom: 5px;
  font-size: 14px;
}
/* Per-day counts panel rendered under the total. */
.daily-counts-container {
  margin-top: 20px;
  padding: 15px;
  border-radius: 8px;
  background: #f8f9fa;
  box-shadow: 0px 4px 6px rgba(0, 0, 0, 0.1);
}
/* Duplicate of .sub-heading above — later declarations take effect. */
.sub-heading {
  font-size: 1.5rem;
  margin-bottom: 10px;
  color: #333;
}
/* Duplicate of .list above. */
.list {
  list-style-type: none;
  padding: 0;
}
/* Duplicate of .list-item above. */
.list-item {
  font-size: 1.1rem;
  padding: 8px 0;
  border-bottom: 1px solid #ddd;
  display: flex;
  justify-content: space-between;
}
.date {
  font-weight: bold;
  color: #007bff;
}
.count {
  color: #555;
}

View File

@ -0,0 +1,183 @@
import React, { useState } from "react";
import "./HeadCount.css";
/** Shape of the headcount endpoint's JSON response. */
interface ApiResponse {
  total_unique_faces: number;
  daily_counts: { date: string; unique_faces: number }[];
}
/** One half (start or end) of the queried range, as raw input values. */
interface DateTimeRange {
  date: string;
  time: string;
}
/**
 * Form that queries the headcount API for the number of unique faces seen
 * in a user-supplied date/time range, then renders the total and the
 * per-day breakdown.
 */
const HeadCount: React.FC = () => {
  const [from, setFrom] = useState<DateTimeRange>({ date: "", time: "" });
  const [to, setTo] = useState<DateTimeRange>({ date: "", time: "" });
  // null = nothing fetched yet; 0 is a valid result and must still render.
  const [count, setCount] = useState<number | null>(null);
  const [loading, setLoading] = useState<boolean>(false);
  const [error, setError] = useState<string | null>(null);
  const [dailyCounts, setDailyCounts] = useState<
    { date: string; count: number }[]
  >([]);

  // Shared change handler: the input's `name` ("date" or "time") selects the
  // field inside the chosen range object.
  const handleInputChange = (
    e: React.ChangeEvent<HTMLInputElement>,
    field: "from" | "to"
  ) => {
    const { name, value } = e.target;
    if (field === "from") {
      setFrom((prev) => ({ ...prev, [name]: value }));
    } else {
      setTo((prev) => ({ ...prev, [name]: value }));
    }
  };

  // NOTE(review): the user's local date/time is labelled as UTC (+00:00)
  // here — confirm the API expects UTC rather than the browser's zone.
  const formatDateTime = (date: string, time: string) => {
    return `${date}T${time}:00+00:00`;
  };

  // Validate the four inputs, build the query string, and fetch the counts.
  const handleSubmit = async (e: React.FormEvent) => {
    e.preventDefault();
    if (!from.date || !from.time || !to.date || !to.time) {
      setError("Please fill in all date and time fields.");
      return;
    }
    setLoading(true);
    setError(null);
    const start = formatDateTime(from.date, from.time);
    const end = formatDateTime(to.date, to.time);
    try {
      const response = await fetch(
        `${
          process.env.NEXT_PUBLIC_BASE_URL
        }/face/headcount?start_time=${encodeURIComponent(
          start
        )}&end_time=${encodeURIComponent(end)}`,
        {
          method: "GET",
          headers: {
            "Content-Type": "application/json",
          },
        }
      );
      if (!response.ok) {
        throw new Error("Failed to fetch data");
      }
      const data: ApiResponse = await response.json();
      setCount(data.total_unique_faces);
      if (data?.daily_counts) {
        setDailyCounts(
          data.daily_counts.map((d) => ({
            date: d.date,
            count: d.unique_faces,
          }))
        );
      }
    } catch (err) {
      setError("An error occurred while fetching data.");
      console.error(err);
    } finally {
      setLoading(false);
    }
  };

  return (
    <div className="headcount-container">
      <h1 className="heading">Head Count</h1>
      <form onSubmit={handleSubmit} className="form">
        <div className="input-group">
          <label htmlFor="from-date" className="label">
            From Date:
          </label>
          <input
            type="date"
            id="from-date"
            name="date"
            value={from.date}
            onChange={(e) => handleInputChange(e, "from")}
            required
            className="input"
          />
        </div>
        <div className="input-group">
          <label htmlFor="from-time" className="label">
            From Time:
          </label>
          <input
            type="time"
            id="from-time"
            name="time"
            value={from.time}
            onChange={(e) => handleInputChange(e, "from")}
            required
            className="input"
          />
        </div>
        <div className="input-group">
          <label htmlFor="to-date" className="label">
            To Date:
          </label>
          <input
            type="date"
            id="to-date"
            name="date"
            value={to.date}
            onChange={(e) => handleInputChange(e, "to")}
            required
            className="input"
          />
        </div>
        <div className="input-group">
          <label htmlFor="to-time" className="label">
            To Time:
          </label>
          <input
            type="time"
            id="to-time"
            name="time"
            value={to.time}
            onChange={(e) => handleInputChange(e, "to")}
            required
            className="input"
          />
        </div>
        <button type="submit" disabled={loading} className="button">
          {loading ? "Submitting..." : "Submit"}
        </button>
      </form>
      {error && <p className="error">{error}</p>}
      {/* Explicit null check: `{count && ...}` hid a legitimate zero result
          and rendered a stray literal "0" (React renders the number 0). */}
      {count !== null && (
        <div className="names-container">
          <h2 className="sub-heading">Total Unique Face Count:</h2>
          <ul className="list">{count}</ul>
        </div>
      )}
      {dailyCounts?.length > 0 && (
        <div className="daily-counts-container">
          <h2 className="sub-heading">Daily Counts:</h2>
          <ul className="list">
            {dailyCounts.map((item, index) => (
              <li key={index} className="list-item">
                <span className="date">{item.date}:</span>
                <span className="count"> {item.count}</span>
              </li>
            ))}
          </ul>
        </div>
      )}
    </div>
  );
};
export default HeadCount;

View File

@ -0,0 +1,166 @@
import React from "react";
import { useEffect, useRef, useState } from "react";
import Webcam from "react-webcam";
import * as faceapi from "face-api.js";
import { Button } from "@/components/ui/button";
import { Camera } from "lucide-react";
import { useToast } from "@/hooks/use-toast";
const MODEL_URL = "https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model";
// Extra pixels captured around a detected face box before upload.
const PADDING = 60;

/**
 * Webcam view that, once started, detects faces every second, draws their
 * bounding boxes on an overlay canvas, and uploads a padded crop of each
 * high-confidence face to the backend.
 * NOTE(review): despite the "Register" name this posts to `/face/search` —
 * confirm that is the intended endpoint.
 */
const RegisterFaceCount = () => {
  const webcamRef = useRef<Webcam>(null);
  const canvasRef = useRef<HTMLCanvasElement>(null);
  const [isModelLoaded, setIsModelLoaded] = useState(false);
  const [isDetecting, setIsDetecting] = useState(false);
  // Interval id kept in a ref so it can be cleared on unmount — the original
  // setInterval id was discarded and the timer (and its uploads) leaked past
  // the component's lifetime.
  const intervalRef = useRef<ReturnType<typeof setInterval> | null>(null);
  const { toast } = useToast();

  // Load the detection/landmark/recognition models once on mount.
  useEffect(() => {
    const loadModels = async () => {
      try {
        await faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL);
        await faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL);
        await faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL);
        setIsModelLoaded(true);
      } catch (error) {
        console.error("Error loading models:", error);
        toast({
          title: "Error",
          description: "Failed to load face detection models.",
          variant: "destructive",
        });
      }
    };
    loadModels();
  }, [toast]);

  // Stop polling when the component unmounts.
  useEffect(() => {
    return () => {
      if (intervalRef.current) clearInterval(intervalRef.current);
    };
  }, []);

  // Crop the face region plus PADDING on each side (clamped to the video
  // bounds) into a fresh canvas for upload.
  const extractFaceWithPadding = (
    video: HTMLVideoElement,
    box: faceapi.Box
  ): HTMLCanvasElement => {
    const canvas = document.createElement("canvas");
    const context = canvas.getContext("2d");
    const x = Math.max(0, box.x - PADDING);
    const y = Math.max(0, box.y - PADDING);
    const width = Math.min(video.videoWidth - x, box.width + 2 * PADDING);
    const height = Math.min(video.videoHeight - y, box.height + 2 * PADDING);
    canvas.width = width;
    canvas.height = height;
    if (context) {
      context.drawImage(video, x, y, width, height, 0, 0, width, height);
    }
    return canvas;
  };

  // One polling tick: detect faces, draw boxes/labels on the mirrored
  // overlay, and upload each high-confidence crop.
  const detectFace = async () => {
    if (!webcamRef.current?.video || !canvasRef.current) return;
    const video = webcamRef.current.video;
    const canvas = canvasRef.current;
    const context = canvas.getContext("2d");
    if (!context) return;
    // Assigning width/height also resets the context transform, so the
    // mirror translate/scale below does not accumulate across ticks.
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    context.clearRect(0, 0, canvas.width, canvas.height);
    context.translate(canvas.width, 0);
    context.scale(-1, 1);
    const detections = await faceapi
      .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
      .withFaceLandmarks()
      .withFaceDescriptors();
    if (detections.length > 0) {
      const highConfidenceDetections = detections.filter(
        (detection) => detection.detection.score > 0.5
      );
      for (const detection of highConfidenceDetections) {
        const { box } = detection.detection;
        context.strokeStyle = "#00FF00";
        context.lineWidth = 2;
        context.strokeRect(box.x, box.y, box.width, box.height);
        // Un-mirror just for the text so the label reads left-to-right.
        context.save();
        context.scale(-1, 1);
        context.fillStyle = "#00FF00";
        context.font = "16px Arial";
        context.fillText(
          `Confidence: ${Math.round(detection.detection.score * 100)}%`,
          -box.x - box.width,
          box.y - 5
        );
        context.restore();
        const faceCanvas = extractFaceWithPadding(video, box);
        faceCanvas.toBlob(
          (blob) => {
            if (blob) sendFaceDataToAPI(blob);
          },
          "image/jpeg",
          0.95
        );
      }
    }
  };

  // Upload one face crop as multipart form data and surface the reply.
  const sendFaceDataToAPI = async (imageBlob: Blob) => {
    try {
      const formData = new FormData();
      formData.append("image", imageBlob, "face.jpg");
      const response = await fetch(
        `${process.env.NEXT_PUBLIC_BASE_URL}/face/search`,
        {
          method: "POST",
          body: formData,
        }
      );
      const data = await response.json();
      toast({ title: data?.name, description: data.message });
    } catch (error) {
      console.error("Error sending face data:", error);
      toast({
        title: "Error",
        description: "Failed to send face data.",
        variant: "destructive",
      });
    }
  };

  const startDetection = () => {
    // Guard against missing models and against stacking multiple intervals.
    if (!isModelLoaded || intervalRef.current) return;
    setIsDetecting(true);
    intervalRef.current = setInterval(detectFace, 1000);
  };

  return (
    <div className="max-w-3xl mx-auto">
      <div className="relative">
        <Webcam ref={webcamRef} mirrored className="w-full rounded-lg" />
        <canvas
          ref={canvasRef}
          className="absolute top-0 left-0 w-full h-full"
        />
      </div>
      <div className="mt-6 flex justify-center">
        <Button
          onClick={startDetection}
          disabled={!isModelLoaded || isDetecting}
        >
          <Camera className="mr-2 h-4 w-4" />
          {isDetecting ? "Detecting..." : "Start Realtime Detection"}
        </Button>
      </div>
    </div>
  );
};
export default RegisterFaceCount;

View File

@ -81,7 +81,7 @@ const RealtimeFaceDetection = () => {
if (detections.length > 0) { if (detections.length > 0) {
const highConfidenceDetections = detections.filter( const highConfidenceDetections = detections.filter(
(detection) => detection.detection.score > 0.7 (detection) => detection.detection.score > 0.5
); );
for (const detection of highConfidenceDetections) { for (const detection of highConfidenceDetections) {

79
components/ui/card.tsx Normal file
View File

@ -0,0 +1,79 @@
import * as React from "react"
import { cn } from "@/lib/utils"
// Every Card section is the same shape: a ref-forwarding <div> that merges a
// fixed base class with any caller-supplied className and spreads the rest of
// the div props. A single factory removes the six-fold boilerplate while
// producing components identical in behavior and displayName.
function createCardPart(displayName: string, baseClassName: string) {
  const Part = React.forwardRef<
    HTMLDivElement,
    React.HTMLAttributes<HTMLDivElement>
  >(({ className, ...props }, ref) => (
    <div ref={ref} className={cn(baseClassName, className)} {...props} />
  ))
  Part.displayName = displayName
  return Part
}

const Card = createCardPart(
  "Card",
  "rounded-lg border bg-card text-card-foreground shadow-sm"
)
const CardHeader = createCardPart("CardHeader", "flex flex-col space-y-1.5 p-6")
const CardTitle = createCardPart(
  "CardTitle",
  "text-2xl font-semibold leading-none tracking-tight"
)
const CardDescription = createCardPart(
  "CardDescription",
  "text-sm text-muted-foreground"
)
const CardContent = createCardPart("CardContent", "p-6 pt-0")
const CardFooter = createCardPart("CardFooter", "flex items-center p-6 pt-0")

export { Card, CardHeader, CardFooter, CardTitle, CardDescription, CardContent }