diff --git a/components/MainForm.tsx b/components/MainForm.tsx
index 31490fb..1834ee0 100644
--- a/components/MainForm.tsx
+++ b/components/MainForm.tsx
@@ -3,10 +3,10 @@ import React, { useState } from "react";
import Register from "./register/Register";
import Search from "./search/Search";
import "./MainForm.css";
-import RealtimeFaceDetection from "./realtimeFaceDetection/RealtimeFaceDetection";
import FaceLiveness from "./faceLivelinessCheck/FaceLivelinessCheck";
import FaceMovementDetection from "./faceMovementDetection/FaceMovementDetection";
import RealtimeCount from "./realtimeCount/RealtimeCount";
+import RealtimeDetection from "./realtimeDetection/RealtimeDetection";
const MainForm: React.FC = () => {
const [activeTab, setActiveTab] = useState<
@@ -65,7 +65,7 @@ const MainForm: React.FC = () => {
       {activeTab === "register" && <Register />}
       {activeTab === "search" && <Search />}
-      {activeTab === "realtime" && <RealtimeFaceDetection />}
+      {activeTab === "realtime" && <RealtimeDetection />}
       {activeTab === "liveliness" && <FaceLiveness />}
       {activeTab === "realtime-count" && <RealtimeCount />}
       {activeTab === "facemovement" && <FaceMovementDetection />}
diff --git a/components/realtimeDetection/RealtimeDetection.tsx b/components/realtimeDetection/RealtimeDetection.tsx
new file mode 100644
index 0000000..4218894
--- /dev/null
+++ b/components/realtimeDetection/RealtimeDetection.tsx
@@ -0,0 +1,33 @@
+"use client";
+import React, { useState } from "react";
+import WebcamDetection from "./webcam/Webcam";
+import RtspStream from "./rtspStream/RtspStream";
+
+const RealtimeDetection: React.FC = () => {
+ const [activeTab, setActiveTab] = useState<"webcam" | "rtsp">("webcam");
+
+ return (
+
+
+
+
+
+
+        {activeTab === "webcam" && <WebcamDetection />}
+        {activeTab === "rtsp" && <RtspStream />}
+
+
+ );
+};
+
+export default RealtimeDetection;
diff --git a/components/realtimeDetection/rtspStream/RtspStream.tsx b/components/realtimeDetection/rtspStream/RtspStream.tsx
new file mode 100644
index 0000000..6f1197f
--- /dev/null
+++ b/components/realtimeDetection/rtspStream/RtspStream.tsx
@@ -0,0 +1,109 @@
+import React, { useState, useEffect, useRef } from "react";
+import Hls from "hls.js";
+
+const API_URL = "http://localhost:8081/start"; // Replace with your actual API endpoint
+
+const RtspStream: React.FC = () => {
+  const [rtspUrl, setRtspUrl] = useState<string>("");
+ const [cameraName, setCameraName] = useState("");
+  const [m3u8Url, setM3u8Url] = useState<string | null>(null);
+ const [loading, setLoading] = useState(false); // Loading state
+  const videoRef = useRef<HTMLVideoElement>(null);
+
+ useEffect(() => {
+ if (m3u8Url && videoRef.current) {
+ if (Hls.isSupported()) {
+ const hls = new Hls();
+ hls.loadSource(m3u8Url);
+ hls.attachMedia(videoRef.current);
+ } else if (
+ videoRef.current.canPlayType("application/vnd.apple.mpegurl")
+ ) {
+ videoRef.current.src = m3u8Url;
+ }
+ }
+ }, [m3u8Url]);
+
+ const handleSubmit = async (e: React.FormEvent) => {
+ e.preventDefault();
+ setLoading(true); // Set loading to true when submitting
+
+ try {
+ const response = await fetch(API_URL, {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ uri: rtspUrl,
+ alias: cameraName,
+ }),
+ });
+
+ if (!response.ok) {
+ throw new Error("Failed to fetch stream URL");
+ }
+
+ const data = await response.json();
+ console.log("Stream data:", data);
+ setM3u8Url(`http://localhost:8081${data?.uri}`);
+ } catch (error) {
+ console.error("Error fetching stream:", error);
+ alert("Failed to load stream.");
+ } finally {
+ setLoading(false); // Reset loading state after API response
+ }
+ };
+
+ return (
+
+      <h2>RTSP Stream</h2>
+
+
+ {loading && (
+
+ Stream is starting...
+
+ )}
+
+ {m3u8Url && !loading && (
+
+ )}
+
+ );
+};
+
+export default RtspStream;
diff --git a/components/realtimeDetection/webcam/Webcam.tsx b/components/realtimeDetection/webcam/Webcam.tsx
new file mode 100644
index 0000000..f1ee69e
--- /dev/null
+++ b/components/realtimeDetection/webcam/Webcam.tsx
@@ -0,0 +1,167 @@
+"use client";
+
+import { useEffect, useRef, useState } from "react";
+import Webcam from "react-webcam";
+import * as faceapi from "face-api.js";
+import { Button } from "@/components/ui/button";
+import { Camera } from "lucide-react";
+import { useToast } from "@/hooks/use-toast";
+
+const MODEL_URL = "https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model";
+const PADDING = 60;
+
+const WebcamDetection = () => {
+  const webcamRef = useRef<Webcam>(null);
+  const canvasRef = useRef<HTMLCanvasElement>(null);
+ const [isModelLoaded, setIsModelLoaded] = useState(false);
+ const [isDetecting, setIsDetecting] = useState(false);
+ const { toast } = useToast();
+
+ useEffect(() => {
+ const loadModels = async () => {
+ try {
+ await faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL);
+ await faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL);
+ await faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL);
+ setIsModelLoaded(true);
+ } catch (error) {
+ console.error("Error loading models:", error);
+ toast({
+ title: "Error",
+ description: "Failed to load face detection models.",
+ variant: "destructive",
+ });
+ }
+ };
+ loadModels();
+ }, [toast]);
+
+ const extractFaceWithPadding = (
+ video: HTMLVideoElement,
+ box: faceapi.Box
+ ): HTMLCanvasElement => {
+ const canvas = document.createElement("canvas");
+ const context = canvas.getContext("2d");
+
+ // Calculate padded dimensions
+ const x = Math.max(0, box.x - PADDING);
+ const y = Math.max(0, box.y - PADDING);
+ const width = Math.min(video.videoWidth - x, box.width + 2 * PADDING);
+ const height = Math.min(video.videoHeight - y, box.height + 2 * PADDING);
+
+ canvas.width = width;
+ canvas.height = height;
+
+ if (context) {
+ context.drawImage(video, x, y, width, height, 0, 0, width, height);
+ }
+
+ return canvas;
+ };
+
+ const detectFace = async () => {
+ if (!webcamRef.current?.video || !canvasRef.current) return;
+
+ const video = webcamRef.current.video;
+ const canvas = canvasRef.current;
+ const context = canvas.getContext("2d");
+
+ if (!context) return;
+
+ canvas.width = video.videoWidth;
+ canvas.height = video.videoHeight;
+ context.clearRect(0, 0, canvas.width, canvas.height);
+ context.translate(canvas.width, 0);
+ context.scale(-1, 1);
+
+ const detections = await faceapi
+ .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
+ .withFaceLandmarks()
+ .withFaceDescriptors();
+
+ if (detections.length > 0) {
+ const highConfidenceDetections = detections.filter(
+ (detection) => detection.detection.score > 0.5
+ );
+
+ for (const detection of highConfidenceDetections) {
+ const { box } = detection.detection;
+ context.strokeStyle = "#00FF00";
+ context.lineWidth = 2;
+ context.strokeRect(box.x, box.y, box.width, box.height);
+ context.save();
+ context.scale(-1, 1);
+ context.fillStyle = "#00FF00";
+ context.font = "16px Arial";
+ context.fillText(
+ `Confidence: ${Math.round(detection.detection.score * 100)}%`,
+ -box.x - box.width,
+ box.y - 5
+ );
+ context.restore();
+
+ const faceCanvas = extractFaceWithPadding(video, box);
+ faceCanvas.toBlob(
+ (blob) => {
+ if (blob) sendFaceDataToAPI(blob);
+ },
+ "image/jpeg",
+ 0.95
+ );
+ }
+ }
+ };
+
+ const sendFaceDataToAPI = async (imageBlob: Blob) => {
+ try {
+ const formData = new FormData();
+ formData.append("image", imageBlob, "face.jpg");
+
+ const response = await fetch(
+ `${process.env.NEXT_PUBLIC_BASE_URL}/search`,
+ {
+ method: "POST",
+ body: formData,
+ }
+ );
+
+ const data = await response.json();
+ toast({ title: data?.name, description: data.message });
+ } catch (error) {
+ console.error("Error sending face data:", error);
+ toast({
+ title: "Error",
+ description: "Failed to send face data.",
+ variant: "destructive",
+ });
+ }
+ };
+
+ const startDetection = () => {
+ if (!isModelLoaded) return;
+ setIsDetecting(true);
+ setInterval(detectFace, 1000);
+ };
+ return (
+
+
+
+
+
+
+
+
+
+ );
+};
+
+export default WebcamDetection;
diff --git a/package-lock.json b/package-lock.json
index 90b537a..ca4671f 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -14,6 +14,7 @@
"clsx": "^2.1.1",
"encoding": "^0.1.13",
"face-api.js": "^0.22.2",
+ "hls.js": "^1.0.3-0.canary.7275",
"lucide-react": "^0.474.0",
"next": "15.1.6",
"react": "^18.3.1",
@@ -3883,6 +3884,12 @@
"node": ">= 0.4"
}
},
+ "node_modules/hls.js": {
+ "version": "1.0.3-0.canary.7275",
+ "resolved": "https://registry.npmjs.org/hls.js/-/hls.js-1.0.3-0.canary.7275.tgz",
+ "integrity": "sha512-l8y7S4Hq042OpcH91BX2DgGzIslSv8dYF+BQd7Ood+wdJo6qvo0+6bHRc/c+wubSXBbd8KdJDJ0k428zmDOTIQ==",
+ "license": "Apache-2.0"
+ },
"node_modules/iconv-lite": {
"version": "0.6.3",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz",
diff --git a/package.json b/package.json
index 60cf85e..6288a35 100644
--- a/package.json
+++ b/package.json
@@ -15,6 +15,7 @@
"clsx": "^2.1.1",
"encoding": "^0.1.13",
"face-api.js": "^0.22.2",
+ "hls.js": "^1.0.3-0.canary.7275",
"lucide-react": "^0.474.0",
"next": "15.1.6",
"react": "^18.3.1",