import React, { useState, useEffect, useRef } from "react";
import Hls from "hls.js";
import * as faceapi from "face-api.js";
import { Button } from "@/components/ui/button";
import { Camera } from "lucide-react";
import { useToast } from "@/hooks/use-toast";

const MODEL_URL = "https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model";
const PADDING = 60; // extra pixels captured around each detected face
const API_BASE = "http://localhost:8081";
const API_URL = `${API_BASE}/start`;

const RtspStream: React.FC = () => {
  const [rtspUrl, setRtspUrl] = useState("");
  const [cameraName, setCameraName] = useState("");
  const [m3u8Url, setM3u8Url] = useState<string | null>(null);
  const [loading, setLoading] = useState(false);
  const [isModelLoaded, setIsModelLoaded] = useState(false);
  const [isDetecting, setIsDetecting] = useState(false);
  const videoRef = useRef<HTMLVideoElement>(null);
  const canvasRef = useRef<HTMLCanvasElement>(null);
  const detectionIntervalRef = useRef<ReturnType<typeof setInterval> | null>(
    null
  );
  const { toast } = useToast();

  // Load the face-api.js models once on mount.
  useEffect(() => {
    const loadModels = async () => {
      try {
        await faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL);
        await faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL);
        await faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL);
        setIsModelLoaded(true);
      } catch (error) {
        console.error("Error loading models:", error);
        toast({
          title: "Error",
          description: "Failed to load face detection models.",
          variant: "destructive",
        });
      }
    };
    loadModels();
  }, [toast]);

  // Attach the HLS stream to the <video> element whenever the URL changes.
  useEffect(() => {
    if (!m3u8Url || !videoRef.current) return;

    if (Hls.isSupported()) {
      const hls = new Hls();
      hls.loadSource(m3u8Url);
      hls.attachMedia(videoRef.current);
      // Tear the player down when the URL changes or the component unmounts.
      return () => hls.destroy();
    } else if (videoRef.current.canPlayType("application/vnd.apple.mpegurl")) {
      // Safari plays HLS natively.
      videoRef.current.src = m3u8Url;
    }
  }, [m3u8Url]);

  // Stop the detection loop if the component unmounts mid-detection.
  useEffect(() => {
    return () => {
      if (detectionIntervalRef.current) {
        clearInterval(detectionIntervalRef.current);
      }
    };
  }, []);

  // Crop the detected face out of the current video frame, with PADDING
  // pixels of context on each side, clamped to the frame boundaries.
  const extractFaceWithPadding = (
    video: HTMLVideoElement,
    box: faceapi.Box
  ): HTMLCanvasElement => {
    const canvas = document.createElement("canvas");
    const context = canvas.getContext("2d");

    const x = Math.max(0, box.x - PADDING);
    const y = Math.max(0, box.y - PADDING);
    const width = Math.min(video.videoWidth - x, box.width + 2 * PADDING);
    const height = Math.min(video.videoHeight - y, box.height + 2 * PADDING);

    canvas.width = width;
    canvas.height = height;
    if (context) {
      context.drawImage(video, x, y, width, height, 0, 0, width, height);
    }
    return canvas;
  };

  const detectFace = async () => {
    if (!videoRef.current || !canvasRef.current || !videoRef.current.videoWidth)
      return;

    const video = videoRef.current;
    const canvas = canvasRef.current;
    const context = canvas.getContext("2d");
    if (!context) return;

    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    context.clearRect(0, 0, canvas.width, canvas.height);

    const detections = await faceapi
      .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
      .withFaceLandmarks()
      .withFaceDescriptors();

    const highConfidenceDetections = detections.filter(
      (detection) => detection.detection.score > 0.5
    );

    for (const detection of highConfidenceDetections) {
      const { box } = detection.detection;

      // Draw the bounding box and confidence label on the overlay canvas.
      context.strokeStyle = "#00FF00";
      context.lineWidth = 2;
      context.strokeRect(box.x, box.y, box.width, box.height);
      context.fillStyle = "#00FF00";
      context.font = "16px Arial";
      context.fillText(
        `Confidence: ${Math.round(detection.detection.score * 100)}%`,
        box.x,
        box.y - 5
      );

      // Send the cropped face to the recognition backend.
      const faceCanvas = extractFaceWithPadding(video, box);
      faceCanvas.toBlob(
        (blob) => {
          if (blob) sendFaceDataToAPI(blob);
        },
        "image/jpeg",
        0.95
      );
    }
  };

  const sendFaceDataToAPI = async (imageBlob: Blob) => {
    try {
      const formData = new FormData();
      formData.append("image", imageBlob, "face.jpg");

      const response = await fetch(
        `${process.env.NEXT_PUBLIC_BASE_URL}/search`,
        {
          method: "POST",
          body: formData,
        }
      );
      const data = await response.json();
      toast({ title: data?.name, description: data?.message });
    } catch (error) {
      console.error("Error sending face data:", error);
      toast({
        title: "Error",
        description: "Failed to send face data.",
        variant: "destructive",
      });
    }
  };

  const startDetection = () => {
    if (!isModelLoaded || !videoRef.current) return;
    // Avoid stacking intervals if detection is already running.
    if (detectionIntervalRef.current) {
      clearInterval(detectionIntervalRef.current);
    }
    setIsDetecting(true);
    detectionIntervalRef.current = setInterval(detectFace, 1000);
  };

  const stopDetection = () => {
    if (detectionIntervalRef.current) {
      clearInterval(detectionIntervalRef.current);
      detectionIntervalRef.current = null;
    }
    setIsDetecting(false);
    if (canvasRef.current) {
      const context = canvasRef.current.getContext("2d");
      if (context) {
        context.clearRect(
          0,
          0,
          canvasRef.current.width,
          canvasRef.current.height
        );
      }
    }
  };

  const handleSubmit = async (e: React.FormEvent) => {
    e.preventDefault();
    setLoading(true);
    stopDetection(); // Stop any ongoing detection before switching streams.
    try {
      const response = await fetch(API_URL, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          uri: rtspUrl,
          alias: cameraName,
        }),
      });
      if (!response.ok) {
        throw new Error("Failed to fetch stream URL");
      }
      const data = await response.json();
      setM3u8Url(`${API_BASE}${data?.uri}`);
    } catch (error) {
      console.error("Error fetching stream:", error);
      toast({
        title: "Error",
        description: "Failed to load stream.",
        variant: "destructive",
      });
    } finally {
      setLoading(false);
    }
  };

  return (
    <div className="max-w-3xl mx-auto p-4 space-y-4">
      <h1 className="text-2xl font-bold">RTSP Stream with Face Detection</h1>

      <form onSubmit={handleSubmit} className="space-y-2">
        <input
          type="text"
          value={rtspUrl}
          onChange={(e) => setRtspUrl(e.target.value)}
          placeholder="Enter RTSP URL"
          className="w-full p-2 border rounded"
          required
        />
        <input
          type="text"
          value={cameraName}
          onChange={(e) => setCameraName(e.target.value)}
          placeholder="Enter Camera Name"
          className="w-full p-2 border rounded"
          required
        />
        <Button type="submit" disabled={loading}>
          {loading ? "Starting..." : "Start Stream"}
        </Button>
      </form>

      {m3u8Url && !loading && (
        <div className="space-y-2">
          <div className="relative">
            {/* crossOrigin keeps canvas frame grabs from being CORS-tainted,
                which would make toBlob() throw in detectFace. */}
            <video
              ref={videoRef}
              autoPlay
              muted
              playsInline
              crossOrigin="anonymous"
              className="w-full"
            />
            <canvas
              ref={canvasRef}
              className="absolute top-0 left-0 w-full h-full"
            />
          </div>
          <Button
            onClick={isDetecting ? stopDetection : startDetection}
            disabled={!isModelLoaded}
          >
            <Camera className="mr-2 h-4 w-4" />
            {isDetecting ? "Stop Detection" : "Start Detection"}
          </Button>
        </div>
      )}
    </div>
  );
};

export default RtspStream;
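
// Usage sketch (an assumption, not confirmed by this file): the NEXT_PUBLIC_BASE_URL
// env var and "@/" path aliases suggest a Next.js app. hls.js and face-api.js both
// rely on browser APIs, so under the App Router the page rendering this component
// must be a client component. The file paths below are hypothetical.
//
//   // app/stream/page.tsx
//   "use client";
//
//   import RtspStream from "@/components/RtspStream";
//
//   export default function StreamPage() {
//     return <RtspStream />;
//   }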