// RtspStream — plays an RTSP camera feed via a backend-provided HLS stream and
// runs browser-side face detection (face-api.js) over the playing video.
import React, { useState, useEffect, useRef } from "react";
|
|
import Hls from "hls.js";
|
|
import * as faceapi from "face-api.js";
|
|
import { Button } from "@/components/ui/button";
|
|
import { Camera } from "lucide-react";
|
|
import { useToast } from "@/hooks/use-toast";
|
|
|
|
const MODEL_URL = "https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model";
|
|
const PADDING = 60;
|
|
const API_URL = "http://localhost:8081/start";
|
|
|
|
const RtspStream: React.FC = () => {
|
|
const [rtspUrl, setRtspUrl] = useState<string>("");
|
|
const [cameraName, setCameraName] = useState<string>("");
|
|
const [m3u8Url, setM3u8Url] = useState<string | null>(null);
|
|
const [loading, setLoading] = useState<boolean>(false);
|
|
const [isModelLoaded, setIsModelLoaded] = useState(false);
|
|
const [isDetecting, setIsDetecting] = useState(false);
|
|
const videoRef = useRef<HTMLVideoElement | null>(null);
|
|
const canvasRef = useRef<HTMLCanvasElement>(null);
|
|
const detectionIntervalRef = useRef<ReturnType<typeof setInterval> | null>(
|
|
null
|
|
);
|
|
const { toast } = useToast();
|
|
|
|
useEffect(() => {
|
|
const loadModels = async () => {
|
|
try {
|
|
await faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL);
|
|
await faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL);
|
|
await faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL);
|
|
setIsModelLoaded(true);
|
|
} catch (error) {
|
|
console.error("Error loading models:", error);
|
|
toast({
|
|
title: "Error",
|
|
description: "Failed to load face detection models.",
|
|
variant: "destructive",
|
|
});
|
|
}
|
|
};
|
|
loadModels();
|
|
}, [toast]);
|
|
|
|
useEffect(() => {
|
|
if (m3u8Url && videoRef.current) {
|
|
if (Hls.isSupported()) {
|
|
const hls = new Hls();
|
|
hls.loadSource(m3u8Url);
|
|
hls.attachMedia(videoRef.current);
|
|
} else if (
|
|
videoRef.current.canPlayType("application/vnd.apple.mpegurl")
|
|
) {
|
|
videoRef.current.src = m3u8Url;
|
|
}
|
|
}
|
|
}, [m3u8Url]);
|
|
|
|
const extractFaceWithPadding = (
|
|
video: HTMLVideoElement,
|
|
box: faceapi.Box
|
|
): HTMLCanvasElement => {
|
|
const canvas = document.createElement("canvas");
|
|
const context = canvas.getContext("2d");
|
|
|
|
const x = Math.max(0, box.x - PADDING);
|
|
const y = Math.max(0, box.y - PADDING);
|
|
const width = Math.min(video.videoWidth - x, box.width + 2 * PADDING);
|
|
const height = Math.min(video.videoHeight - y, box.height + 2 * PADDING);
|
|
|
|
canvas.width = width;
|
|
canvas.height = height;
|
|
|
|
if (context) {
|
|
context.drawImage(video, x, y, width, height, 0, 0, width, height);
|
|
}
|
|
|
|
return canvas;
|
|
};
|
|
|
|
const detectFace = async () => {
|
|
if (!videoRef.current || !canvasRef.current || !videoRef.current.videoWidth)
|
|
return;
|
|
|
|
const video = videoRef.current;
|
|
const canvas = canvasRef.current;
|
|
const context = canvas.getContext("2d");
|
|
|
|
if (!context) return;
|
|
|
|
canvas.width = video.videoWidth;
|
|
canvas.height = video.videoHeight;
|
|
context.clearRect(0, 0, canvas.width, canvas.height);
|
|
|
|
const detections = await faceapi
|
|
.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
|
|
.withFaceLandmarks()
|
|
.withFaceDescriptors();
|
|
|
|
if (detections.length > 0) {
|
|
const highConfidenceDetections = detections.filter(
|
|
(detection) => detection.detection.score > 0.5
|
|
);
|
|
|
|
for (const detection of highConfidenceDetections) {
|
|
const { box } = detection.detection;
|
|
context.strokeStyle = "#00FF00";
|
|
context.lineWidth = 2;
|
|
context.strokeRect(box.x, box.y, box.width, box.height);
|
|
context.fillStyle = "#00FF00";
|
|
context.font = "16px Arial";
|
|
context.fillText(
|
|
`Confidence: ${Math.round(detection.detection.score * 100)}%`,
|
|
box.x,
|
|
box.y - 5
|
|
);
|
|
|
|
const faceCanvas = extractFaceWithPadding(video, box);
|
|
faceCanvas.toBlob(
|
|
(blob) => {
|
|
if (blob) sendFaceDataToAPI(blob);
|
|
},
|
|
"image/jpeg",
|
|
0.95
|
|
);
|
|
}
|
|
}
|
|
};
|
|
|
|
const sendFaceDataToAPI = async (imageBlob: Blob) => {
|
|
try {
|
|
const formData = new FormData();
|
|
formData.append("image", imageBlob, "face.jpg");
|
|
|
|
const response = await fetch(
|
|
`${process.env.NEXT_PUBLIC_BASE_URL}/search`,
|
|
{
|
|
method: "POST",
|
|
body: formData,
|
|
}
|
|
);
|
|
|
|
const data = await response.json();
|
|
toast({ title: data?.name, description: data.message });
|
|
} catch (error) {
|
|
console.error("Error sending face data:", error);
|
|
toast({
|
|
title: "Error",
|
|
description: "Failed to send face data.",
|
|
variant: "destructive",
|
|
});
|
|
}
|
|
};
|
|
|
|
const startDetection = () => {
|
|
if (!isModelLoaded || !videoRef.current) return;
|
|
console.log("Starting detection...");
|
|
setIsDetecting(true);
|
|
detectionIntervalRef.current = setInterval(detectFace, 1000);
|
|
};
|
|
|
|
const stopDetection = () => {
|
|
if (detectionIntervalRef.current) {
|
|
clearInterval(detectionIntervalRef.current);
|
|
}
|
|
setIsDetecting(false);
|
|
if (canvasRef.current) {
|
|
const context = canvasRef.current.getContext("2d");
|
|
if (context) {
|
|
context.clearRect(
|
|
0,
|
|
0,
|
|
canvasRef.current.width,
|
|
canvasRef.current.height
|
|
);
|
|
}
|
|
}
|
|
};
|
|
|
|
const handleSubmit = async (e: React.FormEvent) => {
|
|
e.preventDefault();
|
|
setLoading(true);
|
|
stopDetection(); // Stop any ongoing detection
|
|
|
|
try {
|
|
const response = await fetch(API_URL, {
|
|
method: "POST",
|
|
headers: { "Content-Type": "application/json" },
|
|
body: JSON.stringify({
|
|
uri: rtspUrl,
|
|
alias: cameraName,
|
|
}),
|
|
});
|
|
|
|
if (!response.ok) {
|
|
throw new Error("Failed to fetch stream URL");
|
|
}
|
|
|
|
const data = await response.json();
|
|
setM3u8Url(`http://localhost:8081${data?.uri}`);
|
|
console.log("isModelLoaded", isModelLoaded);
|
|
console.log("m3u8Url", m3u8Url);
|
|
} catch (error) {
|
|
console.error("Error fetching stream:", error);
|
|
toast({
|
|
title: "Error",
|
|
description: "Failed to load stream.",
|
|
variant: "destructive",
|
|
});
|
|
} finally {
|
|
setLoading(false);
|
|
}
|
|
};
|
|
|
|
return (
|
|
<div className="max-w-3xl mx-auto p-4">
|
|
<h2 className="text-2xl font-bold mb-4">
|
|
RTSP Stream with Face Detection
|
|
</h2>
|
|
<form onSubmit={handleSubmit} className="space-y-4 mb-6">
|
|
<input
|
|
type="text"
|
|
value={rtspUrl}
|
|
onChange={(e) => setRtspUrl(e.target.value)}
|
|
placeholder="Enter RTSP URL"
|
|
className="w-full p-2 border rounded"
|
|
required
|
|
/>
|
|
<input
|
|
type="text"
|
|
value={cameraName}
|
|
onChange={(e) => setCameraName(e.target.value)}
|
|
placeholder="Enter Camera Name"
|
|
className="w-full p-2 border rounded"
|
|
required
|
|
/>
|
|
<Button type="submit" disabled={loading} className="w-full">
|
|
{loading ? "Starting stream..." : "Start Stream"}
|
|
</Button>
|
|
</form>
|
|
|
|
{m3u8Url && !loading && (
|
|
<div className="relative">
|
|
<video
|
|
ref={videoRef}
|
|
controls
|
|
autoPlay
|
|
className="w-full rounded-lg"
|
|
/>
|
|
<canvas
|
|
ref={canvasRef}
|
|
className="absolute top-0 left-0 w-full h-full z-0 pointer-events-none"
|
|
/>
|
|
|
|
<div className="mt-4 flex justify-center">
|
|
<Button
|
|
onClick={isDetecting ? stopDetection : startDetection}
|
|
disabled={!isModelLoaded || !m3u8Url}
|
|
>
|
|
<Camera className="mr-2 h-4 w-4" />
|
|
{isDetecting ? "Stop Detection" : "Start Detection"}
|
|
</Button>
|
|
</div>
|
|
</div>
|
|
)}
|
|
</div>
|
|
);
|
|
};
|
|
|
|
export default RtspStream;
|