face detection integrated in rtsp stream
This commit is contained in:
parent 139cca44f5
commit d2fa91dcf5
@@ -1,14 +1,46 @@
import React, { useState, useEffect, useRef } from "react";
import Hls from "hls.js";
import * as faceapi from "face-api.js";
import { Button } from "@/components/ui/button";
import { Camera } from "lucide-react";
import { useToast } from "@/hooks/use-toast";

const API_URL = "http://localhost:8081/start"; // Replace with your actual API endpoint
const MODEL_URL = "https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model";
const PADDING = 60;
const API_URL = "http://localhost:8081/start";

const RtspStream: React.FC = () => {
  const [rtspUrl, setRtspUrl] = useState<string>("");
  const [cameraName, setCameraName] = useState<string>("");
  const [m3u8Url, setM3u8Url] = useState<string | null>(null);
  const [loading, setLoading] = useState<boolean>(false); // Loading state
  const [loading, setLoading] = useState<boolean>(false);
  const [isModelLoaded, setIsModelLoaded] = useState(false);
  const [isDetecting, setIsDetecting] = useState(false);
  const videoRef = useRef<HTMLVideoElement | null>(null);
  const canvasRef = useRef<HTMLCanvasElement>(null);
  const detectionIntervalRef = useRef<ReturnType<typeof setInterval> | null>(
    null
  );
  const { toast } = useToast();

  useEffect(() => {
    const loadModels = async () => {
      try {
        await faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL);
        await faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL);
        await faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL);
        setIsModelLoaded(true);
      } catch (error) {
        console.error("Error loading models:", error);
        toast({
          title: "Error",
          description: "Failed to load face detection models.",
          variant: "destructive",
        });
      }
    };
    loadModels();
  }, [toast]);
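  // The three face-api.js models loaded above (TinyFaceDetector, the 68-point
  // landmark net, and the recognition net) are fetched once on mount from the
  // @vladmandic/face-api CDN; the detection button further down stays disabled
  // until isModelLoaded flips to true.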

  useEffect(() => {
    if (m3u8Url && videoRef.current) {
@@ -24,9 +56,131 @@ const RtspStream: React.FC = () => {
    }
  }, [m3u8Url]);
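  // The body of this effect is unchanged and therefore elided from the diff;
  // it presumably attaches the HLS playlist referenced by m3u8Url to the
  // <video> element via the hls.js import whenever a new stream URL arrives.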

  const extractFaceWithPadding = (
    video: HTMLVideoElement,
    box: faceapi.Box
  ): HTMLCanvasElement => {
    const canvas = document.createElement("canvas");
    const context = canvas.getContext("2d");

    const x = Math.max(0, box.x - PADDING);
    const y = Math.max(0, box.y - PADDING);
    const width = Math.min(video.videoWidth - x, box.width + 2 * PADDING);
    const height = Math.min(video.videoHeight - y, box.height + 2 * PADDING);

    canvas.width = width;
    canvas.height = height;

    if (context) {
      context.drawImage(video, x, y, width, height, 0, 0, width, height);
    }

    return canvas;
  };
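  // extractFaceWithPadding crops the detected face plus a PADDING (60 px)
  // margin on each side, clamped to the video frame: for example, a 100x100
  // box at (200, 150) yields a 220x220 crop starting at (140, 90) when the
  // frame is large enough to contain it.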

  const detectFace = async () => {
    if (!videoRef.current || !canvasRef.current || !videoRef.current.videoWidth)
      return;

    const video = videoRef.current;
    const canvas = canvasRef.current;
    const context = canvas.getContext("2d");

    if (!context) return;

    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    context.clearRect(0, 0, canvas.width, canvas.height);

    const detections = await faceapi
      .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
      .withFaceLandmarks()
      .withFaceDescriptors();

    if (detections.length > 0) {
      const highConfidenceDetections = detections.filter(
        (detection) => detection.detection.score > 0.5
      );

      for (const detection of highConfidenceDetections) {
        const { box } = detection.detection;
        context.strokeStyle = "#00FF00";
        context.lineWidth = 2;
        context.strokeRect(box.x, box.y, box.width, box.height);
        context.fillStyle = "#00FF00";
        context.font = "16px Arial";
        context.fillText(
          `Confidence: ${Math.round(detection.detection.score * 100)}%`,
          box.x,
          box.y - 5
        );

        const faceCanvas = extractFaceWithPadding(video, box);
        faceCanvas.toBlob(
          (blob) => {
            if (blob) sendFaceDataToAPI(blob);
          },
          "image/jpeg",
          0.95
        );
      }
    }
  };
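  // Each detection pass runs TinyFaceDetector on the current frame, redraws
  // the overlay canvas with a green box and confidence label for every face
  // scoring above 0.5, and uploads a padded JPEG crop of each such face via
  // sendFaceDataToAPI.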

  const sendFaceDataToAPI = async (imageBlob: Blob) => {
    try {
      const formData = new FormData();
      formData.append("image", imageBlob, "face.jpg");

      const response = await fetch(
        `${process.env.NEXT_PUBLIC_BASE_URL}/search`,
        {
          method: "POST",
          body: formData,
        }
      );

      const data = await response.json();
      toast({ title: data?.name, description: data.message });
    } catch (error) {
      console.error("Error sending face data:", error);
      toast({
        title: "Error",
        description: "Failed to send face data.",
        variant: "destructive",
      });
    }
  };
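  // Face crops are posted as multipart/form-data (field "image") to the
  // /search endpoint under NEXT_PUBLIC_BASE_URL, which is assumed to reply
  // with { name, message } for the toast shown above.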

  const startDetection = () => {
    if (!isModelLoaded || !videoRef.current) return;
    console.log("Starting detection...");
    setIsDetecting(true);
    detectionIntervalRef.current = setInterval(detectFace, 1000);
  };

  const stopDetection = () => {
    if (detectionIntervalRef.current) {
      clearInterval(detectionIntervalRef.current);
    }
    setIsDetecting(false);
    if (canvasRef.current) {
      const context = canvasRef.current.getContext("2d");
      if (context) {
        context.clearRect(
          0,
          0,
          canvasRef.current.width,
          canvasRef.current.height
        );
      }
    }
  };
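  // startDetection polls detectFace once per second via setInterval while the
  // models are loaded; stopDetection clears that interval and wipes the
  // overlay canvas.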

  const handleSubmit = async (e: React.FormEvent) => {
    e.preventDefault();
    setLoading(true); // Set loading to true when submitting
    setLoading(true);
    stopDetection(); // Stop any ongoing detection

    try {
      const response = await fetch(API_URL, {
@@ -43,64 +197,71 @@ const RtspStream: React.FC = () => {
      }

      const data = await response.json();
      console.log("Stream data:", data);
      setM3u8Url(`http://localhost:8081${data?.uri}`);
      console.log("isModelLoaded", isModelLoaded);
      console.log("m3u8Url", m3u8Url);
    } catch (error) {
      console.error("Error fetching stream:", error);
      alert("Failed to load stream.");
      toast({
        title: "Error",
        description: "Failed to load stream.",
        variant: "destructive",
      });
    } finally {
      setLoading(false); // Reset loading state after API response
      setLoading(false);
    }
  };
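  // handleSubmit calls the local /start endpoint (API_URL; request details
  // are elided above). The response's `uri` is expected to point at the
  // generated HLS (.m3u8) playlist on the same host, and storing it in
  // m3u8Url hands playback off to the hls.js effect above.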

  return (
    <div style={{ maxWidth: "600px", margin: "auto", textAlign: "center" }}>
      <h2>RTSP Stream</h2>
      <form onSubmit={handleSubmit}>
    <div className="max-w-3xl mx-auto p-4">
      <h2 className="text-2xl font-bold mb-4">
        RTSP Stream with Face Detection
      </h2>
      <form onSubmit={handleSubmit} className="space-y-4 mb-6">
        <input
          type="text"
          value={rtspUrl}
          onChange={(e) => setRtspUrl(e.target.value)}
          placeholder="Enter RTSP URL"
          style={{ width: "80%", padding: "8px", marginBottom: "10px" }}
          className="w-full p-2 border rounded"
          required
        />
        <br />
        <input
          type="text"
          value={cameraName}
          onChange={(e) => setCameraName(e.target.value)}
          placeholder="Enter Camera Name"
          style={{ width: "80%", padding: "8px", marginBottom: "10px" }}
          className="w-full p-2 border rounded"
          required
        />
        <br />
        <button
          type="submit"
          style={{
            padding: "8px 12px",
            cursor: loading ? "not-allowed" : "pointer",
            opacity: loading ? 0.6 : 1,
          }}
          disabled={loading}
        >
        <Button type="submit" disabled={loading} className="w-full">
          {loading ? "Starting stream..." : "Start Stream"}
        </button>
        </Button>
      </form>

      {loading && (
        <p style={{ marginTop: "15px", fontWeight: "bold" }}>
          Stream is starting...
        </p>
      )}

      {m3u8Url && !loading && (
        <video
          ref={videoRef}
          controls
          autoPlay
          style={{ width: "100%", marginTop: "20px" }}
        />
        <div className="relative">
          <video
            ref={videoRef}
            controls
            autoPlay
            className="w-full rounded-lg"
          />
          <canvas
            ref={canvasRef}
            className="absolute top-0 left-0 w-full h-full z-0 pointer-events-none"
          />

          <div className="mt-4 flex justify-center">
            <Button
              onClick={isDetecting ? stopDetection : startDetection}
              disabled={!isModelLoaded || !m3u8Url}
            >
              <Camera className="mr-2 h-4 w-4" />
              {isDetecting ? "Stop Detection" : "Start Detection"}
            </Button>
          </div>
        </div>
      )}
    </div>
  );