confidence score added

Somdev Das 2025-02-05 18:52:12 +05:30
parent adf9d8ef3e
commit a0d6b7a73a


@@ -8,7 +8,7 @@ import { Camera } from "lucide-react";
 import { useToast } from "@/hooks/use-toast";
 
 const MODEL_URL = "https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model";
-const PADDING = 50; // Padding around face in pixels
+const PADDING = 60;
 
 const RealtimeFaceDetection = () => {
   const webcamRef = useRef<Webcam>(null);
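Note: PADDING is the margin, in pixels, added on every side of the detected face box before cropping (per the comment the new line drops), so the bump from 50 to 60 grows each crop by 20 px per axis. A quick worked example, where the box size is assumed rather than taken from the diff:

// With the new PADDING of 60, a 200 x 200 detection box yields a
// 320 x 320 crop: width + 2 * PADDING, assuming the padded box fits the frame.
const PADDING = 60;
const boxWidth = 200;
const cropWidth = boxWidth + 2 * PADDING; // 320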
@@ -53,18 +53,7 @@ const RealtimeFaceDetection = () => {
     canvas.height = height;
 
     if (context) {
-      // Extract face region with padding
-      context.drawImage(
-        video,
-        x,
-        y,
-        width,
-        height, // Source coordinates
-        0,
-        0,
-        width,
-        height // Destination coordinates
-      );
+      context.drawImage(video, x, y, width, height, 0, 0, width, height);
    }

    return canvas;
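The nine-argument drawImage copies the source rectangle (x, y, width, height) out of the video frame onto the destination rectangle (0, 0, width, height) of the crop canvas; behavior is unchanged, only the formatting is condensed. A hedged sketch of the full helper this hunk lives in, where only the drawImage line is confirmed by the diff and the clamping math is an assumption consistent with the PADDING constant:

// Sketch of extractFaceWithPadding; everything except the drawImage call
// is assumed, not taken from this commit.
const extractFaceWithPadding = (
  video: HTMLVideoElement,
  box: { x: number; y: number; width: number; height: number }
): HTMLCanvasElement => {
  // Expand the box by PADDING on every side, clamped to the frame bounds.
  const x = Math.max(0, box.x - PADDING);
  const y = Math.max(0, box.y - PADDING);
  const width = Math.min(video.videoWidth - x, box.width + PADDING * 2);
  const height = Math.min(video.videoHeight - y, box.height + PADDING * 2);

  const canvas = document.createElement("canvas");
  canvas.width = width;
  canvas.height = height;

  const context = canvas.getContext("2d");
  if (context) {
    // Source rect from the video -> destination rect on the canvas.
    context.drawImage(video, x, y, width, height, 0, 0, width, height);
  }
  return canvas;
};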
@@ -81,26 +70,36 @@ const RealtimeFaceDetection = () => {
     canvas.width = video.videoWidth;
     canvas.height = video.videoHeight;
-    context.clearRect(0, 0, canvas.width, canvas.height); // Clear previous drawings
-
-    // Mirror the canvas context to match the mirrored video
-    context.translate(canvas.width, 0); // Move the origin to the right side of the canvas
-    context.scale(-1, 1); // Flip the context horizontally
-
-    // Detect all faces
+    context.clearRect(0, 0, canvas.width, canvas.height);
+    context.translate(canvas.width, 0);
+    context.scale(-1, 1);
+
     const detections = await faceapi
       .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
       .withFaceLandmarks()
       .withFaceDescriptors();
 
-    for (const detection of detections) {
-      // Draw box for visualization
+    if (detections.length > 0) {
+      const highConfidenceDetections = detections.filter(
+        (detection) => detection.detection.score > 0.7
+      );
+
+      for (const detection of highConfidenceDetections) {
       const { box } = detection.detection;
       context.strokeStyle = "#00FF00";
       context.lineWidth = 2;
       context.strokeRect(box.x, box.y, box.width, box.height);
 
-      // Extract face with padding and send to API
+      context.save();
+      context.scale(-1, 1);
+      context.fillStyle = "#00FF00";
+      context.font = "16px Arial";
+      context.fillText(
+        `Confidence: ${Math.round(detection.detection.score * 100)}%`,
+        -box.x - box.width,
+        box.y - 5
+      );
+      context.restore();
+
       const faceCanvas = extractFaceWithPadding(video, box);
       faceCanvas.toBlob(
         (blob) => {
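Two details in this hunk are easy to miss. Detections are filtered to scores above 0.7, so low-confidence faces are neither outlined nor sent to the API. And because the whole context was flipped (translate plus scale(-1, 1)) to match the mirrored webcam preview, fillText would otherwise render mirror-imaged; the save / scale(-1, 1) / restore pair flips text back, which is why the label's x coordinate is negated as -box.x - box.width. A minimal standalone sketch of that un-mirroring trick, with canvas setup and box values assumed:

// After translate(w, 0) + scale(-1, 1), a point drawn at (x, y) lands at
// screen (w - x, y), and glyphs come out mirrored.
const canvas = document.createElement("canvas");
canvas.width = 640;
canvas.height = 480;
const ctx = canvas.getContext("2d")!;
ctx.translate(canvas.width, 0);
ctx.scale(-1, 1);

const box = { x: 40, y: 30, width: 120, height: 120 }; // assumed values
ctx.strokeRect(box.x, box.y, box.width, box.height); // mirroring a box is harmless

ctx.save();
ctx.scale(-1, 1); // cancel the flip so text reads left-to-right
// Net transform is now translate(w, 0), so x must be negative to stay
// on-canvas; -(box.x + box.width) aligns the label with the box on screen.
ctx.fillText("Confidence: 87%", -box.x - box.width, box.y - 5);
ctx.restore();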
@@ -110,6 +109,7 @@ const RealtimeFaceDetection = () => {
         0.95
       );
     }
+    }
   };
 
   const sendFaceDataToAPI = async (imageBlob: Blob) => {
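The added closing brace terminates the new if (detections.length > 0) block. The 0.95 visible as context is the third argument of canvas.toBlob, i.e. the encoding quality. A hedged sketch of the surrounding call, where the mime type and callback body are assumptions consistent with the hunk's context lines:

// Assumed shape of the toBlob call whose tail appears in this hunk.
faceCanvas.toBlob(
  (blob) => {
    if (blob) sendFaceDataToAPI(blob); // ship the padded face crop
  },
  "image/jpeg", // assumed mime type
  0.95 // quality argument, the context line shown above
);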
@@ -140,7 +140,7 @@ const RealtimeFaceDetection = () => {
   const startDetection = () => {
     if (!isModelLoaded) return;
     setIsDetecting(true);
-    setInterval(detectFace, 300);
+    setInterval(detectFace, 1000);
   };
 
   return (
     <div className="max-w-3xl mx-auto">
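Slowing the loop from 300 ms to 1000 ms means detectFace now runs once per second instead of roughly three times, trading responsiveness for fewer model passes and API calls. The hunk never captures the interval id, so nothing can clear it; if stopping detection is needed, a common pattern (an assumption, not something this commit adds) is to keep the id in a ref:

// Hypothetical variant; this commit does not store the interval id.
const intervalRef = useRef<ReturnType<typeof setInterval> | null>(null);

const startDetection = () => {
  if (!isModelLoaded) return;
  setIsDetecting(true);
  intervalRef.current = setInterval(detectFace, 1000);
};

const stopDetection = () => {
  if (intervalRef.current) clearInterval(intervalRef.current);
  intervalRef.current = null;
  setIsDetecting(false);
};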