confidence score added

This commit is contained in:
Somdev Das 2025-02-05 18:52:12 +05:30
parent adf9d8ef3e
commit a0d6b7a73a

View File

@@ -8,7 +8,7 @@ import { Camera } from "lucide-react";
import { useToast } from "@/hooks/use-toast";
const MODEL_URL = "https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model";
const PADDING = 50; // Padding around face in pixels
const PADDING = 60;
const RealtimeFaceDetection = () => {
const webcamRef = useRef<Webcam>(null);
@@ -53,18 +53,7 @@ const RealtimeFaceDetection = () => {
canvas.height = height;
if (context) {
// Extract face region with padding
context.drawImage(
video,
x,
y,
width,
height, // Source coordinates
0,
0,
width,
height // Destination coordinates
);
context.drawImage(video, x, y, width, height, 0, 0, width, height);
}
return canvas;
@@ -81,34 +70,45 @@ const RealtimeFaceDetection = () => {
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
context.clearRect(0, 0, canvas.width, canvas.height); // Clear previous drawings
context.clearRect(0, 0, canvas.width, canvas.height);
context.translate(canvas.width, 0);
context.scale(-1, 1);
// Mirror the canvas context to match the mirrored video
context.translate(canvas.width, 0); // Move the origin to the right side of the canvas
context.scale(-1, 1); // Flip the context horizontally
// Detect all faces
const detections = await faceapi
.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
.withFaceLandmarks()
.withFaceDescriptors();
for (const detection of detections) {
// Draw box for visualization
const { box } = detection.detection;
context.strokeStyle = "#00FF00";
context.lineWidth = 2;
context.strokeRect(box.x, box.y, box.width, box.height);
// Extract face with padding and send to API
const faceCanvas = extractFaceWithPadding(video, box);
faceCanvas.toBlob(
(blob) => {
if (blob) sendFaceDataToAPI(blob);
},
"image/jpeg",
0.95
if (detections.length > 0) {
const highConfidenceDetections = detections.filter(
(detection) => detection.detection.score > 0.7
);
for (const detection of highConfidenceDetections) {
const { box } = detection.detection;
context.strokeStyle = "#00FF00";
context.lineWidth = 2;
context.strokeRect(box.x, box.y, box.width, box.height);
context.save();
context.scale(-1, 1);
context.fillStyle = "#00FF00";
context.font = "16px Arial";
context.fillText(
`Confidence: ${Math.round(detection.detection.score * 100)}%`,
-box.x - box.width,
box.y - 5
);
context.restore();
const faceCanvas = extractFaceWithPadding(video, box);
faceCanvas.toBlob(
(blob) => {
if (blob) sendFaceDataToAPI(blob);
},
"image/jpeg",
0.95
);
}
}
};
@@ -140,7 +140,7 @@ const RealtimeFaceDetection = () => {
const startDetection = () => {
if (!isModelLoaded) return;
setIsDetecting(true);
setInterval(detectFace, 300);
setInterval(detectFace, 1000);
};
return (
<div className="max-w-3xl mx-auto">