Implemented bounding-box drawing for real-time face detection
This commit is contained in:
parent
99f5e148b9
commit
ca38b7c7f4
@ -36,32 +36,65 @@ const RealtimeFaceDetection = () => {
|
||||
}, [toast]);
|
||||
|
||||
const detectFace = async () => {
|
||||
if (!webcamRef.current || !webcamRef.current.video) return;
|
||||
if (!webcamRef.current || !webcamRef.current.video || !canvasRef.current)
|
||||
return;
|
||||
|
||||
const video = webcamRef.current.video;
|
||||
const canvas = document.createElement("canvas");
|
||||
const canvas = canvasRef.current;
|
||||
const context = canvas.getContext("2d");
|
||||
|
||||
if (!context) return;
|
||||
|
||||
// Set canvas size to match video
|
||||
canvas.width = video.videoWidth;
|
||||
canvas.height = video.videoHeight;
|
||||
context.drawImage(video, 0, 0, canvas.width, canvas.height);
|
||||
context.clearRect(0, 0, canvas.width, canvas.height); // Clear previous drawings
|
||||
|
||||
// Convert the canvas to a Blob (image file)
|
||||
canvas.toBlob(async (blob) => {
|
||||
if (!blob) return;
|
||||
// Mirror the canvas context to match the mirrored video
|
||||
context.translate(canvas.width, 0); // Move the origin to the right side of the canvas
|
||||
context.scale(-1, 1); // Flip the context horizontally
|
||||
|
||||
// Detect face
|
||||
const detections = await faceapi
|
||||
.detectSingleFace(video, new faceapi.TinyFaceDetectorOptions())
|
||||
.withFaceLandmarks()
|
||||
.withFaceDescriptor();
|
||||
// Detect face
|
||||
const detections = await faceapi
|
||||
.detectSingleFace(video, new faceapi.TinyFaceDetectorOptions())
|
||||
.withFaceLandmarks()
|
||||
.withFaceDescriptor();
|
||||
|
||||
if (detections) {
|
||||
sendFaceDataToAPI(blob);
|
||||
if (detections) {
|
||||
// Draw bounding box
|
||||
const { x, y, width, height } = detections.detection.box;
|
||||
context.strokeStyle = "red"; // Box color
|
||||
context.lineWidth = 3;
|
||||
context.strokeRect(x, y, width, height);
|
||||
|
||||
// Capture the face as an image
|
||||
const imageCanvas = document.createElement("canvas");
|
||||
const imageContext = imageCanvas.getContext("2d");
|
||||
|
||||
if (imageContext) {
|
||||
imageCanvas.width = video.videoWidth;
|
||||
imageCanvas.height = video.videoHeight;
|
||||
|
||||
// Mirror the image context as well
|
||||
imageContext.translate(imageCanvas.width, 0);
|
||||
imageContext.scale(-1, 1);
|
||||
|
||||
imageContext.drawImage(
|
||||
video,
|
||||
0,
|
||||
0,
|
||||
imageCanvas.width,
|
||||
imageCanvas.height
|
||||
);
|
||||
|
||||
// Convert to Blob and send
|
||||
imageCanvas.toBlob((blob) => {
|
||||
if (blob) {
|
||||
sendFaceDataToAPI(blob);
|
||||
}
|
||||
}, "image/jpeg");
|
||||
}
|
||||
}, "image/jpeg"); // Save image as JPEG
|
||||
}
|
||||
};
|
||||
|
||||
const sendFaceDataToAPI = async (imageBlob: Blob) => {
|
||||
|
Loading…
x
Reference in New Issue
Block a user