// 127 lines, 3.9 KiB, TypeScript (file-viewer metadata; not code)

import { useEffect, useRef, useCallback, useState } from 'react';
import { io, Socket } from 'socket.io-client';
// Gateway base URL; falls back to the local dev server when the env var is unset.
const SOCKET_URL = import.meta.env.VITE_SOCKET_URL || 'http://localhost:3001';
// Lifecycle states the AI interviewer reports over the 'ai-state' event.
export type AIState = 'idle' | 'listening' | 'thinking' | 'speaking';
// Optional event callbacks a consumer of useSocket() may provide.
interface UseSocketOptions {
// Fired with each AI transcript fragment ('ai-transcript' event).
onTranscript?: (text: string) => void;
// Fired with raw TTS audio before local playback ('ai-audio' event).
onAudioResponse?: (audio: ArrayBuffer) => void;
// Fired whenever the AI's lifecycle state changes ('ai-state' event).
onStateChange?: (state: AIState) => void;
// Fired once the server has created an interview session.
onSessionCreated?: (data: { sessionId: string; stage: string }) => void;
// Fired with the outcome of a face-verification frame.
onFaceResult?: (data: { verified: boolean; message: string }) => void;
// Fired on server-reported errors ('error' event).
onError?: (err: { message: string }) => void;
// Fired when the server ends the interview.
onInterviewEnded?: (data: { sessionId: string }) => void;
}
/**
 * Custom hook managing the Socket.io connection to the interview gateway.
 * Handles audio chunk emission, TTS playback, and all socket events.
 *
 * @param options - Optional event callbacks. They are read through a ref on
 *   every event, so callers may pass fresh inline closures each render
 *   without reconnecting the socket or receiving stale handlers.
 * @returns Connection state, AI state, and stable emit helpers.
 */
export function useSocket(options: UseSocketOptions = {}) {
  const socketRef = useRef<Socket | null>(null);
  const [connected, setConnected] = useState(false);
  const [aiState, setAiState] = useState<AIState>('idle');
  const audioContextRef = useRef<AudioContext | null>(null);

  // Latest-callback ref: the connect effect below runs once, so reading
  // `options` directly there would freeze the callbacks captured on the
  // first render. The handlers read through this ref instead.
  const optionsRef = useRef(options);
  useEffect(() => {
    optionsRef.current = options;
  });

  // ── Audio playback ──
  const playAudio = useCallback(async (audioData: ArrayBuffer) => {
    try {
      // Lazily create a single shared AudioContext; closed on unmount.
      if (!audioContextRef.current) {
        audioContextRef.current = new AudioContext();
      }
      const ctx = audioContextRef.current;
      // Autoplay policies may start the context suspended; resume before use,
      // otherwise decode/playback silently produces no sound.
      if (ctx.state === 'suspended') {
        await ctx.resume();
      }
      // slice(0) copies the buffer: decodeAudioData detaches its input, and
      // the same ArrayBuffer was already handed to onAudioResponse.
      const audioBuffer = await ctx.decodeAudioData(audioData.slice(0));
      const source = ctx.createBufferSource();
      source.buffer = audioBuffer;
      source.connect(ctx.destination);
      source.start(0);
    } catch (err) {
      console.error('Audio playback failed:', err);
    }
  }, []);

  // ── Connect on mount, tear down on unmount ──
  useEffect(() => {
    const socket = io(`${SOCKET_URL}/interview`, {
      transports: ['websocket'],
      autoConnect: true,
    });
    socketRef.current = socket;
    socket.on('connect', () => setConnected(true));
    socket.on('disconnect', () => setConnected(false));
    socket.on('session-created', (data: { sessionId: string; stage: string }) => {
      optionsRef.current.onSessionCreated?.(data);
    });
    socket.on('ai-transcript', (data: { text: string }) => {
      optionsRef.current.onTranscript?.(data.text);
    });
    socket.on('ai-audio', (audioData: ArrayBuffer) => {
      optionsRef.current.onAudioResponse?.(audioData);
      void playAudio(audioData); // fire-and-forget; errors logged inside
    });
    socket.on('ai-state', (data: { state: AIState }) => {
      setAiState(data.state);
      optionsRef.current.onStateChange?.(data.state);
    });
    socket.on('face-result', (data: { verified: boolean; message: string }) => {
      optionsRef.current.onFaceResult?.(data);
    });
    socket.on('interview-ended', (data: { sessionId: string }) => {
      optionsRef.current.onInterviewEnded?.(data);
    });
    socket.on('error', (err: { message: string }) => {
      optionsRef.current.onError?.(err);
    });
    return () => {
      socket.disconnect();
      socketRef.current = null;
      // Release the audio device; swallow rejection if already closed.
      void audioContextRef.current?.close().catch(() => undefined);
      audioContextRef.current = null;
    };
    // playAudio has stable identity ([] deps), so this still runs once.
  }, [playAudio]);

  // ── Emit helpers (stable identities; safe in consumers' effect deps) ──

  /** Join the interview room for the given candidate. */
  const joinRoom = useCallback((candidateId: string) => {
    socketRef.current?.emit('join-room', { candidateId });
  }, []);

  /** Stream one chunk of microphone audio to the server. */
  const sendAudioChunk = useCallback((chunk: ArrayBuffer) => {
    socketRef.current?.emit('audio-chunk', chunk);
  }, []);

  /** Signal that the candidate has finished speaking. */
  const signalEndOfSpeech = useCallback(() => {
    socketRef.current?.emit('end-of-speech');
  }, []);

  /** Send a webcam frame for face verification. */
  const sendFaceFrame = useCallback(
    (candidateId: string, frame: ArrayBuffer) => {
      socketRef.current?.emit('face-verify', { candidateId, frame });
    },
    [],
  );

  /** Ask the server to terminate the interview session. */
  const endInterview = useCallback(() => {
    socketRef.current?.emit('end-interview');
  }, []);

  return {
    connected,
    aiState,
    joinRoom,
    sendAudioChunk,
    signalEndOfSpeech,
    sendFaceFrame,
    endInterview,
  };
}