// NOTE(review): This file appears to have been corrupted by an angle-bracket /
// HTML stripper and a line-collapse: every JSX element and every TypeScript
// generic type argument is missing (e.g. `useState(null)` was presumably
// `useState<string | null>(null)`, `OnSubmit` presumably returned
// `Promise<void>` — TODO confirm against version control), the markup inside
// `rightSection` and the component's final `return` is gone, and the final
// `return` is truncated mid-statement. The collapse also turned the inline
// `// recorder.start()…` comment into one that comments out the remainder of
// that physical line. Code below is left byte-identical; comments only.
// Recover the original from version control before attempting behavioral fixes.
//
// Line below: third-party + app imports; the styled `Container` wrapper for the
// chat input bar (dark background, centered inner column); the `OnSubmit`
// callback type and `MessageInputProps`; then the start of the `MessageInput`
// component: draft-message text from the Redux store, local recording /
// speech-error state, user options (Whisper vs. browser speech recognition,
// OpenAI API key), and the `useWhisper` hook wired to that key.
import styled from '@emotion/styled'; import { Button, ActionIcon, Textarea, Loader, Popover } from '@mantine/core'; import { getHotkeyHandler, useHotkeys, useMediaQuery } from '@mantine/hooks'; import { useCallback, useEffect, useMemo, useState } from 'react'; import { FormattedMessage, useIntl } from 'react-intl'; import { useLocation, useNavigate } from 'react-router-dom'; import { useAppContext } from '../core/context'; import { useAppDispatch, useAppSelector } from '../store'; import { selectMessage, setMessage } from '../store/message'; import { selectSettingsTab, openOpenAIApiKeyPanel } from '../store/settings-ui'; import { speechRecognition, supportsSpeechRecognition } from '../core/speech-recognition-types' import { useWhisper } from '@chengsokdara/use-whisper'; import QuickSettings from './quick-settings'; import { useOption } from '../core/options/use-option'; const Container = styled.div` background: #292933; border-top: thin solid #393933; padding: 1rem 1rem 0 1rem; .inner { max-width: 50rem; margin: auto; text-align: right; } .settings-button { margin: 0.5rem -0.4rem 0.5rem 1rem; font-size: 0.7rem; color: #999; } `; export declare type OnSubmit = (name?: string) => Promise; export interface MessageInputProps { disabled?: boolean; } export default function MessageInput(props: MessageInputProps) { const message = useAppSelector(selectMessage); const [recording, setRecording] = useState(false); const [speechError, setSpeechError] = useState(null); const hasVerticalSpace = useMediaQuery('(min-height: 1000px)'); const [useOpenAIWhisper] = useOption('speech-recognition', 'use-whisper'); const [openAIApiKey] = useOption('openai', 'apiKey'); const [initialMessage, setInitialMessage] = useState(''); const { transcribing, transcript, startRecording, stopRecording, } = useWhisper({ apiKey: openAIApiKey || ' ', streaming: false, }); const navigate = useNavigate(); const context = useAppContext(); const dispatch = useAppDispatch(); const intl = useIntl(); const 
// Line below (continues the `const tab = …` declaration split across the line
// break): option/selector reads, then the main callbacks.
// - onChange: dispatches the textarea value into the Redux draft-message slice.
// - onSubmit: clears any speech error, asks the app context to create a new
//   message, navigates to `/chat/<id>` if we are not already there, and clears
//   the draft.
// - onSpeechError: logs, surfaces the error message in local state, and
//   best-effort stops BOTH speech backends (browser SpeechRecognition and
//   Whisper recording) — the empty catches are deliberate best-effort cleanup.
// - onSpeechStart: queries the Permissions API for microphone access (wrapped
//   in try/catch since `name: 'microphone'` is not supported everywhere), falls
//   back to a throwaway getUserMedia probe, and bails with an error if denied.
//   Then either starts Whisper recording (opening the API-key settings panel if
//   no key is configured, and snapshotting the current draft into
//   `initialMessage` so the transcript can be appended later) or configures and
//   starts the browser SpeechRecognition backend.
// NOTE(review): the `onresult` assignment is cut mid-statement by the original
// line break and, earlier on this physical line, the inline
// `// recorder.start().catch(onSpeechError);` comment now comments out the rest
// of the line — an artifact of the collapse, not the original code.
tab = useAppSelector(selectSettingsTab); const [showMicrophoneButton] = useOption('speech-recognition', 'show-microphone'); const [submitOnEnter] = useOption('input', 'submit-on-enter'); const onChange = useCallback((e: React.ChangeEvent) => { dispatch(setMessage(e.target.value)); }, [dispatch]); const pathname = useLocation().pathname; const onSubmit = useCallback(async () => { setSpeechError(null); const id = await context.onNewMessage(message); if (id) { if (!window.location.pathname.includes(id)) { navigate('/chat/' + id); } dispatch(setMessage('')); } }, [context, message, dispatch, navigate]); const onSpeechError = useCallback((e: any) => { console.error('speech recognition error', e); setSpeechError(e.message); try { speechRecognition?.stop(); } catch (e) { } try { stopRecording(); } catch (e) { } setRecording(false); }, [stopRecording]); const onHideSpeechError = useCallback(() => setSpeechError(null), []); const onSpeechStart = useCallback(async () => { let granted = false; let denied = false; try { const result = await navigator.permissions.query({ name: 'microphone' as any }); if (result.state == 'granted') { granted = true; } else if (result.state == 'denied') { denied = true; } } catch (e) { } if (!granted && !denied) { try { const stream = await navigator.mediaDevices.getUserMedia({ video: false, audio: true }); stream.getTracks().forEach(track => track.stop()); granted = true; } catch (e) { denied = true; } } if (denied) { onSpeechError(new Error('speech permission was not granted')); return; } try { if (!recording) { setRecording(true); if (useOpenAIWhisper || !supportsSpeechRecognition) { if (!openAIApiKey) { dispatch(openOpenAIApiKeyPanel()); return false; } // recorder.start().catch(onSpeechError); setInitialMessage(message); await startRecording(); } else if (speechRecognition) { const initialMessage = message; speechRecognition.continuous = true; speechRecognition.interimResults = true; speechRecognition.onresult = (event) => { let transcript = 
// Line below: completes the `onresult` handler — concatenates only final,
// non-zero-confidence recognition results and dispatches
// `initialMessage + ' ' + transcript` as the new draft. The else-branch of
// onSpeechStart stops whichever backend is active (Whisper stop keeps
// `recording` true for 500ms so the trailing transcript effect can fire).
// The `useEffect` appends the finished Whisper transcript to the snapshotted
// initial message once transcription has settled. The 'n' hotkey focuses the
// input, `blur` unfocuses it (Escape, wired further down), and `rightSection`
// memoizes the textarea's trailing controls — its JSX begins after `return (`
// but has been stripped from this file (lines below are orphaned fragments).
''; for (let i = 0; i < event.results.length; i++) { if (event.results[i].isFinal && event.results[i][0].confidence) { transcript += event.results[i][0].transcript; } } dispatch(setMessage(initialMessage + ' ' + transcript)); }; speechRecognition.start(); } else { onSpeechError(new Error('not supported')); } } else { if (useOpenAIWhisper || !supportsSpeechRecognition) { await stopRecording(); setTimeout(() => setRecording(false), 500); } else if (speechRecognition) { speechRecognition.stop(); setRecording(false); } else { onSpeechError(new Error('not supported')); } } } catch (e) { onSpeechError(e); } }, [recording, message, dispatch, onSpeechError, setInitialMessage, openAIApiKey]); useEffect(() => { if (useOpenAIWhisper || !supportsSpeechRecognition) { if (!transcribing && !recording && transcript?.text) { dispatch(setMessage(initialMessage + ' ' + transcript.text)); } } }, [initialMessage, transcript, recording, transcribing, useOpenAIWhisper, dispatch]); useHotkeys([ ['n', () => document.querySelector('#message-input')?.focus()] ]); const blur = useCallback(() => { document.querySelector('#message-input')?.blur(); }, []); const rightSection = useMemo(() => { return (
// NOTE(review): lines below are the residue of stripped JSX — conditionals for
// a "generating" state vs. the microphone/submit buttons, a loader while
// transcribing, and (presumably inside a Popover) the user-facing error text
// "Sorry, an error occured trying to record audio." — that string has a typo
// ("occured" → "occurred") but it is runtime-rendered text inside what was JSX,
// so it is left untouched here; fix it when the markup is recovered.
{context.generating && (<> )} {!context.generating && ( <> {showMicrophoneButton && {transcribing && } {!transcribing && }

Sorry, an error occured trying to record audio.

} )}
// Line below: closes the `rightSection` memo; derives `disabled` and
// `isLandingPage`; renders nothing on share views or when there is no active
// chat and we are not on the landing page; builds the textarea hotkey handler
// (Escape blurs; Enter submits when the submit-on-enter option is set).
// NOTE(review): `const hotkeyHandler = useMemo(...)` is called AFTER the
// conditional `return null` above — a Rules-of-Hooks violation (hook count can
// change between renders); the memo should be hoisted above the early return.
// The trailing `return` is truncated — its JSX was stripped with the rest.
); }, [recording, transcribing, onSubmit, onSpeechStart, props.disabled, context.generating, speechError, onHideSpeechError, showMicrophoneButton]); const disabled = context.generating; const isLandingPage = pathname === '/'; if (context.isShare || (!isLandingPage && !context.id)) { return null; } const hotkeyHandler = useMemo(() => { const keys = [ ['Escape', blur, { preventDefault: true }], ]; if (submitOnEnter) { keys.unshift(['Enter', onSubmit, { preventDefault: true }]); } const handler = getHotkeyHandler(keys as any); return handler; }, [onSubmit, blur, submitOnEnter]); return