Fall back to Whisper when SpeechRecognition is not available

main
Cogent Apps 2023-03-20 21:03:12 +00:00
parent 1917bc19e0
commit 6359c9f50d
3 changed files with 29 additions and 14 deletions

View File

@ -8,8 +8,8 @@ import { useAppContext } from '../context';
import { useAppDispatch, useAppSelector } from '../store';
import { selectMessage, setMessage } from '../store/message';
import { selectTemperature } from '../store/parameters';
import { openSystemPromptPanel, openTemperaturePanel } from '../store/settings-ui';
import { speechRecognition } from '../speech-recognition-types.d'
import { openOpenAIApiKeyPanel, openSystemPromptPanel, openTemperaturePanel } from '../store/settings-ui';
import { speechRecognition, supportsSpeechRecognition } from '../speech-recognition-types'
import MicRecorder from 'mic-recorder-to-mp3';
import { selectUseOpenAIWhisper, selectOpenAIApiKey } from '../store/api-keys';
import { Mp3Encoder } from 'lamejs';
@ -109,7 +109,7 @@ export default function MessageInput(props: MessageInputProps) {
console.error('speech recognition error', e);
try {
speechRecognition.stop();
speechRecognition?.stop();
} catch (e) {
}
@ -122,14 +122,19 @@ export default function MessageInput(props: MessageInputProps) {
}, [recorder]);
const onSpeechStart = useCallback(() => {
if (!openAIApiKey) {
dispatch(openOpenAIApiKeyPanel());
return false;
}
try {
if (!recording) {
setRecording(true);
// if we are using Whisper, then we will just record with the browser and send to the API when done
if (useOpenAIWhisper) {
if (useOpenAIWhisper || !supportsSpeechRecognition) {
recorder.start().catch(onSpeechError);
} else {
} else if (speechRecognition) {
const initialMessage = message;
speechRecognition.continuous = true;
@ -146,10 +151,12 @@ export default function MessageInput(props: MessageInputProps) {
};
speechRecognition.start();
} else {
onSpeechError(new Error('not supported'));
}
} else {
setRecording(false);
if (useOpenAIWhisper) {
if (useOpenAIWhisper || !supportsSpeechRecognition) {
setTranscribing(true);
const mp3 = recorder.stop().getMp3();
@ -185,14 +192,16 @@ export default function MessageInput(props: MessageInputProps) {
}
}).catch(onSpeechError);
} else {
} else if (speechRecognition) {
speechRecognition.stop();
} else {
onSpeechError(new Error('not supported'));
}
}
} catch (e) {
onSpeechError(e);
}
}, [recording, message, dispatch]);
}, [recording, message, dispatch, onSpeechError, openAIApiKey]);
const onKeyDown = useCallback((e: React.KeyboardEvent<HTMLTextAreaElement>) => {

View File

@ -6,6 +6,7 @@ import { useAppDispatch, useAppSelector } from "../../store";
import { selectOpenAIApiKey, setOpenAIApiKeyFromEvent, selectUseOpenAIWhisper, setUseOpenAIWhisperFromEvent } from "../../store/api-keys";
import { selectSettingsOption } from "../../store/settings-ui";
import { FormattedMessage, useIntl } from "react-intl";
import { supportsSpeechRecognition } from "../../speech-recognition-types";
export default function UserOptionsTab(props: any) {
const option = useAppSelector(selectSettingsOption);
@ -31,11 +32,11 @@ export default function UserOptionsTab(props: any) {
</a>
</p>
<Checkbox
{supportsSpeechRecognition && <Checkbox
style={{ marginTop: '1rem' }}
id="use-openai-whisper-api" checked={useOpenAIWhisper!} onChange={onUseOpenAIWhisperChange}
label="Use the OpenAI Whisper API for speech recognition."
/>
/>}
<p>
<FormattedMessage defaultMessage="Your API key is stored only on this device and never transmitted to anyone except OpenAI." />

View File

@ -122,12 +122,17 @@ declare global {
}
}
let speechRecognition: SpeechRecognition
let speechRecognition: SpeechRecognition | null = null;
if (window.SpeechRecognition) {
speechRecognition = new SpeechRecognition()
} else {
speechRecognition = new webkitSpeechRecognition()
} else if ((window as any).webkitSpeechRecognition) {
speechRecognition = new (window as any).webkitSpeechRecognition() as SpeechRecognition;
}
export { speechRecognition }
const supportsSpeechRecognition = speechRecognition !== null;
export {
speechRecognition,
supportsSpeechRecognition,
}