First framework for using the OpenAI Whisper API

main
Tycho Luyben 2023-03-18 14:06:11 +00:00
parent 72cefee277
commit 172c5521a5
4 changed files with 43 additions and 11 deletions

View File

@ -20,6 +20,7 @@
"jshashes": "^1.0.8",
"localforage": "^1.10.0",
"match-sorter": "^6.3.1",
"mic-recorder-to-mp3": "^2.2.2",
"minisearch": "^6.0.1",
"natural": "^6.2.0",
"openai": "^3.2.1",

View File

@ -10,6 +10,9 @@ import { selectMessage, setMessage } from '../store/message';
import { selectTemperature } from '../store/parameters';
import { openSystemPromptPanel, openTemperaturePanel } from '../store/settings-ui';
import { speechRecognition } from '../speech-recognition-types.d'
import MicRecorder from 'mic-recorder-to-mp3';
import { selectUseOpenAIWhisper } from '../store/api-keys';
const Container = styled.div`
background: #292933;
@ -40,6 +43,8 @@ export default function MessageInput(props: MessageInputProps) {
const message = useAppSelector(selectMessage);
const [recording, setRecording] = useState(false);
const hasVerticalSpace = useMediaQuery('(min-height: 1000px)');
const recorder = new MicRecorder({ bitRate: 128 })
const useOpenAIWhisper = useAppSelector(selectUseOpenAIWhisper);
const context = useAppContext();
const dispatch = useAppDispatch();
@ -62,18 +67,30 @@ export default function MessageInput(props: MessageInputProps) {
const onSpeechStart = () => {
if (!recording) {
setRecording(true);
speechRecognition.continuous = true;
speechRecognition.interimResults = true;
speechRecognition.onresult = (event) => {
const transcript = event.results[event.results.length - 1][0].transcript;
dispatch(setMessage(transcript));
};
// if we are using whisper, then we will just record with the browser and send it to the API when done
if (useOpenAIWhisper) {
speechRecognition.start();
} else {
speechRecognition.continuous = true;
speechRecognition.interimResults = true;
speechRecognition.onresult = (event) => {
const transcript = event.results[event.results.length - 1][0].transcript;
dispatch(setMessage(transcript));
};
speechRecognition.start();
}
} else {
setRecording(false);
speechRecognition.stop();
if (useOpenAIWhisper) {
} else {
speechRecognition.stop();
}
}
}

View File

@ -3,22 +3,24 @@ import SettingsOption from "./option";
import { TextInput } from "@mantine/core";
import { useCallback, useMemo } from "react";
import { useAppDispatch, useAppSelector } from "../../store";
import { selectOpenAIApiKey, setOpenAIApiKeyFromEvent } from "../../store/api-keys";
import { selectOpenAIApiKey, setOpenAIApiKeyFromEvent, selectUseOpenAIWhisper, setUseOpenAIWhisperFromEvent } from "../../store/api-keys";
import { selectSettingsOption } from "../../store/settings-ui";
import { FormattedMessage, useIntl } from "react-intl";
export default function UserOptionsTab(props: any) {
const option = useAppSelector(selectSettingsOption);
const openaiApiKey = useAppSelector(selectOpenAIApiKey);
const useOpenAIWhisper = useAppSelector(selectUseOpenAIWhisper);
const intl = useIntl()
const dispatch = useAppDispatch();
const onOpenAIApiKeyChange = useCallback((event: React.ChangeEvent<HTMLInputElement>) => dispatch(setOpenAIApiKeyFromEvent(event)), [dispatch]);
const onUseOpenAIWhisperChange = useCallback((event: React.ChangeEvent<HTMLInputElement>) => dispatch(setUseOpenAIWhisperFromEvent(event)), [dispatch]);
const elem = useMemo(() => (
<SettingsTab name="user">
<SettingsOption heading={intl.formatMessage({ defaultMessage: "Your OpenAI API Key", description: "Heading for the OpenAI API key setting on the settings screen" })}
focused={option === 'openai-api-key'}>
focused={option === 'openai-api-key'}>
<TextInput
placeholder={intl.formatMessage({ defaultMessage: "Paste your API key here" })}
value={openaiApiKey || ''}
@ -28,6 +30,9 @@ export default function UserOptionsTab(props: any) {
<FormattedMessage defaultMessage="Find your API key here." description="Label for the link that takes the user to the page on the OpenAI website where they can find their API key." />
</a>
</p>
<p>
<input type="checkbox" id="use-openai-whisper-api" checked={useOpenAIWhisper!} onChange={onUseOpenAIWhisperChange} /> Use the OpenAI Whisper API for speech recognition.
</p>
<p>
<FormattedMessage defaultMessage="Your API key is stored only on this device and never transmitted to anyone except OpenAI." />
</p>
@ -36,7 +41,7 @@ export default function UserOptionsTab(props: any) {
</p>
</SettingsOption>
</SettingsTab>
), [option, openaiApiKey, onOpenAIApiKeyChange]);
), [option, openaiApiKey, useOpenAIWhisper, onOpenAIApiKeyChange]);
return elem;
}

View File

@ -3,9 +3,12 @@ import type { RootState } from '.';
const initialState: {
openAIApiKey?: string | null | undefined;
useOpenAIWhisper?: boolean | null | undefined;
elevenLabsApiKey?: string | null | undefined;
} = {
openAIApiKey: localStorage.getItem('openai-api-key'),
useOpenAIWhisper: localStorage.getItem('use-openai-whisper') === 'true',
elevenLabsApiKey: localStorage.getItem('elevenlabs-api-key'),
};
@ -18,7 +21,11 @@ export const apiKeysSlice = createSlice({
},
setElevenLabsApiKey: (state, action: PayloadAction<string>) => {
state.elevenLabsApiKey = action.payload;
},
setUseOpenAIWhisper: (state, action: PayloadAction<boolean>) => {
state.useOpenAIWhisper = action.payload;
}
},
})
@ -26,8 +33,10 @@ export const { setOpenAIApiKey, setElevenLabsApiKey } = apiKeysSlice.actions;
export const setOpenAIApiKeyFromEvent = (event: React.ChangeEvent<HTMLInputElement>) => apiKeysSlice.actions.setOpenAIApiKey(event.target.value);
export const setElevenLabsApiKeyFromEvent = (event: React.ChangeEvent<HTMLInputElement>) => apiKeysSlice.actions.setElevenLabsApiKey(event.target.value);
export const setUseOpenAIWhisperFromEvent = (event: React.ChangeEvent<HTMLInputElement>) => apiKeysSlice.actions.setUseOpenAIWhisper(event.target.checked);
export const selectOpenAIApiKey = (state: RootState) => state.apiKeys.openAIApiKey;
export const selectElevenLabsApiKey = (state: RootState) => state.apiKeys.elevenLabsApiKey;
export const selectUseOpenAIWhisper = (state: RootState) => state.apiKeys.useOpenAIWhisper;
export default apiKeysSlice.reducer;