From 172c5521a525ea3e9e8ecf9e89bab0b7596e2d34 Mon Sep 17 00:00:00 2001
From: Tycho Luyben
Date: Sat, 18 Mar 2023 14:06:11 +0000
Subject: [PATCH] First framework for using the OpenAI Whisper API
---
app/package.json | 1 +
app/src/components/input.tsx | 33 +++++++++++++++++++++-------
app/src/components/settings/user.tsx | 11 +++++++---
app/src/store/api-keys.ts | 9 ++++++++
4 files changed, 43 insertions(+), 11 deletions(-)
diff --git a/app/package.json b/app/package.json
index b39d8c1..fba67cb 100644
--- a/app/package.json
+++ b/app/package.json
@@ -20,6 +20,7 @@
"jshashes": "^1.0.8",
"localforage": "^1.10.0",
"match-sorter": "^6.3.1",
+ "mic-recorder-to-mp3": "^2.2.2",
"minisearch": "^6.0.1",
"natural": "^6.2.0",
"openai": "^3.2.1",
diff --git a/app/src/components/input.tsx b/app/src/components/input.tsx
index 8b0a40c..82bedbe 100644
--- a/app/src/components/input.tsx
+++ b/app/src/components/input.tsx
@@ -10,6 +10,9 @@ import { selectMessage, setMessage } from '../store/message';
import { selectTemperature } from '../store/parameters';
import { openSystemPromptPanel, openTemperaturePanel } from '../store/settings-ui';
import { speechRecognition } from '../speech-recognition-types.d'
+import MicRecorder from 'mic-recorder-to-mp3';
+import { selectUseOpenAIWhisper } from '../store/api-keys';
+
const Container = styled.div`
background: #292933;
@@ -40,6 +43,8 @@ export default function MessageInput(props: MessageInputProps) {
const message = useAppSelector(selectMessage);
const [recording, setRecording] = useState(false);
const hasVerticalSpace = useMediaQuery('(min-height: 1000px)');
+ const recorder = new MicRecorder({ bitRate: 128 });
+ const useOpenAIWhisper = useAppSelector(selectUseOpenAIWhisper);
const context = useAppContext();
const dispatch = useAppDispatch();
@@ -62,18 +67,30 @@ export default function MessageInput(props: MessageInputProps) {
const onSpeechStart = () => {
if (!recording) {
setRecording(true);
- speechRecognition.continuous = true;
- speechRecognition.interimResults = true;
- speechRecognition.onresult = (event) => {
- const transcript = event.results[event.results.length - 1][0].transcript;
- dispatch(setMessage(transcript));
- };
- speechRecognition.start();
+ // if we are using Whisper, we just record with the browser and send the audio to the API when done
+ if (useOpenAIWhisper) {
+ // TODO: start recording here with the MicRecorder instance
+ } else {
+ speechRecognition.continuous = true;
+ speechRecognition.interimResults = true;
+
+ speechRecognition.onresult = (event) => {
+ const transcript = event.results[event.results.length - 1][0].transcript;
+ dispatch(setMessage(transcript));
+ };
+
+ speechRecognition.start();
+ }
} else {
setRecording(false);
- speechRecognition.stop();
+ if (useOpenAIWhisper) {
+ // TODO: stop recording and send the MP3 to the Whisper API for transcription
+ } else {
+ speechRecognition.stop();
+ }
}
}
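The two TODO branches above are the point of this first framework. As a sketch of where they are headed (not part of the patch): the start branch would call recorder.start(), and the stop branch would encode the recording and post it to OpenAI's audio transcription endpoint. The endpoint URL and the 'whisper-1' model name are from OpenAI's API reference; the apiKey argument is assumed to be selected from the store, and the recorder instance should ideally be memoized rather than recreated on every render:

    // sketch: upload an MP3 to the Whisper transcription endpoint and return
    // the recognized text; the API responds with { "text": "..." } on success
    async function transcribeWithWhisper(file: File, apiKey: string): Promise<string> {
        const body = new FormData();
        body.append('file', file);
        body.append('model', 'whisper-1');

        const response = await fetch('https://api.openai.com/v1/audio/transcriptions', {
            method: 'POST',
            headers: { 'Authorization': `Bearer ${apiKey}` },
            body,
        });

        if (!response.ok) {
            throw new Error(`Whisper request failed with status ${response.status}`);
        }

        const json = await response.json();
        return json.text;
    }

With that helper, the stop branch reduces to roughly:

    recorder.stop().getMp3().then(async ([buffer, blob]) => {
        const file = new File(buffer, 'speech.mp3', { type: blob.type, lastModified: Date.now() });
        dispatch(setMessage(await transcribeWithWhisper(file, openAIApiKey)));
    });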
diff --git a/app/src/components/settings/user.tsx b/app/src/components/settings/user.tsx
index 9e217fa..fc2370e 100644
--- a/app/src/components/settings/user.tsx
+++ b/app/src/components/settings/user.tsx
@@ -3,22 +3,24 @@ import SettingsOption from "./option";
-import { TextInput } from "@mantine/core";
+import { Checkbox, TextInput } from "@mantine/core";
import { useCallback, useMemo } from "react";
import { useAppDispatch, useAppSelector } from "../../store";
-import { selectOpenAIApiKey, setOpenAIApiKeyFromEvent } from "../../store/api-keys";
+import { selectOpenAIApiKey, setOpenAIApiKeyFromEvent, selectUseOpenAIWhisper, setUseOpenAIWhisperFromEvent } from "../../store/api-keys";
import { selectSettingsOption } from "../../store/settings-ui";
import { FormattedMessage, useIntl } from "react-intl";
export default function UserOptionsTab(props: any) {
const option = useAppSelector(selectSettingsOption);
const openaiApiKey = useAppSelector(selectOpenAIApiKey);
+ const useOpenAIWhisper = useAppSelector(selectUseOpenAIWhisper);
const intl = useIntl()
const dispatch = useAppDispatch();
const onOpenAIApiKeyChange = useCallback((event: React.ChangeEvent<HTMLInputElement>) => dispatch(setOpenAIApiKeyFromEvent(event)), [dispatch]);
+ const onUseOpenAIWhisperChange = useCallback((event: React.ChangeEvent<HTMLInputElement>) => dispatch(setUseOpenAIWhisperFromEvent(event)), [dispatch]);
const elem = useMemo(() => (
+ <SettingsOption heading="Speech Recognition" focused={option === 'openai-api-key'}>
+ {/* markup partially reconstructed: a Mantine Checkbox matches the event.target.checked handler wired above */}
+ <Checkbox label="Use the OpenAI Whisper API for speech recognition." checked={useOpenAIWhisper || false} onChange={onUseOpenAIWhisperChange} />
+ </SettingsOption>
@@ -36,7 +41,7 @@ export default function UserOptionsTab(props: any) {
- ), [option, openaiApiKey, onOpenAIApiKeyChange]);
+ ), [option, openaiApiKey, useOpenAIWhisper, onOpenAIApiKeyChange, onUseOpenAIWhisperChange]);
return elem;
}
\ No newline at end of file
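For clarity, the intended wiring as a self-contained sketch (not part of the patch): Checkbox comes from @mantine/core, as TextInput already does in this file, and the WhisperToggle name and its props are illustrative only:

    import React from 'react';
    import { Checkbox } from '@mantine/core';

    // Mantine's Checkbox emits a React.ChangeEvent<HTMLInputElement>, which is
    // what setUseOpenAIWhisperFromEvent reads event.target.checked from
    export function WhisperToggle(props: {
        checked: boolean;
        onChange: (event: React.ChangeEvent<HTMLInputElement>) => void;
    }) {
        return <Checkbox
            label="Use the OpenAI Whisper API for speech recognition."
            checked={props.checked}
            onChange={props.onChange} />;
    }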
diff --git a/app/src/store/api-keys.ts b/app/src/store/api-keys.ts
index 59a8634..3a53cf7 100644
--- a/app/src/store/api-keys.ts
+++ b/app/src/store/api-keys.ts
@@ -3,9 +3,12 @@ import type { RootState } from '.';
const initialState: {
openAIApiKey?: string | null | undefined;
+ useOpenAIWhisper?: boolean | null | undefined;
elevenLabsApiKey?: string | null | undefined;
} = {
openAIApiKey: localStorage.getItem('openai-api-key'),
+ useOpenAIWhisper: localStorage.getItem('use-openai-whisper') === 'true',
elevenLabsApiKey: localStorage.getItem('elevenlabs-api-key'),
};
@@ -18,7 +21,11 @@ export const apiKeysSlice = createSlice({
},
setElevenLabsApiKey: (state, action: PayloadAction<string>) => {
state.elevenLabsApiKey = action.payload;
+ },
+ setUseOpenAIWhisper: (state, action: PayloadAction<boolean>) => {
+ state.useOpenAIWhisper = action.payload;
}
},
})
@@ -26,8 +33,10 @@ export const { setOpenAIApiKey, setElevenLabsApiKey } = apiKeysSlice.actions;
export const setOpenAIApiKeyFromEvent = (event: React.ChangeEvent<HTMLInputElement>) => apiKeysSlice.actions.setOpenAIApiKey(event.target.value);
export const setElevenLabsApiKeyFromEvent = (event: React.ChangeEvent<HTMLInputElement>) => apiKeysSlice.actions.setElevenLabsApiKey(event.target.value);
+export const setUseOpenAIWhisperFromEvent = (event: React.ChangeEvent<HTMLInputElement>) => apiKeysSlice.actions.setUseOpenAIWhisper(event.target.checked);
export const selectOpenAIApiKey = (state: RootState) => state.apiKeys.openAIApiKey;
export const selectElevenLabsApiKey = (state: RootState) => state.apiKeys.elevenLabsApiKey;
+export const selectUseOpenAIWhisper = (state: RootState) => state.apiKeys.useOpenAIWhisper;
export default apiKeysSlice.reducer;
\ No newline at end of file
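One loose end in this slice: the initial state reads 'use-openai-whisper' from localStorage, but nothing writes the key back when the setting changes, so the toggle would reset on reload. A sketch of one way to close the loop (not part of the patch; it assumes the store index module exports the configured store under the name store):

    import { store } from '.';

    // persist the Whisper toggle whenever it changes, mirroring the startup
    // read above; subscribe fires on every dispatch, so track the previous
    // value to skip redundant writes
    let previousValue = store.getState().apiKeys.useOpenAIWhisper;

    store.subscribe(() => {
        const current = store.getState().apiKeys.useOpenAIWhisper;
        if (current !== previousValue) {
            previousValue = current;
            localStorage.setItem('use-openai-whisper', current ? 'true' : 'false');
        }
    });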