// NOTE(review): this chunk arrived with all formatting collapsed onto a few lines;
// it is re-indented here with comments added — no code tokens were changed.
import styled from '@emotion/styled';
import { Button, ActionIcon, Textarea, Loader } from '@mantine/core';
import { useMediaQuery } from '@mantine/hooks';
import { useCallback, useMemo, useState } from 'react';
import { FormattedMessage, useIntl } from 'react-intl';
import { useLocation } from 'react-router-dom';
import { useAppContext } from '../context';
import { useAppDispatch, useAppSelector } from '../store';
import { selectMessage, setMessage } from '../store/message';
import { selectTemperature } from '../store/parameters';
import { openSystemPromptPanel, openTemperaturePanel } from '../store/settings-ui';
// NOTE(review): importing a runtime value from a '.d' (declaration) module is suspect —
// declaration files should carry types only; confirm this module actually emits code.
import { speechRecognition } from '../speech-recognition-types.d'
import MicRecorder from 'mic-recorder-to-mp3';
import { selectUseOpenAIWhisper } from '../store/api-keys';

// Styled wrapper for the message-input bar (dark background, right-aligned inner column).
// NOTE(review): the CSS template literal is kept on one line byte-for-byte — its whitespace
// is string content and must not be reflowed in a comment-only edit.
const Container = styled.div` background: #292933; border-top: thin solid #393933; padding: 1rem 1rem 0 1rem; .inner { max-width: 50rem; margin: auto; text-align: right; } .settings-button { margin: 0.5rem -0.4rem 0.5rem 1rem; font-size: 0.7rem; color: #999; } `;

// Callback fired when the user submits a message.
// NOTE(review): bare `Promise` lacks a type argument — under strict settings this is a
// compile error (TS2314); presumably Promise<boolean>, matching context.onNewMessage below.
export declare type OnSubmit = (name?: string) => Promise;

export interface MessageInputProps {
    // When true, Enter-to-send is suppressed (see onKeyDown).
    disabled?: boolean;
}

// Chat message input bar: textarea + send/record controls, plus links that open the
// system-prompt and temperature settings panels.
export default function MessageInput(props: MessageInputProps) {
    const temperature = useAppSelector(selectTemperature);
    const message = useAppSelector(selectMessage);
    const [recording, setRecording] = useState(false);
    const hasVerticalSpace = useMediaQuery('(min-height: 1000px)');
    // NOTE(review): a new MicRecorder is constructed on EVERY render and is never used
    // in the visible part of this component — hoist into a ref/useMemo (or remove).
    const recorder = new MicRecorder({ bitRate: 128 })
    const useOpenAIWhisper = useAppSelector(selectUseOpenAIWhisper);
    const context = useAppContext();
    const dispatch = useAppDispatch();
    const intl = useIntl();

    const onCustomizeSystemPromptClick = useCallback(() => dispatch(openSystemPromptPanel()), [dispatch]);
    const onTemperatureClick = useCallback(() => dispatch(openTemperaturePanel()), [dispatch]);

    // Mirror textarea edits into the redux message slice.
    const onChange = useCallback((e: React.ChangeEvent) => {
        dispatch(setMessage(e.target.value));
    }, [dispatch]);

    const pathname = useLocation().pathname;

    // Send the current message; clear the input only if the context accepted it.
    const onSubmit = useCallback(async () => {
        if (await context.onNewMessage(message)) {
            dispatch(setMessage(''));
        }
    }, [context, message, dispatch]);

    // Toggle microphone capture. The browser SpeechRecognition path streams interim
    // transcripts into the message slice; the Whisper branches are empty in this chunk.
    // NOTE(review): not wrapped in useCallback and reads `recording` from the closure —
    // confirm stale-closure behavior is acceptable wherever this handler is attached.
    const onSpeechStart = () => {
        if (!recording) {
            setRecording(true);

            // if we are using whisper, then we will just record with the browser and send to the api when done
            if (useOpenAIWhisper) {
                // TODO(review): Whisper recording path intentionally empty in this chunk.
            } else {
                speechRecognition.continuous = true;
                speechRecognition.interimResults = true;

                speechRecognition.onresult = (event) => {
                    // Only the most recent result is used, so the textarea tracks the
                    // current utterance rather than accumulating all results.
                    const transcript = event.results[event.results.length - 1][0].transcript;
                    dispatch(setMessage(transcript));
                };

                speechRecognition.start();
            }
        } else {
            setRecording(false);
            if (useOpenAIWhisper) {
                // TODO(review): Whisper stop/upload path intentionally empty in this chunk.
            } else {
                speechRecognition.stop();
            }
        }
    }

    // Enter (without Shift) submits instead of inserting a newline, unless disabled.
    const onKeyDown = useCallback((e: React.KeyboardEvent) => {
        if (e.key === 'Enter' && e.shiftKey === false && !props.disabled) {
            e.preventDefault();
            onSubmit();
        }
    }, [onSubmit, props.disabled]);

    // Controls rendered at the right edge of the textarea (send / record / spinner).
    // NOTE(review): the JSX inside this memo arrived garbled in this chunk — fragments
    // are opened but never closed and their child elements are missing. Reconstruct
    // from the original file; do not ship this as-is.
    const rightSection = useMemo(() => {
        return (
            {context.generating && (<> )} {!context.generating && ( <> )}
        );
    }, [recording, onSubmit, props.disabled, context.generating]);

    const disabled = context.generating;

    const isLandingPage = pathname === '/';
    // Hide the input entirely on shared conversations, and on non-landing routes
    // that have no active chat id.
    if (context.isShare || (!isLandingPage && !context.id)) {
        return null;
    }

    // NOTE(review): the returned JSX is truncated in this chunk — everything after
    // `return` is missing and must be restored from the original file.
    return