// chat-with-gpt/app/src/components/input.tsx
import styled from '@emotion/styled';
import { Button, ActionIcon, Textarea, Loader } from '@mantine/core';
import { useMediaQuery } from '@mantine/hooks';
import { useCallback, useMemo, useState } from 'react';
import { FormattedMessage, useIntl } from 'react-intl';
import { useLocation } from 'react-router-dom';
import { useAppContext } from '../context';
import { useAppDispatch, useAppSelector } from '../store';
import { selectMessage, setMessage } from '../store/message';
import { selectTemperature } from '../store/parameters';
import { openSystemPromptPanel, openTemperaturePanel } from '../store/settings-ui';
import { speechRecognition } from '../speech-recognition-types.d'
import MicRecorder from 'mic-recorder-to-mp3';
import { selectUseOpenAIWhisper, selectOpenAIApiKey } from '../store/api-keys';
import { Mp3Encoder } from 'lamejs';
// Styled wrapper for the message composer: a dark bar whose `.inner` column is
// centered (max 50rem) with right-aligned content, and small muted
// `.settings-button` links beneath the textarea.
const Container = styled.div`
    background: #292933;
    border-top: thin solid #393933;
    padding: 1rem 1rem 0 1rem;
    .inner {
        max-width: 50rem;
        margin: auto;
        text-align: right;
    }
    .settings-button {
        margin: 0.5rem -0.4rem 0.5rem 1rem;
        font-size: 0.7rem;
        color: #999;
    }
`;
/**
 * Handler invoked when input is submitted. The optional `name` argument and
 * the meaning of the boolean result are defined by the consumer
 * (presumably true = accepted — confirm against callers).
 */
// NOTE(review): dropped the redundant `declare` — it has no effect on a type
// alias in an implementation module and is non-idiomatic.
export type OnSubmit = (name?: string) => Promise<boolean>;

/** Props for the MessageInput component. */
export interface MessageInputProps {
    /** Disables the textarea and the Enter-to-send shortcut. */
    disabled?: boolean;
}
/** Converts Web Audio float samples (range [-1, 1]) to 16-bit signed PCM. */
function floatTo16BitPCM(samples: Float32Array): Int16Array {
    const out = new Int16Array(samples.length);
    for (let i = 0; i < samples.length; i++) {
        const s = Math.max(-1, Math.min(1, samples[i]));
        out[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;
    }
    return out;
}

/**
 * Decodes an audio Blob and re-encodes it as one or more MP3 files, splitting
 * the PCM stream so each chunk stays under the 25 MB upload limit.
 *
 * @param file Audio blob in any format the browser can decode.
 * @returns MP3 File objects named `text-0.mp3`, `text-1.mp3`, ...
 */
async function chunkAndEncodeMP3File(file: Blob): Promise<Array<File>> {
    const MAX_CHUNK_SIZE = 25 * 1024 * 1024; // 25 MB upload limit per chunk
    const audioContext = new AudioContext();
    try {
        const audioBuffer = await audioContext.decodeAudioData(await file.arrayBuffer());
        const sampleRate = audioBuffer.sampleRate;
        const numChannels = audioBuffer.numberOfChannels;
        const bytesPerSample = 2; // 16-bit PCM
        const samplesPerChunk = Math.floor((MAX_CHUNK_SIZE / bytesPerSample) / numChannels);
        const totalSamples = Math.floor(audioBuffer.duration * sampleRate);
        const numChunks = Math.ceil(totalSamples / samplesPerChunk);
        const chunks: Array<File> = [];
        for (let i = 0; i < numChunks; i++) {
            const startSample = i * samplesPerChunk;
            const endSample = Math.min(startSample + samplesPerChunk, totalSamples);
            // BUG FIX: lamejs expects Int16Array samples, but getChannelData
            // returns Float32Array in [-1, 1]; encoding floats directly
            // produced corrupt (near-silent) MP3 data.
            const left = floatTo16BitPCM(audioBuffer.getChannelData(0).subarray(startSample, endSample));
            const right = numChannels === 1
                ? left
                : floatTo16BitPCM(audioBuffer.getChannelData(1).subarray(startSample, endSample));
            const encoder = new Mp3Encoder(numChannels, sampleRate, 128);
            const mp3Parts: Array<BlobPart> = [];
            const encoded = encoder.encodeBuffer(left, right);
            if (encoded.length > 0) {
                mp3Parts.push(encoded);
            }
            // BUG FIX: flush() emits the final MP3 frames; without it every
            // chunk was truncated.
            const remainder = encoder.flush();
            if (remainder.length > 0) {
                mp3Parts.push(remainder);
            }
            const chunkBlob = new Blob(mp3Parts, { type: 'audio/mp3' });
            chunks.push(new File([chunkBlob], `text-${i}.mp3`, { type: 'audio/mp3' }));
        }
        return chunks;
    } finally {
        // BUG FIX: release decoder/hardware resources; the context was leaked.
        await audioContext.close();
    }
}
export default function MessageInput(props: MessageInputProps) {
2023-03-10 22:00:37 +00:00
const temperature = useAppSelector(selectTemperature);
const message = useAppSelector(selectMessage);
2023-03-18 12:49:30 +00:00
const [recording, setRecording] = useState(false);
2023-03-14 11:00:40 +00:00
const hasVerticalSpace = useMediaQuery('(min-height: 1000px)');
2023-03-19 07:22:23 +00:00
const recorder = useMemo(() => new MicRecorder({ bitRate: 128 }), []);
const useOpenAIWhisper = useAppSelector(selectUseOpenAIWhisper);
2023-03-19 07:22:23 +00:00
const openAIApiKey = useAppSelector(selectOpenAIApiKey);
2023-03-18 12:49:30 +00:00
2023-03-06 13:30:58 +00:00
const context = useAppContext();
2023-03-10 22:00:37 +00:00
const dispatch = useAppDispatch();
2023-03-14 11:00:40 +00:00
const intl = useIntl();
2023-03-06 13:30:58 +00:00
2023-03-10 22:01:45 +00:00
const onCustomizeSystemPromptClick = useCallback(() => dispatch(openSystemPromptPanel()), [dispatch]);
const onTemperatureClick = useCallback(() => dispatch(openTemperaturePanel()), [dispatch]);
2023-03-06 13:30:58 +00:00
const onChange = useCallback((e: React.ChangeEvent<HTMLTextAreaElement>) => {
2023-03-10 22:00:37 +00:00
dispatch(setMessage(e.target.value));
2023-03-10 22:01:45 +00:00
}, [dispatch]);
2023-03-10 22:00:37 +00:00
const pathname = useLocation().pathname;
2023-03-06 13:30:58 +00:00
const onSubmit = useCallback(async () => {
2023-03-10 22:00:37 +00:00
if (await context.onNewMessage(message)) {
dispatch(setMessage(''));
2023-03-06 13:30:58 +00:00
}
2023-03-10 22:00:37 +00:00
}, [context, message, dispatch]);
2023-03-06 13:30:58 +00:00
const onSpeechStart = useCallback(() => {
2023-03-19 07:22:23 +00:00
2023-03-18 12:49:30 +00:00
if (!recording) {
setRecording(true);
// if we are using whisper, the we will just record with the browser and send the api when done
if (useOpenAIWhisper) {
2023-03-19 07:22:23 +00:00
recorder.start().catch((e: any) => console.error(e));
} else {
speechRecognition.continuous = true;
speechRecognition.interimResults = true;
2023-03-18 12:49:30 +00:00
speechRecognition.onresult = (event) => {
const transcript = event.results[event.results.length - 1][0].transcript;
dispatch(setMessage(transcript));
};
speechRecognition.start();
}
2023-03-18 12:49:30 +00:00
} else {
setRecording(false);
if (useOpenAIWhisper) {
2023-03-19 07:22:23 +00:00
const mp3 = recorder.stop().getMp3();
mp3.then(async ([buffer, blob]) => {
const file = new File(buffer, 'chat.mp3', {
type: blob.type,
lastModified: Date.now()
});
// TODO: cut in chunks
var data = new FormData()
data.append('file', file);
data.append('model', 'whisper-1')
try {
const response = await fetch("https://api.openai.com/v1/audio/transcriptions", {
method: "POST",
headers: {
'Authorization': `Bearer ${openAIApiKey}`,
},
body: data,
});
const json = await response.json()
if (json.text) {
dispatch(setMessage(json.text));
}
} catch (e) {
console.log(e)
2023-03-19 07:22:23 +00:00
}
2023-03-19 07:22:23 +00:00
}).catch((e: any) => console.error(e));
} else {
speechRecognition.stop();
}
2023-03-18 12:49:30 +00:00
}
}, [recording, message, dispatch]);
2023-03-18 12:49:30 +00:00
2023-03-06 13:30:58 +00:00
const onKeyDown = useCallback((e: React.KeyboardEvent<HTMLTextAreaElement>) => {
2023-03-08 21:30:11 +00:00
if (e.key === 'Enter' && e.shiftKey === false && !props.disabled) {
2023-03-06 13:30:58 +00:00
e.preventDefault();
onSubmit();
}
}, [onSubmit, props.disabled]);
const rightSection = useMemo(() => {
2023-03-18 12:49:30 +00:00
2023-03-06 13:30:58 +00:00
return (
<div style={{
opacity: '0.8',
2023-03-14 11:00:40 +00:00
paddingRight: '0.5rem',
display: 'flex',
justifyContent: 'flex-end',
alignItems: 'center',
width: '100%',
2023-03-06 13:30:58 +00:00
}}>
2023-03-14 11:00:40 +00:00
{context.generating && (<>
<Button variant="subtle" size="xs" compact onClick={() => {
context.chat.cancelReply(context.currentChat.leaf!.id);
}}>
2023-03-16 20:05:45 +00:00
<FormattedMessage defaultMessage={"Cancel"} description="Label for the button that can be clicked while the AI is generating a response to cancel generation" />
2023-03-14 11:00:40 +00:00
</Button>
<Loader size="xs" style={{ padding: '0 0.8rem 0 0.5rem' }} />
</>)}
{!context.generating && (
2023-03-18 12:49:30 +00:00
<>
<ActionIcon size="xl"
onClick={onSpeechStart}>
<i className="fa fa-microphone" style={{ fontSize: '90%', color: recording ? 'red' : 'inherit' }} />
</ActionIcon>
<ActionIcon size="xl"
onClick={onSubmit}>
<i className="fa fa-paper-plane" style={{ fontSize: '90%' }} />
</ActionIcon>
2023-03-18 12:49:30 +00:00
</>
2023-03-14 11:00:40 +00:00
)}
2023-03-06 13:30:58 +00:00
</div>
);
2023-03-18 12:49:30 +00:00
}, [recording, onSubmit, props.disabled, context.generating]);
2023-03-06 13:30:58 +00:00
2023-03-14 11:00:40 +00:00
const disabled = context.generating;
2023-03-08 21:30:11 +00:00
const isLandingPage = pathname === '/';
if (context.isShare || (!isLandingPage && !context.id)) {
return null;
}
2023-03-10 22:00:37 +00:00
2023-03-06 13:30:58 +00:00
return <Container>
<div className="inner">
2023-03-08 21:30:11 +00:00
<Textarea disabled={props.disabled || disabled}
2023-03-06 13:30:58 +00:00
autosize
2023-03-14 11:00:40 +00:00
minRows={(hasVerticalSpace || context.isHome) ? 3 : 2}
2023-03-06 13:30:58 +00:00
maxRows={12}
2023-03-14 11:00:40 +00:00
placeholder={intl.formatMessage({ defaultMessage: "Enter a message here..." })}
2023-03-10 22:00:37 +00:00
value={message}
2023-03-06 13:30:58 +00:00
onChange={onChange}
rightSection={rightSection}
2023-03-14 11:00:40 +00:00
rightSectionWidth={context.generating ? 100 : 55}
2023-03-06 13:30:58 +00:00
onKeyDown={onKeyDown} />
<div>
2023-03-08 21:30:11 +00:00
<Button variant="subtle"
className="settings-button"
size="xs"
compact
2023-03-10 22:00:37 +00:00
onClick={onCustomizeSystemPromptClick}>
2023-03-14 11:00:40 +00:00
<span>
2023-03-16 20:05:45 +00:00
<FormattedMessage defaultMessage={"Customize system prompt"} description="Label for the button that opens a modal for customizing the 'system prompt', a message used to customize and influence how the AI responds." />
2023-03-14 11:00:40 +00:00
</span>
2023-03-06 13:30:58 +00:00
</Button>
2023-03-08 21:30:11 +00:00
<Button variant="subtle"
className="settings-button"
size="xs"
compact
2023-03-10 22:00:37 +00:00
onClick={onTemperatureClick}>
2023-03-14 11:00:40 +00:00
<span>
<FormattedMessage defaultMessage="Temperature: {temperature, number, ::.0}"
2023-03-16 20:05:45 +00:00
description="Label for the button that opens a modal for setting the 'temperature' (randomness) of AI responses"
2023-03-14 11:00:40 +00:00
values={{ temperature }} />
</span>
2023-03-06 13:30:58 +00:00
</Button>
</div>
</div>
</Container>;
}