import styled from '@emotion/styled';
import { Button, ActionIcon, Textarea, Loader, Popover } from '@mantine/core';
import { getHotkeyHandler, useHotkeys, useMediaQuery } from '@mantine/hooks';
import { useCallback, useEffect, useMemo, useState } from 'react';
import { FormattedMessage, useIntl } from 'react-intl';
import { useLocation, useNavigate } from 'react-router-dom';
import { useAppContext } from '../core/context';
import { useAppDispatch, useAppSelector } from '../store';
import { selectMessage, setMessage } from '../store/message';
import { selectSettingsTab, openOpenAIApiKeyPanel } from '../store/settings-ui';
import { speechRecognition, supportsSpeechRecognition } from '../core/speech-recognition-types';
import { useWhisper } from '@chengsokdara/use-whisper';
import QuickSettings from './quick-settings';
import { useOption } from '../core/options/use-option';

const Container = styled.div`
    background: #292933;
    border-top: thin solid #393933;
    padding: 1rem 1rem 0 1rem;

    .inner {
        max-width: 50rem;
        margin: auto;
        text-align: right;
    }

    .settings-button {
        margin: 0.5rem -0.4rem 0.5rem 1rem;
        font-size: 0.7rem;
        color: #999;
    }
`;

export declare type OnSubmit = (name?: string) => Promise<boolean>;

export interface MessageInputProps {
    disabled?: boolean;
}
export default function MessageInput(props: MessageInputProps) {
    const message = useAppSelector(selectMessage);
    const [recording, setRecording] = useState(false);
    const [speechError, setSpeechError] = useState<string | null>(null);
    const hasVerticalSpace = useMediaQuery('(min-height: 1000px)');
    const [useOpenAIWhisper] = useOption<boolean>('speech-recognition', 'use-whisper');
    const [openAIApiKey] = useOption<string>('openai', 'apiKey');
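    // initialMessage holds the draft text that was in the box when recording started,
    // so the finished transcript can be appended to it. The use-whisper hook records
    // audio and transcribes it with the OpenAI Whisper API using the configured key.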
    const [initialMessage, setInitialMessage] = useState('');

    const {
        transcribing,
        transcript,
        startRecording,
        stopRecording,
    } = useWhisper({
        apiKey: openAIApiKey || ' ',
        streaming: false,
    });

    const navigate = useNavigate();
    const context = useAppContext();
    const dispatch = useAppDispatch();
    const intl = useIntl();

    const tab = useAppSelector(selectSettingsTab);
    const [showMicrophoneButton] = useOption<boolean>('speech-recognition', 'show-microphone');
    const [submitOnEnter] = useOption<boolean>('input', 'submit-on-enter');

    const onChange = useCallback((e: React.ChangeEvent<HTMLTextAreaElement>) => {
        dispatch(setMessage(e.target.value));
    }, [dispatch]);

    const pathname = useLocation().pathname;
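    // Submit the current draft: create the message, navigate to its chat if we are
    // not already there, then clear the input.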
    const onSubmit = useCallback(async () => {
        setSpeechError(null);

        const id = await context.onNewMessage(message);

        if (id) {
            if (!window.location.pathname.includes(id)) {
                navigate('/chat/' + id);
            }
            dispatch(setMessage(''));
        }
    }, [context, message, dispatch, navigate]);
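    // On a speech-recognition error, log it, surface the message in the popover,
    // and make sure both recognition back ends are stopped.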
    const onSpeechError = useCallback((e: any) => {
        console.error('speech recognition error', e);
        setSpeechError(e.message);

        try {
            speechRecognition?.stop();
        } catch (e) { }

        try {
            stopRecording();
        } catch (e) { }

        setRecording(false);
    }, [stopRecording]);

    const onHideSpeechError = useCallback(() => setSpeechError(null), []);
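    // Toggle voice input. Resolve microphone permission first, then either start or
    // stop whichever back end is in use: Whisper recording (when enabled, or when the
    // browser lacks SpeechRecognition) or the native SpeechRecognition API.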
    const onSpeechStart = useCallback(async () => {
        let granted = false;
        let denied = false;

        try {
            const result = await navigator.permissions.query({ name: 'microphone' as any });
            if (result.state == 'granted') {
                granted = true;
            } else if (result.state == 'denied') {
                denied = true;
            }
        } catch (e) { }
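        // Permission state unknown (e.g. the Permissions API is unavailable): probe by
        // requesting an audio stream, then immediately release the tracks.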
        if (!granted && !denied) {
            try {
                const stream = await navigator.mediaDevices.getUserMedia({ video: false, audio: true });
                stream.getTracks().forEach(track => track.stop());
                granted = true;
            } catch (e) {
                denied = true;
            }
        }

        if (denied) {
            onSpeechError(new Error('speech permission was not granted'));
            return;
        }

        try {
            if (!recording) {
                setRecording(true);

                if (useOpenAIWhisper || !supportsSpeechRecognition) {
                    if (!openAIApiKey) {
                        dispatch(openOpenAIApiKeyPanel());
                        return false;
                    }
                    // recorder.start().catch(onSpeechError);
                    setInitialMessage(message);
                    await startRecording();
                } else if (speechRecognition) {
                    const initialMessage = message;

                    speechRecognition.continuous = true;
                    speechRecognition.interimResults = true;

                    speechRecognition.onresult = (event) => {
                        let transcript = '';
                        for (let i = 0; i < event.results.length; i++) {
                            if (event.results[i].isFinal && event.results[i][0].confidence) {
                                transcript += event.results[i][0].transcript;
                            }
                        }
                        dispatch(setMessage(initialMessage + ' ' + transcript));
                    };

                    speechRecognition.start();
                } else {
                    onSpeechError(new Error('not supported'));
                }
            } else {
                if (useOpenAIWhisper || !supportsSpeechRecognition) {
                    await stopRecording();
                    setTimeout(() => setRecording(false), 500);
                } else if (speechRecognition) {
                    speechRecognition.stop();
                    setRecording(false);
                } else {
                    onSpeechError(new Error('not supported'));
                }
            }
        } catch (e) {
            onSpeechError(e);
        }
    }, [recording, message, dispatch, onSpeechError, setInitialMessage, openAIApiKey]);
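    // Once Whisper has finished transcribing and recording has stopped, append the
    // transcript to the draft captured when recording began.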
    useEffect(() => {
        if (useOpenAIWhisper || !supportsSpeechRecognition) {
            if (!transcribing && !recording && transcript?.text) {
                dispatch(setMessage(initialMessage + ' ' + transcript.text));
            }
        }
    }, [initialMessage, transcript, recording, transcribing, useOpenAIWhisper, dispatch]);
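    // Global hotkey: 'n' focuses the message input.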
    useHotkeys([
        ['n', () => document.querySelector<HTMLTextAreaElement>('#message-input')?.focus()]
    ]);

    const blur = useCallback(() => {
        document.querySelector<HTMLTextAreaElement>('#message-input')?.blur();
    }, []);
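    // Controls docked at the right edge of the textarea: a Cancel button and spinner
    // while a reply is generating, otherwise the microphone button (with an error
    // popover) and the send button.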
    const rightSection = useMemo(() => {
        return (
            <div style={{
                opacity: '0.8',
                paddingRight: '0.5rem',
                display: 'flex',
                justifyContent: 'flex-end',
                alignItems: 'center',
                width: '100%',
            }}>
                {context.generating && (<>
                    <Button variant="subtle" size="xs" compact onClick={() => {
                        context.chat.cancelReply(context.currentChat.chat?.id, context.currentChat.leaf!.id);
                    }}>
                        <FormattedMessage defaultMessage={"Cancel"} description="Label for the button that can be clicked while the AI is generating a response to cancel generation" />
                    </Button>
                    <Loader size="xs" style={{ padding: '0 0.8rem 0 0.5rem' }} />
                </>)}
                {!context.generating && (
                    <>
                        {showMicrophoneButton && <Popover width={200} position="bottom" withArrow shadow="md" opened={speechError !== null}>
                            <Popover.Target>
                                <ActionIcon size="xl"
                                    onClick={onSpeechStart}>
                                    {transcribing && <Loader size="xs" />}
                                    {!transcribing && <i className="fa fa-microphone" style={{ fontSize: '90%', color: recording ? 'red' : 'inherit' }} />}
                                </ActionIcon>
                            </Popover.Target>
                            <Popover.Dropdown>
                                <div style={{
                                    display: 'flex',
                                    flexDirection: 'column',
                                    alignItems: 'flex-start',
                                }}>
                                    <p style={{
                                        fontFamily: `"Work Sans", sans-serif`,
                                        fontSize: '0.9rem',
                                        textAlign: 'center',
                                        marginBottom: '0.5rem',
                                    }}>
                                        Sorry, an error occurred trying to record audio.
                                    </p>
                                    <Button variant="light" size="xs" fullWidth onClick={onHideSpeechError}>
                                        Close
                                    </Button>
                                </div>
                            </Popover.Dropdown>
                        </Popover>}
                        <ActionIcon size="xl"
                            onClick={onSubmit}>
                            <i className="fa fa-paper-plane" style={{ fontSize: '90%' }} />
                        </ActionIcon>
                    </>
                )}
            </div>
        );
    }, [recording, transcribing, onSubmit, onSpeechStart, props.disabled, context.generating, speechError, onHideSpeechError, showMicrophoneButton]);
    const disabled = context.generating;

    // Keyboard shortcuts for the textarea: Escape blurs it, Ctrl+Enter always submits,
    // and plain Enter submits when the submit-on-enter option is enabled.
    const hotkeyHandler = useMemo(() => {
        const keys = [
            ['Escape', blur, { preventDefault: true }],
            ['ctrl+Enter', onSubmit, { preventDefault: true }],
        ];
        if (submitOnEnter) {
            keys.unshift(['Enter', onSubmit, { preventDefault: true }]);
        }
        const handler = getHotkeyHandler(keys as any);
        return handler;
    }, [onSubmit, blur, submitOnEnter]);

    const isLandingPage = pathname === '/';
    if (context.isShare || (!isLandingPage && !context.id)) {
        return null;
    }
    return <Container>
        <div className="inner">
            <Textarea disabled={props.disabled || disabled}
                id="message-input"
                autosize
                minRows={(hasVerticalSpace || context.isHome) ? 3 : 2}
                maxRows={12}
                placeholder={intl.formatMessage({ defaultMessage: "Enter a message here..." })}
                value={message}
                onChange={onChange}
                rightSection={rightSection}
                rightSectionWidth={context.generating ? 100 : 55}
                onKeyDown={hotkeyHandler} />
            <QuickSettings key={tab} />
        </div>
    </Container>;
}