diff --git a/components.d.ts b/components.d.ts
index 08aebdf..d06cd78 100644
--- a/components.d.ts
+++ b/components.d.ts
@@ -11,6 +11,7 @@ declare module '@vue/runtime-core' {
   export interface GlobalComponents {
     Avatar: typeof import('./src/components/Avatar.vue')['default']
     Button: typeof import('./src/components/Button.vue')['default']
+    ElCascader: typeof import('element-plus/es')['ElCascader']
     ElSwitch: typeof import('element-plus/es')['ElSwitch']
     ElTooltip: typeof import('element-plus/es')['ElTooltip']
     InputKit: typeof import('./src/components/InputKit.vue')['default']
diff --git a/src/hooks/useSpeechService.ts b/src/hooks/useSpeechService.ts
index e666444..3d5e89f 100644
--- a/src/hooks/useSpeechService.ts
+++ b/src/hooks/useSpeechService.ts
@@ -83,7 +83,6 @@ export const useSpeechService = ({ langs = ['fr-FR', 'ja-JP', 'en-US', 'z
     mediaRecorder = new MediaRecorder(stream)
 
     mediaRecorder.ondataavailable = (e) => {
-      console.log(chunks, 'c')
       chunks.push(e.data)
     }
 
@@ -103,12 +102,10 @@ export const useSpeechService = ({ langs = ['fr-FR', 'ja-JP', 'en-US', 'z
 
     isRecognizReadying.value = true
 
-    recognizer.value.canceled = () => {
-      console.log('Recognize canceled')
-    }
     recognizer.value.recognized = (s, e) => {
       console.log('Recognize result: ', e.result.text)
       cb && cb(e.result.text)
+      // isRecognizing.value = false
     }
     recognizer.value.recognizing = (s, event) => {
       console.log('Recognize recognizing', event.result.text)
@@ -128,9 +125,9 @@ export const useSpeechService = ({ langs = ['fr-FR', 'ja-JP', 'en-US', 'z
       isRecognizing.value = false
     }
     recognizer.value.startContinuousRecognitionAsync(async () => {
-      await audioRecorder()
       isRecognizing.value = true
       isRecognizReadying.value = false
+      await audioRecorder()
       console.log('Recognize...')
     },
     (error) => {
diff --git a/src/pages/Home/components/Content.vue b/src/pages/Home/components/Content.vue
index a708071..c5e31fb 100644
--- a/src/pages/Home/components/Content.vue
+++ b/src/pages/Home/components/Content.vue
@@ -114,9 +114,9 @@ async function onSubmit(fromRecognize = false) {
     ...currentChatMessages.value,
     { content: message.value, role: 'user', audioBlob: fromRecognize ? await blobToBase64(audioBlob.value) : '' },
   ])
-  const tempCurrentChatMessages = currentChatMessages.value.map(x => ({ content: x.content, role: x.role })) // strip audioBlob from the outgoing request
+  const tempCurrentChatMessages = chatMessages.value.map(x => ({ content: x.content, role: x.role })) // strip audioBlob from the outgoing request
   const systemMessage = currentChatMessages.value[0]
-  const relativeMessage = [...tempCurrentChatMessages, { content: message.value, role: 'user' }].slice(-(Number(chatRememberCount.value))) // keep only the most recent messages
+  const relativeMessage = tempCurrentChatMessages.slice(-(Number(chatRememberCount.value))) // keep only the most recent messages
   const prompts = [systemMessage, ...relativeMessage] as ChatMessage[]
 
   message.value = ''
diff --git a/src/pages/Home/components/NewChat.vue b/src/pages/Home/components/NewChat.vue
index 962ebde..00ece6c 100644
--- a/src/pages/Home/components/NewChat.vue
+++ b/src/pages/Home/components/NewChat.vue
@@ -15,33 +15,46 @@ const currentAvatarIndex = ref(Math.random() * avatarList.value.length | 0)
 const store = useConversationStore()
 const { ssmlToSpeak, isSynthesizing, isPlaying } = useSpeechService({ isFetchAllVoice: false })
 const allLanguages = computed(() => [...new Set(allVoices.map(v => v.locale))].filter(l => Object.keys(supportLanguageMap).includes(l)))
-const selectLanguage = ref('')
-const filterVoices = ref([])
-const selectVoiceName = ref('')
 const desc = ref('')
 const name = ref('')
 const rate = ref('1.0')
 const previewText = ref('polyglot is awesome!')
-const filterStyles = ref([])
-const selectStyle = ref('Neural')
+const presets = ref('Act as if you are meeting someone for the first time. How would you introduce yourself and start a conversation?')
 
-const canAdd = computed(() => !!(selectLanguage.value && selectVoiceName.value && desc.value && name.value))
-
-onBeforeMount(() => {
-  selectLanguage.value = allLanguages.value[0]
-  changeSelectLanguage(selectLanguage.value)
-})
+const voiceValue = ref(['en-US', 'en-US-JennyNeural', 'chat'])
 
-watch(selectLanguage, changeSelectLanguage)
+const selectLanguage = computed(() => voiceValue.value[0])
+const selectVoiceName = computed(() => voiceValue.value[1])
+const selectStyle = computed(() => voiceValue.value[2])
+const canAdd = computed(() => !!(selectLanguage.value && selectVoiceName.value && desc.value && name.value))
 
-function changeSelectLanguage(newSelectLanguage: string) {
-  filterVoices.value = allVoices.filter(v => v.locale === newSelectLanguage)
-  selectVoiceName.value = filterVoices.value[0]?.shortName
+interface Option {
+  label: string
+  value: string
+  children?: Option[]
 }
 
-watch(selectVoiceName, (n) => {
-  filterStyles.value = filterVoices.value.filter(v => v.shortName === n)[0]?.styleList || []
-  selectStyle.value = filterStyles.value[0] || 'Neural'
+const options = ref([])
+
+onMounted(() => {
+  allLanguages.value.forEach((item) => {
+    const children: Option[] = []
+    allVoices.forEach((v) => {
+      if (v.locale === item) {
+        children.push({
+          value: v.shortName,
+          label: `${v.gender === 1 ? '🧒🏻' : '👦🏻'} ${v.localName}`,
+          children: v.styleList?.map(x => ({ label: voiceStyleMap[x], value: x })) || [],
+        })
+      }
+    })
+
+    options.value.push({
+      value: item,
+      label: supportLanguageMap[item],
+      children,
+    })
+  })
 })
 
 const randomAvatar = getAvatarUrl(avatarList.value[Math.random() * avatarList.value.length | 0]) // pick a random avatar as the default
@@ -60,7 +73,7 @@ const addChat = (event: any) => {
     rate: +rate.value,
     isDefault: false,
     voiceStyle: selectStyle.value,
-  })
+  }, presets.value)
   store.changeCurrentKey(uid)
   emits('close')
 }
@@ -74,16 +87,8 @@ const previewSpeech = () => {
 }
-
-
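
Note: the { value, label, children } tree built in onMounted matches the node shape Element Plus's ElCascader consumes, and voiceValue holds the selected path (locale → voice → style). A minimal, hypothetical usage sketch, not taken from this diff (the project's actual template markup may differ):

<!-- hypothetical sketch only: bind the new refs to the Element Plus cascader -->
<el-cascader v-model="voiceValue" :options="options" />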