Skip to content

Commit

Permalink
feat: text to speech
Browse files Browse the repository at this point in the history
  • Loading branch information
liuxinqi committed Mar 27, 2023
1 parent 829bfac commit e90ec2c
Show file tree
Hide file tree
Showing 6 changed files with 179 additions and 19 deletions.
52 changes: 45 additions & 7 deletions src/components/Content.vue
Original file line number Diff line number Diff line change
@@ -1,20 +1,30 @@
<script setup lang="ts">
import Button from '@/components/widgets/Button.vue'
import { generateDashboardInfo, generateText } from '@/server/api'
import { generateText } from '@/server/api'
import { useScroll } from '@/hooks'
import { Recognition, getKey, verifyKey } from '@/utils'
// states
const chatMessages = ref<ChatMessage[]>([])
const message = ref('')
const loading = ref(false)
const text = ref('')
const recognition = new Recognition('en-US')
// hooks
const { el, scrollToBottom } = useScroll()
const speech = useSpeechSynthesis(text)
const { start } = useSpeechRecognition()
// effects
watch(chatMessages.value, () => nextTick(() => scrollToBottom()))
// methods
function play(content: string) {
text.value = content
speech.speak()
}
const roleClass = (role: string) => {
switch (role) {
case 'user':
Expand All @@ -25,17 +35,32 @@ const roleClass = (role: string) => {
return 'bg-gray-500'
}
}
const startTalking = () => {
recognition.start()
recognition.onResult((value) => {
console.log('value', value)
})
}
const onSubmit = async () => {
const key = getKey()
if (!verifyKey(key)) return alert('请输入正确的API-KEY')
if (!message.value) return
chatMessages.value.push({
content: message.value,
role: 'user',
})
message.value = ''
loading.value = true
const res = await generateText(chatMessages.value)
const res = await generateText(chatMessages.value, key!)
if (res.error) {
alert(res.error?.message)
return loading.value = false
}
chatMessages.value.push({
content: res,
content: res.choices[0].message.content,
role: 'assistant',
})
loading.value = false
Expand All @@ -52,9 +77,21 @@ const onSubmit = async () => {
>
<div :class="roleClass(item.role)" />
<div relative>
<p mx-2 px-2 py-1 chat-box>
{{ item.content }}
</p>
<div mx-2>
<p px-2 py-1 chat-box>
{{ item.content }}
</p>
<p v-if="item.role === 'assistant'" flex>
<span class="bg-gray-100/20 rounded-lg w-4 py-1 px-3 center" @click="play(item.content)">
<i icon-btn rotate-90 i-ic:sharp-wifi />
</span>
<!-- <span
class="bg-gray-100/20 ml-1 cursor-pointer rounded-lg w-4 py-1 px-3 center"
>
<i icon-btn i-carbon:ibm-watson-language-translator />
</span> -->
</p>
</div>
</div>
</div>
</div>
Expand All @@ -63,6 +100,7 @@ const onSubmit = async () => {
<Button
mr-1
i-carbon:microphon
@click="startTalking()"
>
<i i-carbon:microphone />
</Button>
Expand Down
32 changes: 23 additions & 9 deletions src/server/api.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,18 +2,32 @@ import { OpenAi } from '@/utils'
const apiKey = import.meta.env.VITE_OPENAI_API_KEY
const proxy = import.meta.env.VITE_SERVE_PROXY

const openai = new OpenAi(apiKey, proxy)
// const openai = new OpenAi(apiKey, proxy)

export const generateText = async (messages: ChatMessage[], apiKey: string) => {
const openai = new OpenAi(apiKey, proxy)

export const generateText = async (messages: ChatMessage[]) => {
const { url, initOptions } = openai.generateTurboPayload({ messages })
const response = await fetch(url, initOptions)
const data = await response.json()
return data.choices[0].message.content
try {
const response = await fetch(url, initOptions)
const data = await response.json()
return data
}
catch (error) {
return `[Error] ${(error as any).message}. try again later or try using proxy`
}
}

export const generateDashboardInfo = async () => {
export const generateDashboardInfo = async (apiKey: string) => {
const openai = new OpenAi(apiKey, proxy)

const { url, initOptions } = openai.generateDashboardPayload()
const response = await fetch(url, initOptions)
const data = await response.json()
return data
try {
const response = await fetch(url, initOptions)
const data = await response.json()
return data
}
catch (error) {
return `[Error] ${(error as any).message}. try again later or try using proxy`
}
}
9 changes: 9 additions & 0 deletions src/shima.d.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
// Ambient augmentation so TypeScript accepts the vendor-prefixed
// Web Speech API constructors read off `window` by the `Recognition`
// wrapper in src/utils/speaker.ts. Typed `any` because the API is
// non-standard and has no bundled lib.dom typings for the prefixes.
declare global {
  interface Window {
    SpeechRecognition: any;
    webkitSpeechRecognition: any;
    mozSpeechRecognition: any;
    msSpeechRecognition: any;
  }
}
// A module-level export is required so this file is treated as a
// module and the `declare global` block actually takes effect.
export {};
2 changes: 1 addition & 1 deletion src/utils/index.ts
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
export * from './openAi'

export * from './speaker'
13 changes: 11 additions & 2 deletions src/utils/openAi.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,9 @@ export class OpenAi {
private proxy: string
private header: Record<string, string>

constructor(apiKey: string, proxy = 'https://api.openai.com') {
constructor(apiKey: string, proxy?: string) {
this.apiKey = apiKey
this.proxy = proxy
this.proxy = proxy || 'https://api.openai.com'
this.header = {
'Content-Type': 'application/json',
'Authorization': `Bearer ${this.apiKey}`,
Expand Down Expand Up @@ -121,3 +121,12 @@ export const parseOpenAIStream = (rawResponse: Response) => {

return stream
}

// Validates a user-supplied OpenAI API key.
// Returns true only when a key is present and has the classic secret-key
// length ("sk-" + 48 chars = 51). Coerced with `!!` so callers always get
// a real boolean instead of the leaked falsy `''` / `null` / `undefined`
// the bare `key && …` expression used to return.
export const verifyKey = (key?: string | null): boolean => !!key && key.length === 51

// Reads the user-saved OpenAI API key from browser storage.
// Returns null when no key has been stored yet.
export const getKey = (): string | null => localStorage.getItem('openKey')
90 changes: 90 additions & 0 deletions src/utils/speaker.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
export class Speaker {
  public utter: SpeechSynthesisUtterance
  public voices: SpeechSynthesisVoice[] = []

  /**
   * Thin wrapper around the browser SpeechSynthesis API for text playback.
   * @param option optional utterance settings; unspecified fields default
   *               to Mandarin Chinese ('zh-CN') at normal pitch/rate/volume.
   *               Defaults to {} so `new Speaker()` no longer throws.
   */
  constructor(option: { lang?: string; pitch?: number; rate?: number; volume?: number; text?: string } = {}) {
    const {
      lang = 'zh-CN',
      pitch = 1,
      rate = 1,
      volume = 1,
      text = '',
    } = option
    this.utter = new window.SpeechSynthesisUtterance()
    this.utter.lang = lang
    this.utter.pitch = pitch
    this.utter.rate = rate
    this.utter.volume = volume
    this.utter.text = text
    this.getVoices()
  }

  // Populate `voices` and select a default voice. Voices may already be
  // loaded (Firefox, or Chromium after first use), in which case the
  // `voiceschanged` event never fires again — so load eagerly once, and
  // refresh whenever the browser reports a change.
  getVoices() {
    const load = () => {
      this.voices = window.speechSynthesis.getVoices()
      if (this.voices.length > 0)
        this.utter.voice = this.voices[0] // default to the first available voice
    }
    load()
    window.speechSynthesis.onvoiceschanged = load
  }

  // Queue the current utterance for playback.
  start() {
    window.speechSynthesis.speak(this.utter)
  }

  // Pause playback.
  pause() {
    window.speechSynthesis.pause()
  }

  // Resume playback after a pause.
  resume() {
    window.speechSynthesis.resume()
  }

  // Stop playback and clear everything queued.
  cancel() {
    window.speechSynthesis.cancel()
  }

  // Replace the utterance text and speak it immediately.
  change(text: string) {
    this.utter.text = text
    window.speechSynthesis.speak(this.utter)
  }
}

export class Recognition {
  public recognition: any
  public isListening: boolean
  public result: string

  /**
   * Thin wrapper around the (vendor-prefixed) Web Speech recognition API.
   * @param lang BCP-47 language tag to recognize; defaults to 'zh-CN'.
   */
  constructor(lang = 'zh-CN') {
    const Impl
      = window.SpeechRecognition
      || window.webkitSpeechRecognition
      || window.mozSpeechRecognition
      || window.msSpeechRecognition
    this.recognition = new Impl()
    this.recognition.lang = lang
    this.isListening = false
    this.result = ''
  }

  // Begin capturing speech from the microphone.
  start() {
    this.isListening = true
    this.recognition.start()
  }

  // Stop capturing speech.
  stop() {
    this.isListening = false
    this.recognition.stop()
  }

  // Register a callback invoked with the first transcript of each result.
  onResult(callback: (result: string) => void) {
    this.recognition.onresult = (e: any) => {
      const transcript = e.results[0][0].transcript
      this.result = transcript
      callback(transcript)
    }
  }
}

0 comments on commit e90ec2c

Please sign in to comment.