-
Notifications
You must be signed in to change notification settings - Fork 275
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
liuxinqi
committed
Mar 29, 2023
1 parent
f21db55
commit 8daad6c
Showing
5 changed files
with
74 additions
and
107 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,2 +1,8 @@ | ||
# OpenAi Key | ||
VITE_OPENAI_API_KEY=xxx | ||
# Network Proxy | ||
VITE_SERVE_PROXY=xxx | ||
# Azure Key | ||
VITE_SCRIPTION_KEY=dxxx | ||
# Azure Region | ||
VITE_REGION=xxx |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,90 +1,46 @@ | ||
export class Speaker { | ||
public utter: SpeechSynthesisUtterance | ||
public voices: SpeechSynthesisVoice[] = [] | ||
|
||
constructor(option: { lang?: string; pitch?: number; rate?: number; volume?: number; text?: string }) { | ||
const { | ||
lang = 'zh-CN', | ||
pitch = 1, | ||
rate = 1, | ||
volume = 1, | ||
text = '', | ||
} = option | ||
this.utter = new window.SpeechSynthesisUtterance() | ||
this.utter.lang = lang | ||
this.utter.pitch = pitch | ||
this.utter.rate = rate | ||
this.utter.volume = volume | ||
this.utter.text = text | ||
this.getVoices() | ||
} | ||
|
||
getVoices() { | ||
window.speechSynthesis.onvoiceschanged = () => { | ||
this.voices = window.speechSynthesis.getVoices() | ||
if (this.voices.length > 0) | ||
this.utter.voice = this.voices[0] // 设置声音来源 | ||
} | ||
} | ||
|
||
// 开始播放当前的语音 | ||
start() { | ||
window.speechSynthesis.speak(this.utter) | ||
} | ||
|
||
// 暂停播放 | ||
pause() { | ||
window.speechSynthesis.pause() | ||
} | ||
|
||
// 暂停之后继续播放 | ||
resume() { | ||
window.speechSynthesis.resume() | ||
} | ||
|
||
// 清空所有播放 | ||
cancel() { | ||
window.speechSynthesis.cancel() | ||
} | ||
|
||
// 切换语音的内容 | ||
change(text: string) { | ||
this.utter.text = text | ||
window.speechSynthesis.speak(this.utter) | ||
} | ||
} | ||
|
||
export class Recognition { | ||
public recognition: any | ||
public isListening: boolean | ||
public result: string | ||
|
||
constructor(lang = 'zh-CN') { | ||
this.recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition || window.mozSpeechRecognition || window.msSpeechRecognition)() | ||
this.isListening = false | ||
this.result = '' | ||
this.recognition.lang = lang | ||
} | ||
|
||
// 开始语音识别 | ||
start() { | ||
this.isListening = true | ||
this.recognition.start() | ||
} | ||
|
||
// 结束语音识别 | ||
stop() { | ||
this.isListening = false | ||
this.recognition.stop() | ||
} | ||
|
||
// 监听语音识别的结果 | ||
onResult(callback: (result: string) => void) { | ||
this.recognition.onresult = (e: any) => { | ||
const result = e.results[0][0].transcript | ||
this.result = result | ||
callback(result) | ||
} | ||
import type { VoiceInfo } from 'microsoft-cognitiveservices-speech-sdk' | ||
import { | ||
AudioConfig, | ||
SpeechConfig, | ||
SpeechRecognizer, | ||
SpeechSynthesizer, | ||
} from 'microsoft-cognitiveservices-speech-sdk' | ||
|
||
export class SpeechService { | ||
private recognizer: SpeechRecognizer | ||
private synthesizer: SpeechSynthesizer | ||
private speechConfig: SpeechConfig | ||
constructor(subscriptionKey: string, region: string) { | ||
const speechConfig = SpeechConfig.fromSubscription(subscriptionKey, region) | ||
speechConfig.speechRecognitionLanguage = 'en-US' | ||
speechConfig.speechSynthesisLanguage = 'en-US' | ||
speechConfig.speechSynthesisVoiceName = 'en-US-GuyNeural' | ||
|
||
this.speechConfig = speechConfig | ||
|
||
const audioConfig = AudioConfig.fromDefaultMicrophoneInput() | ||
this.recognizer = new SpeechRecognizer(this.speechConfig, audioConfig) | ||
this.synthesizer = new SpeechSynthesizer(this.speechConfig) | ||
} | ||
|
||
public recognizeSpeech(): Promise<string> { | ||
return new Promise((resolve, reject) => { | ||
this.recognizer.recognizeOnceAsync((result) => { | ||
if (result.text) | ||
resolve(result.text) | ||
else | ||
reject(new Error('语音识别失败')) | ||
}) | ||
}) | ||
} | ||
|
||
public textToSpeak(text: string, voice?: string) { | ||
this.speechConfig.speechSynthesisVoiceName = voice || this.speechConfig.speechSynthesisVoiceName | ||
this.synthesizer.speakTextAsync(text) | ||
} | ||
|
||
public async getVoices(): Promise<VoiceInfo[]> { | ||
const res = await this.synthesizer.getVoicesAsync() | ||
return res.voices | ||
} | ||
} | ||
|