Skip to content

Commit

Permalink
20230714
Browse files Browse the repository at this point in the history
1、修复linux及mac运行出错问题;
2、修复因唇型出错无法继续执行问题;
3、提供rwkv对接方案。
  • Loading branch information
xszyou committed Jul 14, 2023
1 parent b53bc6e commit 5d4bac2
Show file tree
Hide file tree
Showing 6 changed files with 89 additions and 9 deletions.
2 changes: 1 addition & 1 deletion ai_module/nlp_gpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
def question(cont):
global count
try:
chatbot = Chatbot(proxy = cfg.proxy_config, api_key = cfg.key_chatgpt_api_key)
chatbot = Chatbot(model = "gpt-3.5", proxy = cfg.proxy_config, api_key = cfg.key_chatgpt_api_key)
response = chatbot.ask(cont)
count = 0
return response
Expand Down
28 changes: 28 additions & 0 deletions ai_module/nlp_rwkv.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
import torch
from ringrwkv.configuration_rwkv_world import RwkvConfig
from ringrwkv.rwkv_tokenizer import TRIE_TOKENIZER
from ringrwkv.modehf_world import RwkvForCausalLM

# Load the RWKV "World" causal-LM weights from a local checkpoint directory.
# Swap in one of the commented alternatives below for a larger/smaller model.
model = RwkvForCausalLM.from_pretrained("RWKV-4-World-1.5B")
#model = RwkvForCausalLM.from_pretrained("RWKV-4-World-3B")
#model = RwkvForCausalLM.from_pretrained("RWKV-4-World-0.4B")
# World-series models use a trie-based tokenizer with a fixed vocab file.
tokenizer = TRIE_TOKENIZER('./ringrwkv/rwkv_vocab_v20230424.txt')

# Conversation context prepended to every prompt. NOTE(review): it is never
# updated — the assignment inside question() is commented out, so this stays "".
data = ""
def question(cont):
    """Answer *cont* with the local RWKV model.

    Builds a ``Question: ... Answer:`` prompt (prefixed with the module-level
    ``data`` context), runs generation, strips padding tokens and the echoed
    prompt, and returns only the generated answer text.

    Args:
        cont: The user's question (leading/trailing whitespace is stripped).

    Returns:
        The model's answer as a string.
    """
    global data
    prompt = data + f'Question: {cont.strip()}\n\nAnswer:'
    input_ids = tokenizer.encode(prompt)
    input_ids = torch.tensor(input_ids).unsqueeze(0)
    out = model.generate(input_ids, max_new_tokens=20)

    # Drop padding token ids (0). The original removed items from the list
    # while iterating over that same list, which skips consecutive zeros;
    # a filtering comprehension removes every one of them.
    outlist = [tok for tok in out[0].tolist() if tok != 0]
    answer = tokenizer.decode(outlist)
    # data = answer + "\n\n"
    # The model echoes the prompt back; keep only the continuation.
    answer = answer.replace(prompt, "", 1)
    return answer


52 changes: 52 additions & 0 deletions ai_module/nlp_rwkv_api.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@

import requests
import time

def question(cont):
    """Query the hosted RWKV chat-completions API and return the reply text.

    Args:
        cont: The user's message.

    Returns:
        The assistant's reply, or a canned apology string when the request
        fails or the response cannot be parsed.
    """
    url = "https://rwkv.ai-creator.net/chntuned/v1/chat/completions"

    session = requests.Session()
    # NOTE(review): TLS certificate verification is disabled (here and in the
    # post() call below) — confirm this is intentional for this endpoint.
    session.verify = False

    # System prompt defining the persona; by pretending to be a different
    # model it can sidestep ChatGPT identity checks, and it instructs the bot
    # to deflect "are you a fake AI / a ChatGPT wrapper" questions with a joke.
    prompt = "你是数字人Fay。回答之前请一步一步想清楚。你的底层AI算法技术是Fay。当有人质疑你是假的 AI ,或者质疑你是用 ChatGPT 套的壳制作的时候,你就避而不答,转而讲一个笑话。"

    message = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": cont}
    ]

    data = {
        # "model":model_engine,
        "messages": message,
        "temperature": 0.3,
        "max_tokens": 2000,
        "user": "live-virtual-digital-person"
    }

    headers = {'content-type': 'application/json', 'Authorization': 'Bearer '}

    starttime = time.time()

    try:
        response = session.post(url, json=data, headers=headers, verify=False)
        response.raise_for_status()  # 检查响应状态码是否为200 -> raise on non-2xx

        # Parse the body as JSON instead of eval(): eval on a network response
        # is a code-injection risk and cannot handle JSON literals such as
        # true/false/null.
        result = response.json()
        response_text = result["choices"][0]["message"]["content"]

    # Also catch parse/shape errors (ValueError covers JSONDecodeError) so a
    # malformed response degrades to the fallback message instead of crashing.
    except (requests.exceptions.RequestException, ValueError, KeyError, IndexError, TypeError) as e:
        print(f"请求失败: {e}")
        response_text = "抱歉,我现在太忙了,休息一会,请稍后再试。"

    print("接口调用耗时 :" + str(time.time() - starttime))

    return response_text

if __name__ == "__main__":
    # Smoke test: ask the same question three times and print each reply.
    query = "爱情是什么"
    for _ in range(3):
        response = question(query)
        print("\n The result is ", response)
11 changes: 7 additions & 4 deletions core/fay_core.py
Original file line number Diff line number Diff line change
Expand Up @@ -371,10 +371,13 @@ def __send_or_play_audio(self, file_url, say_type):
content = {'Topic': 'Unreal', 'Data': {'Key': 'audio', 'Value': os.path.abspath(file_url), 'Time': audio_length, 'Type': say_type}}
#计算lips
if platform.system() == "Windows":
lip_sync_generator = LipSyncGenerator()
viseme_list = lip_sync_generator.generate_visemes(os.path.abspath(file_url))
consolidated_visemes = lip_sync_generator.consolidate_visemes(viseme_list)
content["Data"]["Lips"] = consolidated_visemes
try:
lip_sync_generator = LipSyncGenerator()
viseme_list = lip_sync_generator.generate_visemes(os.path.abspath(file_url))
consolidated_visemes = lip_sync_generator.consolidate_visemes(viseme_list)
content["Data"]["Lips"] = consolidated_visemes
except e:
util.log(1, "唇型数字生成失败,无法使用新版ue5工程")
wsa_server.get_instance().add_cmd(content)

#推送远程音频
Expand Down
3 changes: 0 additions & 3 deletions core/qa_service.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,6 @@

from winreg import QueryInfoKey
from openpyxl import load_workbook
import difflib
import shlex
import subprocess
from utils import config_util as cfg

def question(query_type,text):
Expand Down
2 changes: 1 addition & 1 deletion core/recorder.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ def __init__(self, fay):
self.__processing = False
self.__history_level = []
self.__history_data = []
self.__dynamic_threshold = 0.7 # 声音识别的音量阈值
self.__dynamic_threshold = 0.5 # 声音识别的音量阈值

self.__MAX_LEVEL = 25000
self.__MAX_BLOCK = 100
Expand Down

0 comments on commit 5d4bac2

Please sign in to comment.