Replies: 1 comment
-
def fetch_chat_stream_result(model_name="", global_context_json=""):
    """Stream a chat completion from the local /api/chat endpoint.

    POSTs the conversation history (``global_context_json``) as JSON and
    yields each streamed message fragment's text as it arrives.

    NOTE(review): relies on module-level ``api_url`` and
    ``INTERVAL_SECONDS_a`` being defined elsewhere in this file.
    """
    payload = json.dumps(
        {"model": model_name, "messages": global_context_json, "stream": True}
    ).encode("utf-8")
    req = urllib.request.Request(
        urllib.parse.urljoin(api_url, "/api/chat"),
        data=payload,
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(req) as response:
        for raw_line in response:
            chunk = json.loads(raw_line.decode("utf-8"))
            if "message" not in chunk:
                continue
            # Throttle emission so the UI thread can keep up with the stream.
            time.sleep(INTERVAL_SECONDS_a)
            yield chunk["message"]["content"]
def generate_multigraph_fixed_ai_response(ipt_tag="", reply_tag="", global_context_json="", table_parent="", q_tag="", chat_context_json_path="", flowing_flag=""):
    """Stream an AI reply into the ``reply_tag`` text widget word by word.

    Reads the model name from the ``ipt_tag`` input widget, streams the
    response via ``fetch_chat_stream_result``, appends the finished message
    to ``global_context_json`` as an assistant turn, and records the
    think-chain / reply split into the chat table.

    Side effects: updates globals ``new_complete_message`` and honors
    ``is_output_paused``; mutates ``global_context_json`` in place.

    Fixes vs. original: removed a no-op ``continue`` inside the pause-wait
    loop, removed a useless ``finally: pass``, and replaced the
    ``print(e, 11111111)`` debug marker with a readable error message.
    """
    global new_complete_message, is_output_paused
    try:
        model_name = dpg.get_value(ipt_tag)
        ai_message = "{}回答:::".format(model_name)
        count = 0
        for one_word in fetch_chat_stream_result(model_name=model_name, global_context_json=global_context_json):
            # Block here while the user has paused output.
            while is_output_paused:
                time.sleep(0.1)
            replay_content = dpg.get_value(reply_tag)
            # Insert a line break every 70 words to keep lines readable.
            if count % 70 == 0:
                dpg.configure_item(reply_tag, default_value=replay_content + "\n" + one_word)
            else:
                dpg.configure_item(reply_tag, default_value=replay_content + one_word)
            ai_message += one_word
            count += 1
        # Persist the completed assistant turn into the shared context.
        global_context_json.append({"role": "assistant", "content": ai_message})
        replay_content = dpg.get_value(reply_tag)
        new_complete_message = replay_content
        think_chain, reply_content = extract_think_content(input_string=new_complete_message)
        one_response_to_current_table(table_parent=table_parent, q_tag=q_tag, think_chain=think_chain, new_complete_message=reply_content, chat_context_json_path=chat_context_json_path)
        # Trailing blank lines visually separate this reply from the next turn.
        dpg.configure_item(reply_tag, default_value=replay_content + "\n\n")
    except Exception as e:
        print(f"generate_multigraph_fixed_ai_response failed: {e}")
def stream_chat_response_APICALL_FROM_MODEL_PROVIDER(q_tag="", messages="", reply_tag="", table_tag="", tag_ipt_apikey="", json_path=""):
    """Stream a chat completion from the model provider's OpenAI-style API.

    If using an API key, check the API documentation from the model
    provider. The key is read from the ``tag_ipt_apikey`` input widget;
    ``base_url`` and ``model_api_ds`` are module-level settings.

    Streams both ``reasoning_content`` (DeepSeek reasoner) and regular
    ``content`` deltas into the ``reply_tag`` widget, then records the
    (reasoning, answer) pair into the chat table.

    Returns the accumulated text on success, or an error-message string on
    API failure.

    Fixes vs. original: removed an ``assert 1>2`` debug leftover that was
    abused for control flow inside an ``except`` (and is stripped under
    ``-O``), removed a no-op ``continue`` in the pause-wait loop, removed
    commented-out dead code, and made the success path actually return
    ``full_response`` (previously commented out, so success returned None
    while failure returned a string).

    NOTE(review): the discussion suggests each committed input message
    should start a worker thread that drives this call — confirm the caller
    does so, since this function blocks while streaming.
    """
    global is_output_paused
    value_api_key = dpg.get_value(tag_ipt_apikey)
    client = OpenAI(api_key=value_api_key, base_url=base_url)
    full_response = ""
    if model_api_ds == "deepseek-reasoner":
        print(messages, "++++++++++++", model_api_ds)
        try:
            response = client.chat.completions.create(
                model=model_api_ds,
                messages=messages,
                stream=True,
            )
            reasoning_content = ""
            content = ""
            for chunk in response:
                # Block here while the user has paused output.
                while is_output_paused:
                    time.sleep(0.1)
                delta = chunk.choices[0].delta
                # Regular answer tokens.
                if getattr(delta, "content", None):
                    content += delta.content
                # DeepSeek-reasoner also streams its chain of thought.
                if getattr(delta, "reasoning_content", None):
                    reasoning_content += delta.reasoning_content
                # Show reasoning first, then the answer, rebuilt each chunk.
                dpg.configure_item(reply_tag, default_value=reasoning_content + content)
            full_response = reasoning_content + content
            try:
                # Only record complete turns that have both parts.
                if reasoning_content and content:
                    one_response_to_current_table(table_parent=table_tag, q_tag=q_tag, think_chain=reasoning_content, new_complete_message=content, chat_context_json_path=json_path)
            except Exception as e:
                # Best-effort table write: log and continue streaming result.
                print(e)
        except Exception as e:
            error_msg = f"API调用错误: {str(e)}"
            print(error_msg)
            return error_msg
    return full_response
Beta Was this translation helpful? Give feedback.
0 replies
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
-
How to implement a GPT/LLM AI chat interface?
....
chatbot
Input box
button
Chatbot
Human question:
Robot answer:
Human question:
Robot answer:
I'm not sure how to create such a chat interface.
Beta Was this translation helpful? Give feedback.
All reactions