
Commit 02f3565

close connection within exception
1 parent cfa39bb commit 02f3565

File tree: 2 files changed (+10, -12 lines)


openChat.py

Lines changed: 8 additions & 11 deletions
```diff
@@ -1,4 +1,5 @@
 import asyncio, websockets, os, sys, json, ssl
+from datetime import datetime
 from typing import Any
 from langchain_openai import ChatOpenAI
 from langchain.memory import ConversationBufferWindowMemory, ConversationBufferMemory
@@ -20,6 +21,7 @@
     "gpt-4": 4096,
     "gpt-4-turbo": 8192
 }
+start_time = 0
 
 # ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
 # ssl_context.load_cert_chain(certfile='leither.uk.orig.pem', keyfile='leither.uk.cert.pem')
@@ -53,6 +55,7 @@ async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
     while True:
         try:
             async for message in websocket:
+                start_time = datetime.now()
                 event = json.loads(message)
                 params = event["parameters"]
                 if params["llm"] == "openai":
@@ -64,9 +67,6 @@ async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
                 elif params["llm"] == "qianfan":
                     pass
 
-                # if params["client"] == "mobile":
-                #     CHAT_LLM.streaming = False
-
                 if "rawtext" in event["input"]:
                     print(message)
                     # the request is from secretary APP. If it is too long, seperate it.
@@ -89,7 +89,7 @@ async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
                         print(chunk.content, end="|", flush=True) # chunk size can be big
                         resp += chunk.content
                         await websocket.send(json.dumps({"type": "stream", "data": chunk.content}))
-                    print(cb)
+                    print('\n', cb)
                     sys.stdout.flush()
                     await websocket.send(json.dumps({
                         "type": "result",
@@ -115,7 +115,7 @@ async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
                     chain = ConversationChain(llm=CHAT_LLM, memory=memory, output_parser=StrOutputParser())
                     async for chunk in chain.astream(event["input"]["query"]):
                         print(chunk, end="|", flush=True) # chunk size can be big
-                    print(cb)
+                    print('\n', cb)
                     sys.stdout.flush()
                     await websocket.send(json.dumps({
                         "type": "result",
@@ -124,14 +124,11 @@ async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
                         "cost": cb.total_cost}))
 
         except websockets.exceptions.WebSocketException as e:
-            # keep abnormal messages from logging
-            # print("Error:", type(e), e)
-            pass
-        finally:
             try:
                 await websocket.close()
-            except NameError:
-                pass
+            finally:
+                print("Websocket closed abnormally", e)
+                break
 
 async def main():
     # async with websockets.serve(handler, "", 8505, ssl=ssl_context):
```
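Net effect in openChat.py: each incoming message is stamped with datetime.now() (presumably for latency tracking), and the socket is now closed inside the except block, where e is guaranteed to be bound, so the old finally/except NameError workaround goes away and the receive loop exits after an abnormal close. A minimal sketch of the resulting control flow, assuming a standard websockets server handler; the dispatch body is elided and illustrative:

```python
import json
from datetime import datetime

import websockets

async def handler(websocket):
    # Illustrative receive loop mirroring the post-commit shape of openChat.py.
    while True:
        try:
            async for message in websocket:
                start_time = datetime.now()  # stamp arrival; presumably for latency measurement
                event = json.loads(message)
                # ... dispatch to the selected LLM and stream the reply back ...
        except websockets.exceptions.WebSocketException as e:
            # Closing inside `except` means `e` is always bound here, so the
            # old `except NameError: pass` guard around close() is unnecessary.
            try:
                await websocket.close()
            finally:
                print("Websocket closed abnormally", e)
                break  # leave the while-loop; this connection is finished
```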

openaiCBHandler.py

Lines changed: 2 additions & 1 deletion
```diff
@@ -42,7 +42,8 @@ def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
 
         # update shared state behind lock
         with self._lock:
-            self.total_cost += prompt_cost + completion_cost
+            self.total_cost = prompt_cost + completion_cost
+            # self.total_cost += prompt_cost + completion_cost
             self.total_tokens = self.prompt_tokens + self.completion_tokens
             self.successful_requests += 1
 
```
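The openaiCBHandler.py change flips `+=` to `=`, so `total_cost` now reflects only the most recent request rather than a running sum across requests (the cumulative version is kept as a comment). A small sketch of the difference in observable behavior, using a hypothetical stand-in class rather than the real callback:

```python
import threading

class CostTrackerSketch:
    # Hypothetical stand-in for the callback's bookkeeping; field names follow the diff.
    def __init__(self):
        self._lock = threading.Lock()
        self.total_cost = 0.0
        self.successful_requests = 0

    def on_llm_end(self, prompt_cost: float, completion_cost: float) -> None:
        with self._lock:
            # Post-commit semantics: overwrite, so this is a per-request cost.
            self.total_cost = prompt_cost + completion_cost
            self.successful_requests += 1

cb = CostTrackerSketch()
cb.on_llm_end(0.002, 0.004)  # first request: total_cost == 0.006
cb.on_llm_end(0.001, 0.003)  # second request: total_cost == 0.004, not 0.010
print(cb.total_cost, cb.successful_requests)
```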