-
Notifications
You must be signed in to change notification settings - Fork 1
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
1 parent
4c19ac6
commit ed12db1
Showing
4 changed files
with
405 additions
and
4 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,104 @@ | ||
import os | ||
from typing import Union, List | ||
from urllib.parse import urlparse | ||
|
||
import pydantic | ||
import uvicorn | ||
from fastapi import FastAPI | ||
from fastapi.staticfiles import StaticFiles | ||
from pydantic import BaseModel | ||
from starlette import status | ||
from starlette.middleware.cors import CORSMiddleware | ||
from starlette.responses import JSONResponse, RedirectResponse | ||
|
||
|
||
async def document():
    """Redirect the bare root path to the interactive Swagger docs."""
    docs_redirect = RedirectResponse(url="/docs")
    return docs_redirect
|
||
|
||
class BaseResponse(BaseModel):
    """Generic response envelope: an HTTP-style status code plus a message."""

    # Defaults describe the success case; handlers override on error.
    code: int = pydantic.Field(200, description="HTTP status code")
    msg: str = pydantic.Field("success", description="HTTP status message")

    class Config:
        # Example payload rendered in the generated OpenAPI schema.
        schema_extra = {"example": {"code": 200, "msg": "success"}}
|
||
|
||
class ListDocsResponse(BaseResponse):
    """Response envelope whose payload is a list of documents."""

    # NOTE(review): annotated List[dict], yet the example below shows plain
    # file-name strings — one of the two is wrong; confirm against the handler
    # that actually fills this field before tightening the type.
    data: List[dict] = pydantic.Field(..., description="List of document names")

    class Config:
        # Example payload rendered in the generated OpenAPI schema.
        schema_extra = {
            "example": {
                "code": 200,
                "msg": "success",
                "data": ["doc1.docx", "doc2.pdf", "doc3.txt"],
            }
        }
|
||
|
||
# Adapted from: https://blog.csdn.net/wenxingchen/article/details/129013509
def respSuccessJson(data: Union[list, dict, str] = None, msg: str = "Success"):
    """Build a 200 OK JSON envelope: ``{"code": 200, "msg": ..., "data": ...}``.

    Args:
        data: payload to return; ``None`` is replaced by an empty dict.
        msg: human-readable status message.

    BUGFIX: the previous ``data or {}`` replaced *any* falsy payload
    ([], "", 0) with {}; only ``None`` is substituted now, so an empty
    list round-trips to the client unchanged.
    """
    return JSONResponse(
        status_code=status.HTTP_200_OK,
        content={
            'code': 200,
            'msg': msg,
            # Substitute only for None so falsy-but-valid payloads survive.
            'data': data if data is not None else {},
        }
    )
|
||
|
||
def uri_validator(x):
    """Return True if *x* parses as an absolute URI (has both a scheme and a
    network location), False otherwise.

    Values that ``urlparse`` cannot handle (None, non-strings, malformed
    input) are treated as "not a URI" rather than raising.
    """
    try:
        parts = urlparse(x)
    except (AttributeError, TypeError, ValueError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit — those must propagate.
        return False
    return all([parts.scheme, parts.netloc])
|
||
|
||
async def cli(audio_path: str, task_id: str):
    """Handler registered for POST /convert-text.

    NOTE(review): stub — accepts an audio path and a task id but does nothing
    yet; the actual conversion logic still has to be implemented.
    """
    pass
|
||
|
||
def api_start(host, port):
    """Build the FastAPI application and serve it with uvicorn (blocking).

    Args:
        host: interface to bind, e.g. "0.0.0.0".
        port: TCP port to listen on.
    """
    global app
    # NOTE(review): declared global but never assigned in this function;
    # kept so any module-level consumers are unaffected.
    global local_doc_qa

    app = FastAPI()
    # Project root: two directories above this file.
    # BUGFIX: was os.path.basename(__file__), which made the path resolve
    # relative to the current working directory instead of this file.
    root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
    print(root)

    app.mount("/static", StaticFiles(directory=f"{root}/static"), name="static")
    # Allow cross-origin requests.
    # NOTE(review): wildcard origins combined with allow_credentials=True is
    # overly permissive — tighten the origin list for production.
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
    # app.websocket("/local_doc_qa/stream-chat/{knowledge_base_id}")(stream_chat)

    # Root path redirects to the interactive docs.
    app.get("/", response_model=BaseResponse)(document)

    app.post("/convert-text", response_model=BaseResponse)(cli)

    # Blocks until the server is shut down.
    uvicorn.run(app, host=host, port=port)
|
||
|
||
""" | ||
解决内网环境 FastAPI访问/docs接口文档显示空白、js/css无法加载 | ||
https://zhuanlan.zhihu.com/p/517645846?utm_id=0 | ||
https://blog.csdn.net/jaket5219999/article/details/135003381 | ||
""" | ||
if __name__ == '__main__': | ||
host = "0.0.0.0" | ||
port = 8000 | ||
api_start(host, port) |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,128 @@ | ||
# -*- coding: utf-8 -*- | ||
# ------------------------------- | ||
# @项目:pyProject | ||
# @文件:qwen官方样例.py | ||
# @时间:2024/9/9 14:40 | ||
# @作者:xming | ||
# ------------------------------- | ||
import json | ||
import os | ||
|
||
from qwen_agent.llm import get_chat_model | ||
|
||
|
||
# Hard-coded dummy function that always returns the same canned weather.
# In production this would be your backend API or an external API.
def get_current_weather(location, unit='fahrenheit'):
    """Return a JSON string with mock weather data for *location*."""
    canned_reports = {
        'tokyo': {'location': 'Tokyo', 'temperature': '10', 'unit': 'celsius'},
        'san francisco': {'location': 'San Francisco', 'temperature': '72', 'unit': 'fahrenheit'},
        'paris': {'location': 'Paris', 'temperature': '22', 'unit': 'celsius'},
    }
    needle = location.lower()
    # Substring match, in the same order the original if/elif chain checked.
    for city_key, report in canned_reports.items():
        if city_key in needle:
            return json.dumps(report)
    return json.dumps({'location': location, 'temperature': 'unknown'})
|
||
|
||
def test():
    """End-to-end demo of OpenAI-style function calling with qwen-agent.

    Sends a weather question, lets the model request get_current_weather,
    executes it locally, feeds the result back, and streams the final answer.
    Requires a reachable model service (DashScope by default).
    """
    llm = get_chat_model({
        # Use the model service provided by DashScope:
        'model': 'qwen-max',
        'model_server': 'dashscope',
        'api_key': os.getenv('DASHSCOPE_API_KEY'),

        # Use the model service provided by Together.AI:
        # 'model': 'Qwen/Qwen2-7B-Instruct',
        # 'model_server': 'https://api.together.xyz',  # api_base
        # 'api_key': os.getenv('TOGETHER_API_KEY'),

        # Use your own model service compatible with OpenAI API:
        # 'model': 'Qwen/Qwen2-72B-Instruct',
        # 'model_server': 'http://localhost:8000/v1',  # api_base
        # 'api_key': 'EMPTY',
    })

    # Step 1: send the conversation and the available functions to the model.
    messages = [{
        'role': 'user',
        'content': "What's the weather like in San Francisco?"
    }]
    # JSON-schema description of the callable tool(s).
    functions = [{
        'name': 'get_current_weather',
        'description': 'Get the current weather in a given location',
        'parameters': {
            'type': 'object',
            'properties': {
                'location': {
                    'type': 'string',
                    'description':
                    'The city and state, e.g. San Francisco, CA',
                },
                'unit': {
                    'type': 'string',
                    'enum': ['celsius', 'fahrenheit']
                },
            },
            'required': ['location'],
        },
    }]

    print('# Assistant Response 1:')
    responses = []
    # Streaming: each iteration rebinds `responses` to the accumulated reply
    # so far, so after the loop it holds the final, complete response list.
    for responses in llm.chat(messages=messages,
                              functions=functions,
                              stream=True):
        print(responses)

    messages.extend(responses)  # extend the conversation with the assistant's reply

    # Step 2: check whether the model wants to call a function.
    last_response = messages[-1]
    if last_response.get('function_call', None):

        # Step 3: call the function.
        # Note: the JSON response may not always be valid; be sure to handle errors.
        available_functions = {
            'get_current_weather': get_current_weather,
        }  # only one function in this example, but you can have multiple
        function_name = last_response['function_call']['name']
        function_to_call = available_functions[function_name]
        # The model returns arguments as a JSON string; parse before calling.
        function_args = json.loads(last_response['function_call']['arguments'])
        function_response = function_to_call(
            location=function_args.get('location'),
            unit=function_args.get('unit'),
        )
        print('# Function Response:')
        print(function_response)

        # Step 4: send the function call result back to the model.
        messages.append({
            'role': 'function',
            'name': function_name,
            'content': function_response,
        })  # extend the conversation with the function response

        print('# Assistant Response 2:')
        for responses in llm.chat(
                messages=messages,
                functions=functions,
                stream=True,
        ):  # get a new response from the model, which can now see the function response
            print(responses)
|
||
|
||
if __name__ == '__main__':
    # Run the function-calling demo when executed as a script.
    test()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.