Skip to content

Commit

Permalink
feat: add minimax model
Browse files Browse the repository at this point in the history
  • Loading branch information
lmy668 committed Jun 12, 2024
1 parent 405372d commit 200cd33
Show file tree
Hide file tree
Showing 6 changed files with 271 additions and 17 deletions.
4 changes: 4 additions & 0 deletions bot/bot_factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,10 @@ def create_bot(bot_type):
elif bot_type == const.MOONSHOT:
from bot.moonshot.moonshot_bot import MoonshotBot
return MoonshotBot()

elif bot_type == const.MiniMax:
from bot.minimax.minimax_bot import MinimaxBot
return MinimaxBot()


raise RuntimeError
151 changes: 151 additions & 0 deletions bot/minimax/minimax_bot.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,151 @@
# encoding:utf-8

import time

import openai
import openai.error
from bot.bot import Bot
from bot.minimax.minimax_session import MinimaxSession
from bot.session_manager import SessionManager
from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf, load_config
from bot.chatgpt.chat_gpt_session import ChatGPTSession
import requests
from common import const


# ZhipuAI对话模型API
class MinimaxBot(Bot):
def __init__(self):
super().__init__()
self.args = {
"model": conf().get("model") or "abab6.5", # 对话模型的名称
"temperature": conf().get("temperature", 0.3), # 如果设置,值域须为 [0, 1] 我们推荐 0.3,以达到较合适的效果。
"top_p": conf().get("top_p", 0.95), # 使用默认值
}
self.api_key = conf().get("Minimax_api_key")
self.group_id = conf().get("Minimax_group_id")
self.base_url = conf().get("Minimax_base_url", f"https://api.minimax.chat/v1/text/chatcompletion_pro?GroupId={self.group_id}")
# tokens_to_generate/bot_setting/reply_constraints可自行修改
self.request_body = {
"model": self.args["model"],
"tokens_to_generate": 2048,
"reply_constraints": {"sender_type": "BOT", "sender_name": "MM智能助理"},
"messages": [],
"bot_setting": [
{
"bot_name": "MM智能助理",
"content": "MM智能助理是一款由MiniMax自研的,没有调用其他产品的接口的大型语言模型。MiniMax是一家中国科技公司,一直致力于进行大模型相关的研究。",
}
],
}
self.sessions = SessionManager(MinimaxSession, model=const.MiniMax)

def reply(self, query, context: Context = None) -> Reply:
# acquire reply content
logger.info("[Minimax_AI] query={}".format(query))
if context.type == ContextType.TEXT:
session_id = context["session_id"]
reply = None
clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
if query in clear_memory_commands:
self.sessions.clear_session(session_id)
reply = Reply(ReplyType.INFO, "记忆已清除")
elif query == "#清除所有":
self.sessions.clear_all_session()
reply = Reply(ReplyType.INFO, "所有人记忆已清除")
elif query == "#更新配置":
load_config()
reply = Reply(ReplyType.INFO, "配置已更新")
if reply:
return reply
session = self.sessions.session_query(query, session_id)
logger.debug("[Minimax_AI] session query={}".format(session))

model = context.get("Minimax_model")
new_args = self.args.copy()
if model:
new_args["model"] = model
# if context.get('stream'):
# # reply in stream
# return self.reply_text_stream(query, new_query, session_id)

reply_content = self.reply_text(session, args=new_args)
logger.debug(
"[Minimax_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
session.messages,
session_id,
reply_content["content"],
reply_content["completion_tokens"],
)
)
if reply_content["completion_tokens"] == 0 and len(reply_content["content"]) > 0:
reply = Reply(ReplyType.ERROR, reply_content["content"])
elif reply_content["completion_tokens"] > 0:
self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
reply = Reply(ReplyType.TEXT, reply_content["content"])
else:
reply = Reply(ReplyType.ERROR, reply_content["content"])
logger.debug("[Minimax_AI] reply {} used 0 tokens.".format(reply_content))
return reply
else:
reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
return reply

def reply_text(self, session: MinimaxSession, args=None, retry_count=0) -> dict:
"""
call openai's ChatCompletion to get the answer
:param session: a conversation session
:param session_id: session id
:param retry_count: retry count
:return: {}
"""
try:
headers = {"Content-Type": "application/json", "Authorization": "Bearer " + self.api_key}
self.request_body["messages"].extend(session.messages)
logger.info("[Minimax_AI] request_body={}".format(self.request_body))
# logger.info("[Minimax_AI] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
res = requests.post(self.base_url, headers=headers, json=self.request_body)

# self.request_body["messages"].extend(response.json()["choices"][0]["messages"])
if res.status_code == 200:
response = res.json()
return {
"total_tokens": response["usage"]["total_tokens"],
"completion_tokens": response["usage"]["total_tokens"],
"content": response["reply"],
}
else:
response = res.json()
error = response.get("error")
logger.error(f"[Minimax_AI] chat failed, status_code={res.status_code}, " f"msg={error.get('message')}, type={error.get('type')}")

result = {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
need_retry = False
if res.status_code >= 500:
# server error, need retry
logger.warn(f"[Minimax_AI] do retry, times={retry_count}")
need_retry = retry_count < 2
elif res.status_code == 401:
result["content"] = "授权失败,请检查API Key是否正确"
elif res.status_code == 429:
result["content"] = "请求过于频繁,请稍后再试"
need_retry = retry_count < 2
else:
need_retry = False

if need_retry:
time.sleep(3)
return self.reply_text(session, args, retry_count + 1)
else:
return result
except Exception as e:
logger.exception(e)
need_retry = retry_count < 2
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
if need_retry:
return self.reply_text(session, args, retry_count + 1)
else:
return result
72 changes: 72 additions & 0 deletions bot/minimax/minimax_session.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
from bot.session_manager import Session
from common.log import logger

"""
e.g.
[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who won the world series in 2020?"},
{"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
{"role": "user", "content": "Where was it played?"}
]
"""


class MinimaxSession(Session):
def __init__(self, session_id, system_prompt=None, model="minimax"):
super().__init__(session_id, system_prompt)
self.model = model
# self.reset()

def add_query(self, query):
user_item = {"sender_type": "USER", "sender_name": self.session_id, "text": query}
self.messages.append(user_item)

def add_reply(self, reply):
assistant_item = {"sender_type": "BOT", "sender_name": "MM智能助理", "text": reply}
self.messages.append(assistant_item)

def discard_exceeding(self, max_tokens, cur_tokens=None):
precise = True
try:
cur_tokens = self.calc_tokens()
except Exception as e:
precise = False
if cur_tokens is None:
raise e
logger.debug("Exception when counting tokens precisely for query: {}".format(e))
while cur_tokens > max_tokens:
if len(self.messages) > 2:
self.messages.pop(1)
elif len(self.messages) == 2 and self.messages[1]["sender_type"] == "BOT":
self.messages.pop(1)
if precise:
cur_tokens = self.calc_tokens()
else:
cur_tokens = cur_tokens - max_tokens
break
elif len(self.messages) == 2 and self.messages[1]["sender_type"] == "USER":
logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
break
else:
logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
break
if precise:
cur_tokens = self.calc_tokens()
else:
cur_tokens = cur_tokens - max_tokens
return cur_tokens

def calc_tokens(self):
return num_tokens_from_messages(self.messages, self.model)


def num_tokens_from_messages(messages, model):
"""Returns the number of tokens used by a list of messages."""
# 官方token计算规则:"对于中文文本来说,1个token通常对应一个汉字;对于英文文本来说,1个token通常对应3至4个字母或1个单词"
# 详情请产看文档:https://help.aliyun.com/document_detail/2586397.html
# 目前根据字符串长度粗略估计token数,不影响正常使用
tokens = 0
for msg in messages:
tokens += len(msg["text"])
return tokens
3 changes: 3 additions & 0 deletions bridge/bridge.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,9 @@ def __init__(self):
if model_type in ["moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]:
self.btype["chat"] = const.MOONSHOT

if model_type in ["abab6.5-chat"]:
self.btype["chat"] = const.MiniMax

if conf().get("use_linkai") and conf().get("linkai_api_key"):
self.btype["chat"] = const.LINKAI
if not conf().get("voice_to_text") or conf().get("voice_to_text") in ["openai"]:
Expand Down
35 changes: 30 additions & 5 deletions common/const.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
CHATGPTONAZURE = "chatGPTOnAzure"
LINKAI = "linkai"
CLAUDEAI = "claude"
CLAUDEAPI= "claudeAPI"
CLAUDEAPI = "claudeAPI"
QWEN = "qwen"

QWEN_DASHSCOPE = "dashscope"
Expand All @@ -17,6 +17,7 @@
GEMINI = "gemini"
ZHIPU_AI = "glm-4"
MOONSHOT = "moonshot"
MiniMax = "minimax"


# model
Expand All @@ -35,10 +36,34 @@
TTS_1 = "tts-1"
TTS_1_HD = "tts-1-hd"

MODEL_LIST = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "wenxin", "wenxin-4", "xunfei", "claude", "claude-3-opus-20240229", "gpt-4-turbo",
"gpt-4-turbo-preview", "gpt-4-1106-preview", GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT_4o, QWEN, GEMINI, ZHIPU_AI, MOONSHOT,
QWEN_TURBO, QWEN_PLUS, QWEN_MAX, LINKAI_35, LINKAI_4_TURBO, LINKAI_4o]
MODEL_LIST = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-4",
"wenxin",
"wenxin-4",
"xunfei",
"claude",
"claude-3-opus-20240229",
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4-1106-preview",
GPT4_TURBO_PREVIEW,
GPT4_TURBO_01_25,
GPT_4o,
QWEN,
GEMINI,
ZHIPU_AI,
MOONSHOT,
QWEN_TURBO,
QWEN_PLUS,
QWEN_MAX,
LINKAI_35,
LINKAI_4_TURBO,
LINKAI_4o,
MiniMax,
]

# channel
FEISHU = "feishu"
DINGTALK = "dingtalk"
DINGTALK = "dingtalk"
23 changes: 11 additions & 12 deletions config.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@
"group_name_keyword_white_list": [], # 开启自动回复的群名称关键词列表
"group_chat_in_one_session": ["ChatGPT测试群"], # 支持会话上下文共享的群名称
"nick_name_black_list": [], # 用户昵称黑名单
"group_welcome_msg": "", # 配置新人进群固定欢迎语,不配置则使用随机风格欢迎
"group_welcome_msg": "", # 配置新人进群固定欢迎语,不配置则使用随机风格欢迎
"trigger_by_self": False, # 是否允许机器人触发
"text_to_image": "dall-e-2", # 图片生成模型,可选 dall-e-2, dall-e-3
# Azure OpenAI dall-e-3 配置
Expand All @@ -48,7 +48,7 @@
"image_create_prefix": ["画", "看", "找"], # 开启图片回复的前缀
"concurrency_in_session": 1, # 同一会话最多有多少条消息在处理中,大于1可能乱序
"image_create_size": "256x256", # 图片大小,可选有 256x256, 512x512, 1024x1024 (dall-e-3默认为1024x1024)
"group_chat_exit_group": False,
"group_chat_exit_group": False,
# chatgpt会话参数
"expires_in_seconds": 3600, # 无操作会话的过期时间
# 人格描述
Expand Down Expand Up @@ -76,7 +76,7 @@
"claude_api_cookie": "",
"claude_uuid": "",
# claude api key
"claude_api_key":"",
"claude_api_key": "",
# 通义千问API, 获取方式查看文档 https://help.aliyun.com/document_detail/2587494.html
"qwen_access_key_id": "",
"qwen_access_key_secret": "",
Expand Down Expand Up @@ -108,8 +108,8 @@
"azure_voice_api_key": "",
"azure_voice_region": "japaneast",
# elevenlabs 语音api配置
"xi_api_key": "", #获取ap的方法可以参考https://docs.elevenlabs.io/api-reference/quick-start/authentication
"xi_voice_id": "", #ElevenLabs提供了9种英式、美式等英语发音id,分别是“Adam/Antoni/Arnold/Bella/Domi/Elli/Josh/Rachel/Sam”
"xi_api_key": "", # 获取ap的方法可以参考https://docs.elevenlabs.io/api-reference/quick-start/authentication
"xi_voice_id": "", # ElevenLabs提供了9种英式、美式等英语发音id,分别是“Adam/Antoni/Arnold/Bella/Domi/Elli/Josh/Rachel/Sam”
# 服务时间限制,目前支持itchat
"chat_time_module": False, # 是否开启服务时间限制
"chat_start_time": "00:00", # 服务开始时间
Expand Down Expand Up @@ -137,14 +137,12 @@
"wechatcomapp_secret": "", # 企业微信app的secret
"wechatcomapp_agent_id": "", # 企业微信app的agent_id
"wechatcomapp_aes_key": "", # 企业微信app的aes_key

# 飞书配置
"feishu_port": 80, # 飞书bot监听端口
"feishu_app_id": "", # 飞书机器人应用APP Id
"feishu_app_secret": "", # 飞书机器人APP secret
"feishu_token": "", # 飞书 verification token
"feishu_bot_name": "", # 飞书机器人的名字

# 钉钉配置
"dingtalk_client_id": "", # 钉钉机器人Client ID
"dingtalk_client_secret": "", # 钉钉机器人Client Secret
Expand All @@ -161,18 +159,21 @@
"plugin_trigger_prefix": "$", # 规范插件提供聊天相关指令的前缀,建议不要和管理员指令前缀"#"冲突
# 是否使用全局插件配置
"use_global_plugin_config": False,
"max_media_send_count": 3, # 单次最大发送媒体资源的个数
"max_media_send_count": 3, # 单次最大发送媒体资源的个数
"media_send_interval": 1, # 发送图片的事件间隔,单位秒
# 智谱AI 平台配置
"zhipu_ai_api_key": "",
"zhipu_ai_api_base": "https://open.bigmodel.cn/api/paas/v4",
"moonshot_api_key": "",
"moonshot_base_url":"https://api.moonshot.cn/v1/chat/completions",
"moonshot_base_url": "https://api.moonshot.cn/v1/chat/completions",
# LinkAI平台配置
"use_linkai": False,
"linkai_api_key": "",
"linkai_app_code": "",
"linkai_api_base": "https://api.link-ai.tech", # linkAI服务地址
"Minimax_api_key": "",
"Minimax_group_id": "",
"Minimax_base_url": "",
}


Expand Down Expand Up @@ -346,6 +347,4 @@ def pconf(plugin_name: str) -> dict:


# 全局配置,用于存放全局生效的状态
global_config = {
"admin_users": []
}
global_config = {"admin_users": []}

0 comments on commit 200cd33

Please sign in to comment.