# introduction = "lmdeploy is a toolkit for compressing, deploying, and servicing Large Language Models (LLMs). It is a deployment tool for transformer-structured LLMs in server-side scenarios, supporting GPU server-side deployment, ensuring speed, and supporting Tensor Parallel along with optimizations for multiple concurrent processes. It offers comprehensive features including model conversion, cache features for caching historical sessions and more. Additionally, it provides access via WebUI, command line, and gRPC clients."

# add your repo here, we just take opencompass and lmdeploy as example
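# a minimal sketch of adding your own entry (hypothetical repo id and description),
# following the same keys as the sections below:
# [sg_search.your_repo]
# github_repo_id = "your-org/your-repo"
# introduction = "A short English description of what the repo does."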

[sg_search.mmpose]
github_repo_id = "open-mmlab/mmpose"
introduction = "MMPose is an open-source toolbox for pose estimation based on PyTorch"

[sg_search.mmdetection]
github_repo_id = "open-mmlab/mmdetection"
introduction = "MMDetection is an open source object detection toolbox based on PyTorch."
introduction = "XTuner is an efficient, flexible and full-featured toolkit for fine-tuning large models."

[sg_search.mmyolo]
github_repo_id = "open-mmlab/mmyolo"
introduction = "OpenMMLab YOLO series toolbox and benchmark. Implemented RTMDet, RTMDet-Rotated, YOLOv5, YOLOv6, YOLOv7, YOLOv8, YOLOX, PPYOLOE, etc."

[sg_search.Amphion]
github_repo_id = "open-mmlab/Amphion"
introduction = "Amphion is a toolkit for Audio, Music, and Speech Generation. Its purpose is to support reproducible research and help junior researchers and engineers get started in the field of audio, music, and speech generation research and development."

[sg_search.mmcv]
github_repo_id = "open-mmlab/mmcv"
introduction = "MMCV is a foundational library for computer vision research and it provides image/video processing, image and annotation visualization, image transformation, various CNN architectures and high-quality implementation of common CPU and CUDA ops"

[frontend]
# chat group assistant type, support "lark_group", "wechat_personal", "wechat_wkteam" and "none"
# for "lark_group", open https://open.feishu.cn/document/home/introduction-to-custom-app-development/self-built-application-development-process to create one
# for "wechat_personal", read ./docs/add_wechat_group_zh.md to setup gateway
# for "wkteam", see https://wkteam.cn/
type = "none"

# for "lark", it is chat group webhook url, send reply to group, for example "https://open.feishu.cn/open-apis/bot/v2/hook/xxxxxxxxxxxxxxx"
[frontend.wechat_personal]
# "wechat_personal" listen port
bind_port = 9527

[frontend.wechat_wkteam]
# wechat message callback server ip
callback_ip = "101.133.161.11"
callback_port = 9528

# public redis config
redis_host = "101.133.161.11"
redis_port = "6380"
redis_passwd = "hxd123"
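# the values above point at a shared/public redis instance; a sketch for pointing at your own
# instead (hypothetical host and credentials):
# redis_host = "127.0.0.1"
# redis_port = "6379"
# redis_passwd = "your-redis-password"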

# wkteam
account = ""
password = ""
# !!! `proxy` is a very important parameter, it's your account location
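# a hypothetical example only; the comment above says `proxy` encodes your account location,
# so consult https://wkteam.cn/ for the value matching where your WeChat account usually logs in:
# proxy = 2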
# introduction = "For evaluating Large Language Models (LLMs). It provides a fully open-source, reproducible evaluation framework, supporting one-stop evaluation for large language models and multimodal models. Based on distributed technology, it can efficiently evaluate models with a large number of parameters. The evaluation directions are summarized in five capability dimensions: knowledge, language, understanding, reasoning, and examination. It integrates and collects more than 70 evaluation datasets, providing in total over 400,000 model evaluation questions. Additionally, it offers evaluations for three types of capabilities specific to large models: long text, security, and coding."
# introduction = "lmdeploy is a toolkit for compressing, deploying, and servicing Large Language Models (LLMs). It is a deployment tool for transformer-structured LLMs in server-side scenarios, supporting GPU server-side deployment, ensuring speed, and supporting Tensor Parallel along with optimizations for multiple concurrent processes. It offers comprehensive features including model conversion, cache features for caching historical sessions and more. Additionally, it provides access via WebUI, command line, and gRPC clients."
83
+
84
+
[frontend]
85
+
# chat group assistant type, support "lark", "lark_group", "wechat_personal" and "none"
86
+
# for "lark", open https://open.feishu.cn/document/client-docs/bot-v3/add-custom-bot to add bot, **only send, cannot receive**
87
+
# for "lark_group", open https://open.feishu.cn/document/home/introduction-to-custom-app-development/self-built-application-development-process to create one
88
+
# for "wechat_personal", read ./docs/add_wechat_group_zh.md to setup gateway
89
+
type = "none"
90
+
91
+
# for "lark", it is chat group webhook url, send reply to group, for example "https://open.feishu.cn/open-apis/bot/v2/hook/xxxxxxxxxxxxxxx"
92
+
# for "lark_group", it is the url to fetch chat group message, for example "http://101.133.161.20:6666/fetch", `101.133.161.20` is your own public IPv4 addr
0 commit comments