Skip to content

Commit

Permalink
♻️ refactor(model): clear and add models (lobehub#3208)
Browse files Browse the repository at this point in the history
  • Loading branch information
RubuJam authored Jul 22, 2024
1 parent 5aec15a commit ef54191
Show file tree
Hide file tree
Showing 14 changed files with 81 additions and 60 deletions.
4 changes: 2 additions & 2 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -113,9 +113,9 @@
"@khmyznikov/pwa-install": "^0.3.9",
"@lobehub/chat-plugin-sdk": "^1.32.4",
"@lobehub/chat-plugins-gateway": "^1.9.0",
"@lobehub/icons": "^1.26.0",
"@lobehub/icons": "^1.27.0",
"@lobehub/tts": "^1.24.3",
"@lobehub/ui": "^1.146.6",
"@lobehub/ui": "^1.146.9",
"@microsoft/fetch-event-source": "^2.0.1",
"@neondatabase/serverless": "^0.9.4",
"@next/third-parties": "^14.2.4",
Expand Down
1 change: 1 addition & 0 deletions src/config/modelProviders/deepseek.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import { ModelProviderCard } from '@/types/llm';

// ref https://platform.deepseek.com/api-docs/pricing
const DeepSeek: ModelProviderCard = {
chatModels: [
{
Expand Down
24 changes: 6 additions & 18 deletions src/config/modelProviders/google.ts
Original file line number Diff line number Diff line change
Expand Up @@ -43,42 +43,30 @@ const Google: ModelProviderCard = {
vision: true,
},
{
description: 'The best model for scaling across a wide range of tasks. This is the latest model.',
description:
'The best model for scaling across a wide range of tasks. This is the latest model.',
displayName: 'Gemini 1.0 Pro',
id: 'gemini-1.0-pro-latest',
maxOutput: 2048,
tokens: 30_720 + 2048,
},
{
description: 'The best model for scaling across a wide range of tasks. This is a stable model that supports tuning.',
description:
'The best model for scaling across a wide range of tasks. This is a stable model that supports tuning.',
displayName: 'Gemini 1.0 Pro 001 (Tuning)',
functionCall: true,
id: 'gemini-1.0-pro-001',
maxOutput: 2048,
tokens: 30_720 + 2048,
},
{
description: 'The best model for scaling across a wide range of tasks. Released April 9, 2024.',
description:
'The best model for scaling across a wide range of tasks. Released April 9, 2024.',
displayName: 'Gemini 1.0 Pro 002 (Tuning)',
id: 'gemini-1.0-pro-002',
maxOutput: 2048,
tokens: 30_720 + 2048,
},
{
description: 'The most capable model for highly complex tasks',
displayName: 'Gemini 1.0 Ultra',
id: 'gemini-ultra',
maxOutput: 2048,
tokens: 32_768,
},
{
description: 'A legacy text-only model optimized for chat conversations',
displayName: 'PaLM 2 Chat (Legacy)',
id: 'chat-bison-001',
legacy: true,
maxOutput: 1024,
// tokens: 4096 + 1024, // none tokens test
}
],
checkModel: 'gemini-1.5-flash',
id: 'google',
Expand Down
16 changes: 11 additions & 5 deletions src/config/modelProviders/groq.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ import { ModelProviderCard } from '@/types/llm';
const Groq: ModelProviderCard = {
chatModels: [
{
displayName: 'LLaMA3-3-70B',
displayName: 'LLaMA3 70B',
enabled: true,
functionCall: true,
id: 'llama3-70b-8192',
Expand All @@ -18,14 +18,20 @@ const Groq: ModelProviderCard = {
tokens: 32_768,
},
{
displayName: 'Gemma-7b-it',
enabled: true,
displayName: 'Gemma 7B',
functionCall: true,
id: 'gemma-7b-it',
tokens: 8192,
},
{
displayName: 'LLaMA3-3-8B',
displayName: 'Gemma2 9B',
enabled: true,
functionCall: true,
id: 'gemma2-9b-it',
tokens: 8192,
},
{
displayName: 'LLaMA3 8B',
enabled: true,
functionCall: true,
id: 'llama3-8b-8192',
Expand All @@ -37,7 +43,7 @@ const Groq: ModelProviderCard = {
tokens: 4096,
},
],
checkModel: 'gemma-7b-it',
checkModel: 'gemma2-9b-it',
id: 'groq',
name: 'Groq',
proxyUrl: {
Expand Down
29 changes: 26 additions & 3 deletions src/config/modelProviders/ollama.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import { ModelProviderCard } from '@/types/llm';

// ref https://ollama.com/library
const Ollama: ModelProviderCard = {
chatModels: [
{
Expand Down Expand Up @@ -78,6 +79,28 @@ const Ollama: ModelProviderCard = {
id: 'gemma:2b',
tokens: 8192,
},
{
displayName: 'Deepseek V2 16B',
enabled: true,
id: 'deepseek-v2',
tokens: 32_000,
},
{
displayName: 'Deepseek V2 236B',
id: 'deepseek-v2:236b',
tokens: 128_000,
},
{
displayName: 'Deepseek Coder V2 16B', // https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct
enabled: true,
id: 'deepseek-coder-v2',
tokens: 128_000,
},
{
displayName: 'Deepseek Coder V2 236B', // https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Instruct
id: 'deepseek-coder-v2:236b',
tokens: 128_000,
},
{
displayName: 'Llama2 Chat 13B',
id: 'llama2:13b',
Expand Down Expand Up @@ -135,10 +158,10 @@ const Ollama: ModelProviderCard = {
tokens: 16_384,
},
{
displayName: 'Mistral',
displayName: 'MathΣtral',
enabled: true,
id: 'mistral',
tokens: 32_768, // https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/blob/main/config.json
id: 'mathstral',
tokens: 32_000, // https://huggingface.co/mistralai/Mathstral-7B-v0.1
},
{
displayName: 'Mixtral 8x7B',
Expand Down
21 changes: 12 additions & 9 deletions src/config/modelProviders/openai.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
import { ModelProviderCard } from '@/types/llm';

// ref https://platform.openai.com/docs/models
// ref:
// https://platform.openai.com/docs/models
// https://platform.openai.com/docs/deprecations
const OpenAI: ModelProviderCard = {
chatModels: [
{
Expand Down Expand Up @@ -53,14 +55,14 @@ const OpenAI: ModelProviderCard = {
tokens: 128_000,
},
{
description: 'Currently points to gpt-4-1106-vision-preview',
description: 'Currently points to gpt-4-1106-vision-preview', // Will be discontinued on December 6, 2024
displayName: 'GPT-4 Turbo Vision Preview',
id: 'gpt-4-vision-preview',
tokens: 128_000,
vision: true,
},
{
displayName: 'GPT-4 Turbo Vision Preview (1106)',
displayName: 'GPT-4 Turbo Vision Preview (1106)', // Will be discontinued on December 6, 2024
id: 'gpt-4-1106-vision-preview',
tokens: 128_000,
vision: true,
Expand All @@ -85,20 +87,21 @@ const OpenAI: ModelProviderCard = {
tokens: 8192,
},
{
description: 'Currently points to gpt-4-32k-0613',
description: 'Currently points to gpt-4-32k-0613', // Will be discontinued on June 6, 2025
displayName: 'GPT-4 32K',
functionCall: true,
id: 'gpt-4-32k',
tokens: 32_768,
},
{
displayName: 'GPT-4 32K (0613)',
displayName: 'GPT-4 32K (0613)', // Will be discontinued on June 6, 2025
functionCall: true,
id: 'gpt-4-32k-0613',
tokens: 32_768,
},
{
description: 'GPT 3.5 Turbo,适用于各种文本生成和理解任务',
description:
'GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125',
displayName: 'GPT-3.5 Turbo',
functionCall: true,
id: 'gpt-3.5-turbo',
Expand All @@ -122,20 +125,20 @@ const OpenAI: ModelProviderCard = {
tokens: 4096,
},
{
description: 'Currently points to gpt-3.5-turbo-16k-0613',
description: 'Currently points to gpt-3.5-turbo-16k-0613', // Will be discontinued on September 13, 2024
displayName: 'GPT-3.5 Turbo 16K',
id: 'gpt-3.5-turbo-16k',
legacy: true,
tokens: 16_385,
},
{
displayName: 'GPT-3.5 Turbo (0613)',
displayName: 'GPT-3.5 Turbo (0613)', // Will be discontinued on September 13, 2024
id: 'gpt-3.5-turbo-0613',
legacy: true,
tokens: 4096,
},
{
displayName: 'GPT-3.5 Turbo 16K (0613)',
description: 'Currently points to gpt-3.5-turbo-16k-0613', // Will be discontinued on September 13, 2024
id: 'gpt-3.5-turbo-16k-0613',
legacy: true,
tokens: 16_385,
Expand Down
6 changes: 3 additions & 3 deletions src/config/modelProviders/perplexity.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ import { ModelProviderCard } from '@/types/llm';
const Perplexity: ModelProviderCard = {
chatModels: [
{
displayName: 'Perplexity 7B Chat',
displayName: 'Perplexity 8B Chat',
id: 'llama-3-sonar-small-32k-chat',
tokens: 32_768,
},
Expand All @@ -15,7 +15,7 @@ const Perplexity: ModelProviderCard = {
tokens: 32_768,
},
{
displayName: 'Perplexity 7B Online',
displayName: 'Perplexity 8B Online',
id: 'llama-3-sonar-small-32k-online',
tokens: 28_000,
},
Expand All @@ -41,7 +41,7 @@ const Perplexity: ModelProviderCard = {
tokens: 16_384,
},
],
checkModel: 'pplx-7b-chat',
checkModel: 'llama-3-8b-instruct',
id: 'perplexity',
name: 'Perplexity',
proxyUrl: {
Expand Down
4 changes: 2 additions & 2 deletions src/config/modelProviders/togetherai.ts
Original file line number Diff line number Diff line change
Expand Up @@ -58,9 +58,9 @@ const TogetherAI: ModelProviderCard = {
tokens: 32_768,
},
{
displayName: 'Qwen 1.5 Chat (14B)',
displayName: 'Qwen 1.5 Chat (32B)',
enabled: true,
id: 'Qwen/Qwen1.5-14B-Chat',
id: 'Qwen/Qwen1.5-32B-Chat',
tokens: 32_768,
},
{
Expand Down
10 changes: 3 additions & 7 deletions src/config/modelProviders/zeroone.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,8 @@ const ZeroOne: ModelProviderCard = {
tokens: 32_768,
},
{
description: '在 yi-large 模型的基础上支持并强化了工具调用的能力,适用于各种需要搭建 agent 或 workflow 的业务场景。',
description:
'在 yi-large 模型的基础上支持并强化了工具调用的能力,适用于各种需要搭建 agent 或 workflow 的业务场景。',
displayName: 'Yi Large FC',
enabled: true,
functionCall: true,
Expand Down Expand Up @@ -67,14 +68,9 @@ const ZeroOne: ModelProviderCard = {
id: 'yi-large-preview',
tokens: 16_384,
},
{
description: '「兼容版本模型」实时信息获取,以及文本推理能力增强。',
displayName: 'Yi Large RAG Preview',
id: 'yi-large-rag-preview',
tokens: 16_384,
},
],
checkModel: 'yi-large',
disableBrowserRequest: true,
id: 'zeroone',
name: '01.AI',
};
Expand Down
12 changes: 8 additions & 4 deletions src/config/modelProviders/zhipu.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,8 @@ import { ModelProviderCard } from '@/types/llm';
const ZhiPu: ModelProviderCard = {
chatModels: [
{
description: 'GLM-4-AllTools 是专门为支持智能体和相关任务而进一步优化的模型版本。它能够自主理解用户的意图,规划复杂的指令,并能够调用一个或多个工具(例如网络浏览器、代码解释器和文本生图像)以完成复杂的任务。',
description:
'GLM-4-AllTools 是专门为支持智能体和相关任务而进一步优化的模型版本。它能够自主理解用户的意图,规划复杂的指令,并能够调用一个或多个工具(例如网络浏览器、代码解释器和文本生图像)以完成复杂的任务。',
displayName: 'GLM-4-AllTools',
enabled: true,
functionCall: true,
Expand Down Expand Up @@ -50,22 +51,25 @@ const ZhiPu: ModelProviderCard = {
tokens: 128_000,
},
{
description: '实现了视觉语言特征的深度融合,支持视觉问答、图像字幕、视觉定位、复杂目标检测等各类图像理解任务',
description:
'实现了视觉语言特征的深度融合,支持视觉问答、图像字幕、视觉定位、复杂目标检测等各类图像理解任务',
displayName: 'GLM-4V',
enabled: true,
id: 'glm-4v',
tokens: 2000,
vision: true,
},
{
description: '适用于对知识量、推理能力、创造力要求较高的场景,比如广告文案、小说写作、知识类写作、代码生成等',
description:
'适用于对知识量、推理能力、创造力要求较高的场景,比如广告文案、小说写作、知识类写作、代码生成等', // Will be discontinued on December 31, 2024
displayName: 'GLM-3-Turbo',
functionCall: true,
id: 'glm-3-turbo',
tokens: 128_000,
},
{
description: 'CodeGeeX是一款强大的AI编程助手,提供智能问答和代码补全功能,支持多种编程语言,帮助开发者提高编程效率。',
description:
'CodeGeeX是一款强大的AI编程助手,提供智能问答和代码补全功能,支持多种编程语言,帮助开发者提高编程效率。',
displayName: 'CodeGeeX-4',
enabled: true,
functionCall: false,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
exports[`LobeOpenAI > models > should get models 1`] = `
[
{
"description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务",
"description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125",
"displayName": "GPT-3.5 Turbo",
"functionCall": true,
"id": "gpt-3.5-turbo",
Expand All @@ -17,7 +17,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
"tokens": 16385,
},
{
"displayName": "GPT-3.5 Turbo 16K (0613)",
"description": "Currently points to gpt-3.5-turbo-16k-0613",
"id": "gpt-3.5-turbo-16k-0613",
"legacy": true,
"tokens": 16385,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ Updated by OpenAI to point to the [latest version of GPT-3.5](/models?q=openai/g
Usage of Gemma is subject to Google's [Gemma Terms of Use](https://ai.google.dev/gemma/terms).",
"displayName": "Google: Gemma 7B",
"enabled": true,
"enabled": false,
"functionCall": false,
"id": "google/gemma-7b-it",
"maxTokens": undefined,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,7 @@ exports[`LobeTogetherAI > models > should get models 1`] = `
{
"description": "Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen.",
"displayName": "Qwen 1.5 Chat (14B)",
"enabled": true,
"enabled": false,
"functionCall": false,
"id": "Qwen/Qwen1.5-14B-Chat",
"maxOutput": 32768,
Expand All @@ -145,7 +145,7 @@ exports[`LobeTogetherAI > models > should get models 1`] = `
{
"description": "Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen.",
"displayName": "Qwen 1.5 Chat (32B)",
"enabled": false,
"enabled": true,
"functionCall": false,
"id": "Qwen/Qwen1.5-32B-Chat",
"maxOutput": 32768,
Expand Down Expand Up @@ -335,7 +335,7 @@ exports[`LobeTogetherAI > models > should get models 1`] = `
{
"description": "Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models.",
"displayName": "Gemma Instruct (7B)",
"enabled": true,
"enabled": false,
"functionCall": false,
"id": "google/gemma-7b-it",
"maxOutput": 8192,
Expand Down
Loading

0 comments on commit ef54191

Please sign in to comment.