Skip to content

Commit

Permalink
🚧 wip: add lm studio
Browse files Browse the repository at this point in the history
  • Loading branch information
arvinxx committed Nov 4, 2024
1 parent 6ff7089 commit d5d285e
Show file tree
Hide file tree
Showing 11 changed files with 331 additions and 3 deletions.
2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@
"@langchain/community": "^0.3.0",
"@lobehub/chat-plugin-sdk": "^1.32.4",
"@lobehub/chat-plugins-gateway": "^1.9.0",
"@lobehub/icons": "^1.35.4",
"@lobehub/icons": "^1.37.0",
"@lobehub/tts": "^1.25.1",
"@lobehub/ui": "^1.152.0",
"@neondatabase/serverless": "^0.10.1",
Expand Down
4 changes: 3 additions & 1 deletion src/app/(main)/settings/llm/ProviderList/providers.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import {
GoogleProviderCard,
GroqProviderCard,
HunyuanProviderCard,
LMStudioProviderCard,
MinimaxProviderCard,
MistralProviderCard,
MoonshotProviderCard,
Expand All @@ -34,8 +35,8 @@ import { useGithubProvider } from './Github';
import { useHuggingFaceProvider } from './HuggingFace';
import { useOllamaProvider } from './Ollama';
import { useOpenAIProvider } from './OpenAI';
import { useWenxinProvider } from './Wenxin';
import { useSenseNovaProvider } from './SenseNova';
import { useWenxinProvider } from './Wenxin';

export const useProviderList = (): ProviderItem[] => {
const AzureProvider = useAzureProvider();
Expand Down Expand Up @@ -74,6 +75,7 @@ export const useProviderList = (): ProviderItem[] => {
ZhiPuProviderCard,
ZeroOneProviderCard,
SenseNovaProvider,
LMStudioProviderCard,
StepfunProviderCard,
MoonshotProviderCard,
BaichuanProviderCard,
Expand Down
4 changes: 4 additions & 0 deletions src/config/modelProviders/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ import GoogleProvider from './google';
import GroqProvider from './groq';
import HuggingFaceProvider from './huggingface';
import HunyuanProvider from './hunyuan';
import LMStudioProvider from './lmstudio';
import MinimaxProvider from './minimax';
import MistralProvider from './mistral';
import MoonshotProvider from './moonshot';
Expand Down Expand Up @@ -65,6 +66,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
HunyuanProvider.chatModels,
WenxinProvider.chatModels,
SenseNovaProvider.chatModels,
LMStudioProvider.chatModels,
].flat();

export const DEFAULT_MODEL_PROVIDER_LIST = [
Expand Down Expand Up @@ -100,6 +102,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
Ai360Provider,
TaichuProvider,
SiliconCloudProvider,
LMStudioProvider,
];

export const filterEnabledModels = (provider: ModelProviderCard) => {
Expand All @@ -124,6 +127,7 @@ export { default as GoogleProviderCard } from './google';
export { default as GroqProviderCard } from './groq';
export { default as HuggingFaceProviderCard } from './huggingface';
export { default as HunyuanProviderCard } from './hunyuan';
export { default as LMStudioProviderCard } from './lmstudio';
export { default as MinimaxProviderCard } from './minimax';
export { default as MistralProviderCard } from './mistral';
export { default as MoonshotProviderCard } from './moonshot';
Expand Down
35 changes: 35 additions & 0 deletions src/config/modelProviders/lmstudio.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
import { ModelProviderCard } from '@/types/llm';

// Provider card for LM Studio, a locally-hosted runtime exposing an
// OpenAI-compatible API. Registered in src/config/modelProviders/index.ts.
// ref: https://lmstudio.ai/docs
// NOTE(review): the default model ids below ('llama3.1', 'qwen2.5-14b-instruct')
// look copied from the Ollama card — confirm they match the identifiers
// LM Studio actually serves for locally loaded models.
const LMStudio: ModelProviderCard = {
  // Default chat models surfaced in the model picker. Descriptions are
  // user-facing runtime strings (rendered in the UI) and are kept verbatim.
  chatModels: [
    {
      description:
        'Llama 3.1 是 Meta 推出的领先模型,支持高达 405B 参数,可应用于复杂对话、多语言翻译和数据分析领域。',
      displayName: 'Llama 3.1 8B',
      // enabled: pre-checked in the provider's model list by default
      enabled: true,
      id: 'llama3.1',
      // claimed context window; underscore separator is a numeric literal (128000)
      tokens: 128_000,
    },
    {
      description: 'Qwen2.5 是阿里巴巴的新一代大规模语言模型,以优异的性能支持多元化的应用需求。',
      displayName: 'Qwen2.5 14B',
      enabled: true,
      id: 'qwen2.5-14b-instruct',
      tokens: 128_000,
    },
  ],
  // presumably lets the browser call the local server directly (no proxy),
  // matching how the Ollama provider is configured — TODO confirm
  defaultShowBrowserRequest: true,
  // Provider key; must match the ModelProvider enum entry and the
  // `lmstudio` config key in DEFAULT_LLM_CONFIG / AgentRuntime params.
  id: 'lmstudio',
  // Expose the "fetch models from server" button in provider settings.
  modelList: { showModelFetcher: true },
  modelsUrl: 'https://lmstudio.ai/models',
  name: 'LM Studio',
  // Local server needs no API key, so hide the key input in settings.
  showApiKey: false,
  // Streaming-output smoothing settings for this provider's responses.
  smoothing: {
    speed: 2,
    text: true,
  },
  url: 'https://lmstudio.ai',
};

export default LMStudio;
5 changes: 5 additions & 0 deletions src/const/settings/llm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import {
GroqProviderCard,
HuggingFaceProviderCard,
HunyuanProviderCard,
LMStudioProviderCard,
MinimaxProviderCard,
MistralProviderCard,
MoonshotProviderCard,
Expand Down Expand Up @@ -87,6 +88,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
enabled: false,
enabledModels: filterEnabledModels(HunyuanProviderCard),
},
lmstudio: {
enabled: false,
enabledModels: filterEnabledModels(LMStudioProviderCard),
},
minimax: {
enabled: false,
enabledModels: filterEnabledModels(MinimaxProviderCard),
Expand Down
7 changes: 7 additions & 0 deletions src/libs/agent-runtime/AgentRuntime.ts
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ import { LobeGoogleAI } from './google';
import { LobeGroq } from './groq';
import { LobeHuggingFaceAI } from './huggingface';
import { LobeHunyuanAI } from './hunyuan';
import { LobeLMStudioAI } from './lmstudio';
import { LobeMinimaxAI } from './minimax';
import { LobeMistralAI } from './mistral';
import { LobeMoonshotAI } from './moonshot';
Expand Down Expand Up @@ -138,6 +139,7 @@ class AgentRuntime {
groq: Partial<ClientOptions>;
huggingface: { apiKey?: string; baseURL?: string };
hunyuan: Partial<ClientOptions>;
lmstudio: Partial<ClientOptions>;
minimax: Partial<ClientOptions>;
mistral: Partial<ClientOptions>;
moonshot: Partial<ClientOptions>;
Expand Down Expand Up @@ -197,6 +199,11 @@ class AgentRuntime {
break;
}

case ModelProvider.LMStudio: {
runtimeModel = new LobeLMStudioAI(params.lmstudio);
break;
}

case ModelProvider.Ollama: {
runtimeModel = new LobeOllamaAI(params.ollama);
break;
Expand Down
Loading

0 comments on commit d5d285e

Please sign in to comment.