Commit ad3b7a1

💄 style: add lm studio
arvinxx committed Nov 26, 2024
1 parent 66c820b commit ad3b7a1
Showing 10 changed files with 328 additions and 2 deletions.
src/app/(main)/settings/llm/ProviderList/providers.tsx (3 additions, 1 deletion)

@@ -12,6 +12,7 @@ import {
   GroqProviderCard,
   HunyuanProviderCard,
   InternLMProviderCard,
+  LMStudioProviderCard,
   MinimaxProviderCard,
   MistralProviderCard,
   MoonshotProviderCard,
@@ -38,8 +39,8 @@ import { useGithubProvider } from './Github';
 import { useHuggingFaceProvider } from './HuggingFace';
 import { useOllamaProvider } from './Ollama';
 import { useOpenAIProvider } from './OpenAI';
-import { useWenxinProvider } from './Wenxin';
 import { useSenseNovaProvider } from './SenseNova';
+import { useWenxinProvider } from './Wenxin';
 
 export const useProviderList = (): ProviderItem[] => {
   const AzureProvider = useAzureProvider();
@@ -81,6 +82,7 @@ export const useProviderList = (): ProviderItem[] => {
     ZhiPuProviderCard,
     ZeroOneProviderCard,
     SenseNovaProvider,
+    LMStudioProviderCard,
     StepfunProviderCard,
     MoonshotProviderCard,
     BaichuanProviderCard,
src/config/modelProviders/index.ts (4 additions)

@@ -16,6 +16,7 @@ import GroqProvider from './groq';
 import HuggingFaceProvider from './huggingface';
 import HunyuanProvider from './hunyuan';
 import InternLMProvider from './internlm';
+import LMStudioProvider from './lmstudio';
 import MinimaxProvider from './minimax';
 import MistralProvider from './mistral';
 import MoonshotProvider from './moonshot';
@@ -73,6 +74,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   WenxinProvider.chatModels,
   SenseNovaProvider.chatModels,
   InternLMProvider.chatModels,
+  LMStudioProvider.chatModels,
 ].flat();
 
 export const DEFAULT_MODEL_PROVIDER_LIST = [
@@ -112,6 +114,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   InternLMProvider,
   SiliconCloudProvider,
   GiteeAIProvider,
+  LMStudioProvider,
 ];
 
 export const filterEnabledModels = (provider: ModelProviderCard) => {
@@ -139,6 +142,7 @@ export { default as GroqProviderCard } from './groq';
 export { default as HuggingFaceProviderCard } from './huggingface';
 export { default as HunyuanProviderCard } from './hunyuan';
 export { default as InternLMProviderCard } from './internlm';
+export { default as LMStudioProviderCard } from './lmstudio';
 export { default as MinimaxProviderCard } from './minimax';
 export { default as MistralProviderCard } from './mistral';
 export { default as MoonshotProviderCard } from './moonshot';
src/config/modelProviders/lmstudio.ts (new file, 38 additions)

@@ -0,0 +1,38 @@
+import { ModelProviderCard } from '@/types/llm';
+
+// ref: https://lmstudio.ai/models
+const LMStudio: ModelProviderCard = {
+  chatModels: [
+    {
+      description:
+        'Llama 3.1 is a leading model from Meta, supporting up to 405B parameters, suitable for complex dialogue, multilingual translation, and data analysis.',
+      displayName: 'Llama 3.1 8B',
+      enabled: true,
+      id: 'llama3.1',
+      tokens: 128_000,
+    },
+    {
+      description: 'Qwen2.5 is a new-generation large-scale language model from Alibaba, supporting diverse application needs with excellent performance.',
+      displayName: 'Qwen2.5 14B',
+      enabled: true,
+      id: 'qwen2.5-14b-instruct',
+      tokens: 128_000,
+    },
+  ],
+  defaultShowBrowserRequest: true,
+  id: 'lmstudio',
+  modelList: { showModelFetcher: true },
+  modelsUrl: 'https://lmstudio.ai/models',
+  name: 'LM Studio',
+  proxyUrl: {
+    placeholder: 'http://127.0.0.1:1234/v1',
+  },
+  showApiKey: false,
+  smoothing: {
+    speed: 2,
+    text: true,
+  },
+  url: 'https://lmstudio.ai',
+};
+
+export default LMStudio;
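For context: LM Studio serves an OpenAI-compatible REST API from its local server, which is why this card needs no API key (showApiKey: false) and only declares a proxyUrl placeholder. A minimal sketch of talking to that server with the openai npm package, assuming a local LM Studio instance with the llama3.1 model loaded (the model id and port are taken from the card above):

import OpenAI from 'openai';

// Point the SDK at the local LM Studio server instead of api.openai.com.
// LM Studio ignores the key, but the SDK requires a non-empty string.
const client = new OpenAI({
  apiKey: 'lm-studio',
  baseURL: 'http://127.0.0.1:1234/v1',
});

const response = await client.chat.completions.create({
  messages: [{ content: 'Say hello in one sentence.', role: 'user' }],
  model: 'llama3.1', // one of the ids declared in chatModels above
});

console.log(response.choices[0]?.message.content);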
src/config/modelProviders/ollama.ts (4 additions)

@@ -292,6 +292,10 @@ const Ollama: ModelProviderCard = {
   modelsUrl: 'https://ollama.com/library',
   name: 'Ollama',
   showApiKey: false,
+  smoothing: {
+    speed: 2,
+    text: true,
+  },
   url: 'https://ollama.com',
 };
 
src/const/settings/llm.ts (4 additions, 1 deletion)

@@ -3,11 +3,14 @@ import { ModelProvider } from '@/libs/agent-runtime';
 import { genUserLLMConfig } from '@/utils/genUserLLMConfig'
 
 export const DEFAULT_LLM_CONFIG = genUserLLMConfig({
+  lmstudio: {
+    fetchOnClient: true,
+  },
   ollama: {
     enabled: true,
     fetchOnClient: true,
   },
-  openai: {
+  openai: {
     enabled: true,
   },
 });
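Note that the new lmstudio entry sets only fetchOnClient: true and not enabled: true, so LM Studio defaults to client-side requests but stays disabled until the user switches it on, while Ollama remains enabled by default. The genUserLLMConfig helper itself is not shown in this commit; a hypothetical sketch of the shape it presumably produces for these keys (the enabled: false default is an assumption):

// Hypothetical expansion -- the real helper lives in '@/utils/genUserLLMConfig'
// and fills in defaults for every registered provider, not just these three.
const DEFAULT_LLM_CONFIG = {
  lmstudio: { enabled: false, fetchOnClient: true },
  ollama: { enabled: true, fetchOnClient: true },
  openai: { enabled: true },
  // ...defaults for the remaining providers
};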
src/libs/agent-runtime/AgentRuntime.ts (7 additions)

@@ -19,6 +19,7 @@ import { LobeGroq } from './groq';
 import { LobeHuggingFaceAI } from './huggingface';
 import { LobeHunyuanAI } from './hunyuan';
 import { LobeInternLMAI } from './internlm';
+import { LobeLMStudioAI } from './lmstudio';
 import { LobeMinimaxAI } from './minimax';
 import { LobeMistralAI } from './mistral';
 import { LobeMoonshotAI } from './moonshot';
@@ -145,6 +146,7 @@
       huggingface: { apiKey?: string; baseURL?: string };
       hunyuan: Partial<ClientOptions>;
       internlm: Partial<ClientOptions>;
+      lmstudio: Partial<ClientOptions>;
       minimax: Partial<ClientOptions>;
       mistral: Partial<ClientOptions>;
       moonshot: Partial<ClientOptions>;
@@ -205,6 +207,11 @@
         break;
       }
 
+      case ModelProvider.LMStudio: {
+        runtimeModel = new LobeLMStudioAI(params.lmstudio);
+        break;
+      }
+
       case ModelProvider.Ollama: {
         runtimeModel = new LobeOllamaAI(params.ollama);
         break;
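The lmstudio runtime module imported here is among the changed files not loaded on this page. Sibling providers in this repo are typically thin wrappers around an OpenAI-compatible factory, so a plausible sketch of src/libs/agent-runtime/lmstudio/index.ts might look as follows; treat the factory name and options as assumptions, since the actual file is not shown:

// Hypothetical sketch of src/libs/agent-runtime/lmstudio/index.ts.
// Assumes the LobeOpenAICompatibleFactory helper that sibling OpenAI-compatible
// providers in this repo use; the real implementation may differ.
import { ModelProvider } from '../types';
import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';

export const LobeLMStudioAI = LobeOpenAICompatibleFactory({
  // Default to the local server address used as the proxyUrl placeholder above.
  baseURL: 'http://127.0.0.1:1234/v1',
  provider: ModelProvider.LMStudio,
});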
(The remaining 4 of the 10 changed files were not loaded on this page.)
