From b5967d91ea90dfc1f8dad0bac0fc258933f97fe7 Mon Sep 17 00:00:00 2001 From: CanisMinor Date: Thu, 12 Sep 2024 19:37:33 +0800 Subject: [PATCH 1/2] =?UTF-8?q?=E2=9C=85=20test:=20Update=20test=20for=20n?= =?UTF-8?q?ew=20provider=20info=20(#3924)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ✅ test: Update test for new provider info * ✅ test: Update test --- .../openai/__snapshots__/index.test.ts.snap | 19 +++++++---- .../FromV3ToV4/fixtures/ollama-output-v4.json | 1 + .../config/__snapshots__/index.test.ts.snap | 28 +++++++++++++--- src/server/routers/edge/config/index.test.ts | 14 ++------ .../__snapshots__/action.test.ts.snap | 12 +++++++ .../user/slices/modelList/action.test.ts | 10 ++---- .../__snapshots__/parseModels.test.ts.snap | 32 +++++++++++++++++++ src/utils/parseModels.test.ts | 29 +---------------- 8 files changed, 89 insertions(+), 56 deletions(-) create mode 100644 src/store/user/slices/modelList/__snapshots__/action.test.ts.snap diff --git a/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap b/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap index 8f6a97d67eec..81287865943e 100644 --- a/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +++ b/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap @@ -14,7 +14,7 @@ exports[`LobeOpenAI > models > should get models 1`] = ` "tokens": 16385, }, { - "description": "Currently points to gpt-3.5-turbo-16k-0613", + "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125", "displayName": "GPT-3.5 Turbo 16K", "id": "gpt-3.5-turbo-16k", "legacy": true, @@ -25,7 +25,7 @@ exports[`LobeOpenAI > models > should get models 1`] = ` "tokens": 16385, }, { - "description": "Currently points to gpt-3.5-turbo-16k-0613", + "description": "GPT-3.5 Turbo 是 OpenAI 的一款基础模型,结合了高效性和经济性,广泛用于文本生成、理解和分析,专为指导性提示进行调整,去除了与聊天相关的优化。", "id": "gpt-3.5-turbo-16k-0613", "legacy": true, "pricing": { @@ -35,7 +35,7 @@ exports[`LobeOpenAI > models > should get models 1`] = ` "tokens": 16385, }, { - "displayName": "GPT-4 Turbo Vision Preview (1106)", + "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。", "id": "gpt-4-1106-vision-preview", "pricing": { "input": 10, @@ -48,6 +48,7 @@ exports[`LobeOpenAI > models > should get models 1`] = ` "id": "gpt-3.5-turbo-instruct-0914", }, { + "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。", "displayName": "GPT-4 Turbo Preview (0125)", "functionCall": true, "id": "gpt-4-0125-preview", @@ -58,7 +59,7 @@ exports[`LobeOpenAI > models > should get models 1`] = ` "tokens": 128000, }, { - "description": "Currently points to gpt-4-0125-preview", + "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。", "displayName": "GPT-4 Turbo Preview", "functionCall": true, "id": "gpt-4-turbo-preview", @@ -69,6 +70,7 @@ exports[`LobeOpenAI > models > should get models 1`] = ` "tokens": 128000, }, { + "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125", "displayName": "GPT-3.5 Turbo Instruct", "id": "gpt-3.5-turbo-instruct", "pricing": { @@ -81,6 +83,7 @@ exports[`LobeOpenAI > models > should get models 1`] = ` "id": "gpt-3.5-turbo-0301", }, { + "description": "GPT-3.5 Turbo 是 OpenAI 的一款基础模型,结合了高效性和经济性,广泛用于文本生成、理解和分析,专为指导性提示进行调整,去除了与聊天相关的优化。", 
"displayName": "GPT-3.5 Turbo (0613)", "id": "gpt-3.5-turbo-0613", "legacy": true, @@ -91,6 +94,7 @@ exports[`LobeOpenAI > models > should get models 1`] = ` "tokens": 4096, }, { + "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125", "displayName": "GPT-3.5 Turbo (1106)", "functionCall": true, "id": "gpt-3.5-turbo-1106", @@ -101,6 +105,7 @@ exports[`LobeOpenAI > models > should get models 1`] = ` "tokens": 16385, }, { + "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。", "displayName": "GPT-4 Turbo Preview (1106)", "functionCall": true, "id": "gpt-4-1106-preview", @@ -111,7 +116,7 @@ exports[`LobeOpenAI > models > should get models 1`] = ` "tokens": 128000, }, { - "description": "Currently points to gpt-4-1106-vision-preview", + "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。", "displayName": "GPT-4 Turbo Vision Preview", "id": "gpt-4-vision-preview", "pricing": { @@ -122,7 +127,7 @@ exports[`LobeOpenAI > models > should get models 1`] = ` "vision": true, }, { - "description": "Currently points to gpt-4-0613", + "description": "GPT-4 提供了一个更大的上下文窗口,能够处理更长的文本输入,适用于需要广泛信息整合和数据分析的场景。", "displayName": "GPT-4", "functionCall": true, "id": "gpt-4", @@ -133,6 +138,7 @@ exports[`LobeOpenAI > models > should get models 1`] = ` "tokens": 8192, }, { + "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125", "displayName": "GPT-3.5 Turbo (0125)", "functionCall": true, "id": "gpt-3.5-turbo-0125", @@ -143,6 +149,7 @@ exports[`LobeOpenAI > models > should get models 1`] = ` "tokens": 16385, }, { + "description": "GPT-4 提供了一个更大的上下文窗口,能够处理更长的文本输入,适用于需要广泛信息整合和数据分析的场景。", "displayName": "GPT-4 (0613)", "functionCall": true, "id": "gpt-4-0613", diff --git a/src/migrations/FromV3ToV4/fixtures/ollama-output-v4.json b/src/migrations/FromV3ToV4/fixtures/ollama-output-v4.json index c3f5e996d908..5b9a9eb2afeb 100644 --- a/src/migrations/FromV3ToV4/fixtures/ollama-output-v4.json +++ b/src/migrations/FromV3ToV4/fixtures/ollama-output-v4.json @@ -45,6 +45,7 @@ "endpoint": "", "customModelCards": [ { + "description": "LLaVA 是结合视觉编码器和 Vicuna 的多模态模型,用于强大的视觉和语言理解。", "displayName": "LLaVA 7B", "enabled": true, "id": "llava", diff --git a/src/server/routers/edge/config/__snapshots__/index.test.ts.snap b/src/server/routers/edge/config/__snapshots__/index.test.ts.snap index 61a7f4eae096..1931595af8e8 100644 --- a/src/server/routers/edge/config/__snapshots__/index.test.ts.snap +++ b/src/server/routers/edge/config/__snapshots__/index.test.ts.snap @@ -20,6 +20,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST "id": "claude-2", }, { + "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。", "displayName": "gpt-4-32k", "enabled": true, "functionCall": true, @@ -37,6 +38,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST > should work correct with gpt-4 1`] = ` [ { + "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125", "displayName": "GPT-3.5 Turbo (1106)", "enabled": true, "functionCall": true, @@ -60,7 +62,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST "tokens": 16385, }, { - "description": 
"Currently points to gpt-3.5-turbo-16k-0613", + "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125", "displayName": "GPT-3.5 Turbo 16K", "enabled": true, "id": "gpt-3.5-turbo-16k", @@ -72,7 +74,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST "tokens": 16385, }, { - "description": "Currently points to gpt-4-0613", + "description": "GPT-4 提供了一个更大的上下文窗口,能够处理更长的文本输入,适用于需要广泛信息整合和数据分析的场景。", "displayName": "GPT-4", "enabled": true, "functionCall": true, @@ -84,7 +86,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST "tokens": 8192, }, { - "description": "Currently points to gpt-4-32k-0613", + "description": "GPT-4 提供了一个更大的上下文窗口,能够处理更长的文本输入,适用于需要广泛信息整合和数据分析的场景。", "displayName": "GPT-4 32K", "enabled": true, "functionCall": true, @@ -96,6 +98,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST "tokens": 32768, }, { + "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。", "displayName": "GPT-4 Turbo Preview (1106)", "enabled": true, "functionCall": true, @@ -107,7 +110,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST "tokens": 128000, }, { - "description": "Currently points to gpt-4-1106-vision-preview", + "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。", "displayName": "GPT-4 Turbo Vision Preview", "enabled": true, "id": "gpt-4-vision-preview", @@ -121,6 +124,21 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST ] `; +exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST > show the hidden model 1`] = ` +{ + "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。", + "displayName": "GPT-4 Turbo Preview (1106)", + "enabled": true, + "functionCall": true, + "id": "gpt-4-1106-preview", + "pricing": { + "input": 10, + "output": 30, + }, + "tokens": 128000, +} +`; + exports[`configRouter > getGlobalConfig > Model Provider env > OPENROUTER_MODEL_LIST > custom deletion, addition, and renaming of models 1`] = ` { "enabled": false, @@ -130,6 +148,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENROUTER_MODEL_ ], "serverModelCards": [ { + "description": "Google 的 Gemma 7B 具有出色的计算效率,适适用于多种硬件架构,如GPU和TPU。", "displayName": "Google: Gemma 7B (free)", "enabled": true, "functionCall": false, @@ -138,6 +157,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENROUTER_MODEL_ "vision": false, }, { + "description": "Mistral 7B Instruct 是一款高效的多语言模型,优化用于对话和问答,能在资源受限的环境中表现出色。", "displayName": "Mistral 7B Instruct (free)", "enabled": true, "functionCall": false, diff --git a/src/server/routers/edge/config/index.test.ts b/src/server/routers/edge/config/index.test.ts index 7b5bb041d9ca..8ec585f1f6ba 100644 --- a/src/server/routers/edge/config/index.test.ts +++ b/src/server/routers/edge/config/index.test.ts @@ -90,17 +90,9 @@ describe('configRouter', () => { const result = response.languageModel?.openai?.serverModelCards; - expect(result?.find((o) => o.id === 'gpt-4-1106-preview')).toEqual({ - displayName: 'GPT-4 Turbo Preview (1106)', - functionCall: true, - enabled: true, - id: 'gpt-4-1106-preview', - tokens: 128000, - pricing: { - input: 10, - output: 30, - }, - }); + const model = result?.find((o) 
=> o.id === 'gpt-4-1106-preview'); + + expect(model).toMatchSnapshot(); process.env.OPENAI_MODEL_LIST = ''; }); diff --git a/src/store/user/slices/modelList/__snapshots__/action.test.ts.snap b/src/store/user/slices/modelList/__snapshots__/action.test.ts.snap new file mode 100644 index 000000000000..d1b25b895c83 --- /dev/null +++ b/src/store/user/slices/modelList/__snapshots__/action.test.ts.snap @@ -0,0 +1,12 @@ +// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html + +exports[`LLMSettingsSliceAction > refreshModelProviderList > visible 1`] = ` +{ + "description": "LLaVA 是结合视觉编码器和 Vicuna 的多模态模型,用于强大的视觉和语言理解。", + "displayName": "LLaVA 7B", + "enabled": true, + "id": "llava", + "tokens": 4096, + "vision": true, +} +`; diff --git a/src/store/user/slices/modelList/action.test.ts b/src/store/user/slices/modelList/action.test.ts index 8423b305d44a..ce11298ce9a0 100644 --- a/src/store/user/slices/modelList/action.test.ts +++ b/src/store/user/slices/modelList/action.test.ts @@ -154,13 +154,9 @@ describe('LLMSettingsSliceAction', () => { const ollamaList = result.current.modelProviderList.find((r) => r.id === 'ollama'); // Assert that setModelProviderConfig was not called - expect(ollamaList?.chatModels.find((c) => c.id === 'llava')).toEqual({ - displayName: 'LLaVA 7B', - enabled: true, - id: 'llava', - tokens: 4096, - vision: true, - }); + const model = ollamaList?.chatModels.find((c) => c.id === 'llava'); + + expect(model).toMatchSnapshot(); }); it('modelProviderListForModelSelect should return only enabled providers', () => { diff --git a/src/utils/__snapshots__/parseModels.test.ts.snap b/src/utils/__snapshots__/parseModels.test.ts.snap index cc974427007d..0fc16033775a 100644 --- a/src/utils/__snapshots__/parseModels.test.ts.snap +++ b/src/utils/__snapshots__/parseModels.test.ts.snap @@ -61,3 +61,35 @@ exports[`parseModelString > only add the model 1`] = ` "removed": [], } `; + +exports[`transformToChatModelCards > should have file with builtin models like gpt-4-0125-preview 1`] = ` +[ + { + "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。", + "displayName": "ChatGPT-4", + "enabled": true, + "files": true, + "functionCall": true, + "id": "gpt-4-0125-preview", + "pricing": { + "input": 10, + "output": 30, + }, + "tokens": 128000, + }, + { + "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。", + "displayName": "ChatGPT-4 Vision", + "enabled": true, + "files": true, + "functionCall": true, + "id": "gpt-4-turbo-2024-04-09", + "pricing": { + "input": 10, + "output": 30, + }, + "tokens": 128000, + "vision": true, + }, +] +`; diff --git a/src/utils/parseModels.test.ts b/src/utils/parseModels.test.ts index 9bc2c6058b60..9f418413cb6e 100644 --- a/src/utils/parseModels.test.ts +++ b/src/utils/parseModels.test.ts @@ -266,33 +266,6 @@ describe('transformToChatModelCards', () => { defaultChatModels: OpenAIProviderCard.chatModels, }); - expect(result).toEqual([ - { - displayName: 'ChatGPT-4', - files: true, - functionCall: true, - enabled: true, - id: 'gpt-4-0125-preview', - tokens: 128000, - pricing: { - input: 10, - output: 30, - }, - }, - { - description: 'GPT-4 Turbo 视觉版 (240409)', - displayName: 'ChatGPT-4 Vision', - files: true, - functionCall: true, - enabled: true, - id: 'gpt-4-turbo-2024-04-09', - tokens: 128000, - vision: true, - pricing: { - input: 10, - output: 30, - }, - }, - ]); + expect(result).toMatchSnapshot(); 
  });
});

From 028650b985c0e61c78f16e4ee6160568edca56f5 Mon Sep 17 00:00:00 2001
From: semantic-release-bot
Date: Thu, 12 Sep 2024 11:44:26 +0000
Subject: [PATCH 2/2] :bookmark: chore(release): v1.16.9 [skip ci]
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### [Version 1.16.9](https://github.com/lobehub/lobe-chat/compare/v1.16.8...v1.16.9)

Released on **2024-09-12**

#### 💄 Styles

- **misc**: Add model and provider desc and url.
Improvements and Fixes

#### Styles

* **misc**: Add model and provider desc and url, closes [#3920](https://github.com/lobehub/lobe-chat/issues/3920) ([ea9ff00](https://github.com/lobehub/lobe-chat/commit/ea9ff00))
[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
---
 CHANGELOG.md | 25 +++++++++++++++++++++++++
 package.json |  2 +-
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f966304f6e96..f4623522b362 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,31 @@
 # Changelog

+### [Version 1.16.9](https://github.com/lobehub/lobe-chat/compare/v1.16.8...v1.16.9)
+
+Released on **2024-09-12**
+
+#### 💄 Styles
+
+- **misc**: Add model and provider desc and url.
+
+
+
+Improvements and Fixes
+
+#### Styles
+
+- **misc**: Add model and provider desc and url, closes [#3920](https://github.com/lobehub/lobe-chat/issues/3920) ([ea9ff00](https://github.com/lobehub/lobe-chat/commit/ea9ff00))
+
+
+
+
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+
+
 ### [Version 1.16.8](https://github.com/lobehub/lobe-chat/compare/v1.16.7...v1.16.8)

 Released on **2024-09-12**

diff --git a/package.json b/package.json
index 907a92a3097d..975edfa7c342 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.16.8",
+  "version": "1.16.9",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",