diff --git a/src/app/(backend)/webapi/chat/anthropic/route.ts b/src/app/(backend)/webapi/chat/anthropic/route.ts
index ecdcb337e631..4a9a2628bd26 100644
--- a/src/app/(backend)/webapi/chat/anthropic/route.ts
+++ b/src/app/(backend)/webapi/chat/anthropic/route.ts
@@ -1,17 +1,5 @@
 import { POST as UniverseRoute } from '../[provider]/route';
 
-// due to the Chinese region does not support accessing Google
-// we need to use proxy to access it
-// refs: https://github.com/google/generative-ai-js/issues/29#issuecomment-1866246513
-// if (process.env.HTTP_PROXY_URL) {
-//   const { setGlobalDispatcher, ProxyAgent } = require('undici');
-//
-//   console.log(process.env.HTTP_PROXY_URL)
-//   setGlobalDispatcher(new ProxyAgent({ uri: process.env.HTTP_PROXY_URL }));
-// }
-
-// but undici only can be used in NodeJS
-// so if you want to use with proxy, you need comment the code below
 export const runtime = 'edge';
 
 export const preferredRegion = [
diff --git a/src/app/(backend)/webapi/chat/google/route.ts b/src/app/(backend)/webapi/chat/google/route.ts
index 73d8c564fdaf..646425a70cee 100644
--- a/src/app/(backend)/webapi/chat/google/route.ts
+++ b/src/app/(backend)/webapi/chat/google/route.ts
@@ -1,16 +1,5 @@
 import { POST as UniverseRoute } from '../[provider]/route';
 
-// due to the Chinese region does not support accessing Google
-// we need to use proxy to access it
-// refs: https://github.com/google/generative-ai-js/issues/29#issuecomment-1866246513
-// if (process.env.HTTP_PROXY_URL) {
-//   const { setGlobalDispatcher, ProxyAgent } = require('undici');
-//
-//   setGlobalDispatcher(new ProxyAgent({ uri: process.env.HTTP_PROXY_URL }));
-// }
-
-// but undici only can be used in NodeJS
-// so if you want to use with proxy, you need comment the code below
 export const runtime = 'edge';
 
 // due to Gemini-1.5-pro is not available in Hong Kong, we need to set the preferred region to exclude "Hong Kong (hkg1)".
diff --git a/src/app/(backend)/webapi/chat/wenxin/route.ts b/src/app/(backend)/webapi/chat/wenxin/route.ts
index a459fd5fe04b..2d4ca0074ac9 100644
--- a/src/app/(backend)/webapi/chat/wenxin/route.ts
+++ b/src/app/(backend)/webapi/chat/wenxin/route.ts
@@ -1,5 +1,5 @@
 import { getLLMConfig } from '@/config/llm';
-import { AgentRuntime } from '@/libs/agent-runtime';
+import { AgentRuntime, ModelProvider } from '@/libs/agent-runtime';
 import LobeWenxinAI from '@/libs/agent-runtime/wenxin';
 
 import { POST as UniverseRoute } from '../[provider]/route';
@@ -26,5 +26,5 @@ export const POST = async (req: Request) =>
 
       return new AgentRuntime(instance);
     },
-    params: { provider: 'wenxin' },
+    params: { provider: ModelProvider.Wenxin },
   });
diff --git a/src/app/(main)/settings/llm/components/ProviderConfig/index.tsx b/src/app/(main)/settings/llm/components/ProviderConfig/index.tsx
index c64c6571f584..a2ebcd7d82cb 100644
--- a/src/app/(main)/settings/llm/components/ProviderConfig/index.tsx
+++ b/src/app/(main)/settings/llm/components/ProviderConfig/index.tsx
@@ -116,6 +116,7 @@ const ProviderConfig = memo(
     className,
     name,
     showAceGcm = true,
+    showChecker = true,
     extra,
   }) => {
     const { t } = useTranslation('setting');
@@ -219,12 +220,14 @@ const ProviderConfig = memo(
         label: t('llm.modelList.title'),
         name: [LLMProviderConfigKey, id, LLMProviderModelListKey],
       },
-      checkerItem ?? {
-        children: <Checker model={checkModel!} provider={id} />,
-        desc: t('llm.checker.desc'),
-        label: t('llm.checker.title'),
-        minWidth: undefined,
-      },
+      showChecker
+        ? (checkerItem ?? {
+            children: <Checker model={checkModel!} provider={id} />,
+            desc: t('llm.checker.desc'),
+            label: t('llm.checker.title'),
+            minWidth: undefined,
+          })
+        : undefined,
       showAceGcm && isServerMode && aceGcmItem,
     ].filter(Boolean) as FormItemProps[];
 
diff --git a/src/server/globalConfig/index.ts b/src/server/globalConfig/index.ts
index 09b458389310..7c4704cda103 100644
--- a/src/server/globalConfig/index.ts
+++ b/src/server/globalConfig/index.ts
@@ -103,7 +103,7 @@ export const getServerGlobalConfig = () => {
     ENABLED_AI21,
     AI21_MODEL_LIST,
-
+
     ENABLED_AI360,
     AI360_MODEL_LIST,
diff --git a/src/server/modules/AgentRuntime/index.ts b/src/server/modules/AgentRuntime/index.ts
index 2043e97423fe..0fc0cfe557e1 100644
--- a/src/server/modules/AgentRuntime/index.ts
+++ b/src/server/modules/AgentRuntime/index.ts
@@ -266,8 +266,12 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
     case ModelProvider.SenseNova: {
       const { SENSENOVA_ACCESS_KEY_ID, SENSENOVA_ACCESS_KEY_SECRET } = getLLMConfig();
 
-      const sensenovaAccessKeyID = apiKeyManager.pick(payload?.sensenovaAccessKeyID || SENSENOVA_ACCESS_KEY_ID);
-      const sensenovaAccessKeySecret = apiKeyManager.pick(payload?.sensenovaAccessKeySecret || SENSENOVA_ACCESS_KEY_SECRET);
+      const sensenovaAccessKeyID = apiKeyManager.pick(
+        payload?.sensenovaAccessKeyID || SENSENOVA_ACCESS_KEY_ID,
+      );
+      const sensenovaAccessKeySecret = apiKeyManager.pick(
+        payload?.sensenovaAccessKeySecret || SENSENOVA_ACCESS_KEY_SECRET,
+      );
 
       const apiKey = sensenovaAccessKeyID + ':' + sensenovaAccessKeySecret;
diff --git a/src/types/llm.ts b/src/types/llm.ts
index ece6daf0ad04..95e86af7c9c9 100644
--- a/src/types/llm.ts
+++ b/src/types/llm.ts
@@ -122,6 +122,10 @@ export interface ModelProviderCard {
    * so provider like ollama don't need api key field
    */
   showApiKey?: boolean;
+  /**
+   * whether to show the checker in the provider config
+   */
+  showChecker?: boolean;
+  /**
    * whether to smoothing the output
    */
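
Note on consuming the new flag: the sketch below shows how a provider card could opt out of the connectivity checker row that ProviderConfig now renders conditionally (it defaults `showChecker` to true, so existing cards are unaffected). This is a minimal illustration, not code from this change: the provider id, name, and empty chatModels list are placeholders, and the set of required ModelProviderCard fields is assumed from src/types/llm.ts.

import { ModelProviderCard } from '@/types/llm';

// Hypothetical provider card; only the `showChecker` field comes from this diff.
const ExampleProvider: ModelProviderCard = {
  chatModels: [],
  id: 'example',
  name: 'Example',
  // Hide the checker row in the provider settings form, e.g. for a
  // provider whose endpoint cannot be probed from the client.
  showChecker: false,
};

export default ExampleProvider;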