From d5a33a0323ad5847389d58262fe365de55012fcb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=91=86=E8=90=8C=E9=97=B7=E6=B2=B9=E7=93=B6?= <253605712@qq.com> Date: Wed, 22 May 2024 11:02:43 +0800 Subject: [PATCH 1/9] feat:add gpt-4o for azure (#4568) --- .../model_providers/azure_openai/_constant.py | 152 ++++++++++++++++++ .../azure_openai/azure_openai.yaml | 12 ++ 2 files changed, 164 insertions(+) diff --git a/api/core/model_runtime/model_providers/azure_openai/_constant.py b/api/core/model_runtime/model_providers/azure_openai/_constant.py index 26ce8586793576..707b199417eb11 100644 --- a/api/core/model_runtime/model_providers/azure_openai/_constant.py +++ b/api/core/model_runtime/model_providers/azure_openai/_constant.py @@ -482,6 +482,158 @@ class AzureBaseModel(BaseModel): ) ) ), + AzureBaseModel( + base_model_name='gpt-4o', + entity=AIModelEntity( + model='fake-deployment-name', + label=I18nObject( + en_US='fake-deployment-name-label', + ), + model_type=ModelType.LLM, + features=[ + ModelFeature.AGENT_THOUGHT, + ModelFeature.VISION, + ModelFeature.MULTI_TOOL_CALL, + ModelFeature.STREAM_TOOL_CALL, + ], + fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + model_properties={ + ModelPropertyKey.MODE: LLMMode.CHAT.value, + ModelPropertyKey.CONTEXT_SIZE: 128000, + }, + parameter_rules=[ + ParameterRule( + name='temperature', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], + ), + ParameterRule( + name='top_p', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], + ), + ParameterRule( + name='presence_penalty', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], + ), + ParameterRule( + name='frequency_penalty', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], + ), + _get_max_tokens(default=512, min_val=1, max_val=4096), + ParameterRule( + name='seed', + label=I18nObject( + zh_Hans='种子', + en_US='Seed' + ), + type='int', + help=I18nObject( + zh_Hans='如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。', + en_US='If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.' 
+ ), + required=False, + precision=2, + min=0, + max=1, + ), + ParameterRule( + name='response_format', + label=I18nObject( + zh_Hans='回复格式', + en_US='response_format' + ), + type='string', + help=I18nObject( + zh_Hans='指定模型必须输出的格式', + en_US='specifying the format that the model must output' + ), + required=False, + options=['text', 'json_object'] + ), + ], + pricing=PriceConfig( + input=5.00, + output=15.00, + unit=0.000001, + currency='USD', + ) + ) + ), + AzureBaseModel( + base_model_name='gpt-4o-2024-05-13', + entity=AIModelEntity( + model='fake-deployment-name', + label=I18nObject( + en_US='fake-deployment-name-label', + ), + model_type=ModelType.LLM, + features=[ + ModelFeature.AGENT_THOUGHT, + ModelFeature.VISION, + ModelFeature.MULTI_TOOL_CALL, + ModelFeature.STREAM_TOOL_CALL, + ], + fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + model_properties={ + ModelPropertyKey.MODE: LLMMode.CHAT.value, + ModelPropertyKey.CONTEXT_SIZE: 128000, + }, + parameter_rules=[ + ParameterRule( + name='temperature', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], + ), + ParameterRule( + name='top_p', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], + ), + ParameterRule( + name='presence_penalty', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], + ), + ParameterRule( + name='frequency_penalty', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], + ), + _get_max_tokens(default=512, min_val=1, max_val=4096), + ParameterRule( + name='seed', + label=I18nObject( + zh_Hans='种子', + en_US='Seed' + ), + type='int', + help=I18nObject( + zh_Hans='如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。', + en_US='If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.' 
+ ), + required=False, + precision=2, + min=0, + max=1, + ), + ParameterRule( + name='response_format', + label=I18nObject( + zh_Hans='回复格式', + en_US='response_format' + ), + type='string', + help=I18nObject( + zh_Hans='指定模型必须输出的格式', + en_US='specifying the format that the model must output' + ), + required=False, + options=['text', 'json_object'] + ), + ], + pricing=PriceConfig( + input=5.00, + output=15.00, + unit=0.000001, + currency='USD', + ) + ) + ), AzureBaseModel( base_model_name='gpt-4-turbo', entity=AIModelEntity( diff --git a/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml b/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml index 9b2a1169c52361..2f7d6fe97869b1 100644 --- a/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml +++ b/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml @@ -99,6 +99,18 @@ model_credential_schema: show_on: - variable: __model_type value: llm + - label: + en_US: gpt-4o + value: gpt-4o + show_on: + - variable: __model_type + value: llm + - label: + en_US: gpt-4o-2024-05-13 + value: gpt-4o-2024-05-13 + show_on: + - variable: __model_type + value: llm - label: en_US: gpt-4-turbo value: gpt-4-turbo From ee53f98d8ccbb81fc506a1e8802b5658d6fdbe54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=93=8D=E4=BD=9C?= Date: Wed, 22 May 2024 11:15:13 +0800 Subject: [PATCH 2/9] Hide the copy button when there is no content to copy (#4546) --- .../workflow/panel/workflow-preview.tsx | 29 ++++++++++--------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/web/app/components/workflow/panel/workflow-preview.tsx b/web/app/components/workflow/panel/workflow-preview.tsx index 0f9997f06dd142..f0434db46f7753 100644 --- a/web/app/components/workflow/panel/workflow-preview.tsx +++ b/web/app/components/workflow/panel/workflow-preview.tsx @@ -119,20 +119,21 @@ const WorkflowPreview = () => { error={workflowRunningData?.result?.error} onClick={() => switchTab('DETAIL')} /> - { - const content = workflowRunningData?.resultText - if (typeof content === 'string') - copy(content) - else - copy(JSON.stringify(content)) - Toast.notify({ type: 'success', message: t('common.actionMsg.copySuccessfully') }) - }}> - -
{t('common.operation.copy')}
-
+ {(workflowRunningData?.result.status === WorkflowRunningStatus.Succeeded && workflowRunningData?.resultText) && (
+ {
+ const content = workflowRunningData?.resultText
+ if (typeof content === 'string')
+ copy(content)
+ else
+ copy(JSON.stringify(content))
+ Toast.notify({ type: 'success', message: t('common.actionMsg.copySuccessfully') })
+ }}>
+
+
{t('common.operation.copy')}
+
+ )} )} {currentTab === 'DETAIL' && ( From 3efb5fe7e26834ebf76910b8fa159b98af19ecfa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=93=8D=E4=BD=9C?= Date: Wed, 22 May 2024 11:18:03 +0800 Subject: [PATCH 3/9] Refactor part of the ProviderManager code to improve readability (#4524) --- api/core/provider_manager.py | 79 +++++++++++------------------------- 1 file changed, 23 insertions(+), 56 deletions(-) diff --git a/api/core/provider_manager.py b/api/core/provider_manager.py index 0db84d3b6959a6..0281ddad0a0b52 100644 --- a/api/core/provider_manager.py +++ b/api/core/provider_manager.py @@ -105,14 +105,8 @@ def get_configurations(self, tenant_id: str) -> ProviderConfigurations: # Construct ProviderConfiguration objects for each provider for provider_entity in provider_entities: provider_name = provider_entity.provider - - provider_records = provider_name_to_provider_records_dict.get(provider_entity.provider) - if not provider_records: - provider_records = [] - - provider_model_records = provider_name_to_provider_model_records_dict.get(provider_entity.provider) - if not provider_model_records: - provider_model_records = [] + provider_records = provider_name_to_provider_records_dict.get(provider_entity.provider, []) + provider_model_records = provider_name_to_provider_model_records_dict.get(provider_entity.provider, []) # Convert to custom configuration custom_configuration = self._to_custom_configuration( @@ -134,38 +128,24 @@ def get_configurations(self, tenant_id: str) -> ProviderConfigurations: if preferred_provider_type_record: preferred_provider_type = ProviderType.value_of(preferred_provider_type_record.preferred_provider_type) + elif custom_configuration.provider or custom_configuration.models: + preferred_provider_type = ProviderType.CUSTOM + elif system_configuration.enabled: + preferred_provider_type = ProviderType.SYSTEM else: - if custom_configuration.provider or custom_configuration.models: - preferred_provider_type = ProviderType.CUSTOM - elif system_configuration.enabled: - preferred_provider_type = ProviderType.SYSTEM - else: - preferred_provider_type = ProviderType.CUSTOM + preferred_provider_type = ProviderType.CUSTOM using_provider_type = preferred_provider_type + has_valid_quota = any(quota_conf.is_valid for quota_conf in system_configuration.quota_configurations) + if preferred_provider_type == ProviderType.SYSTEM: - if not system_configuration.enabled: + if not system_configuration.enabled or not has_valid_quota: using_provider_type = ProviderType.CUSTOM - has_valid_quota = False - for quota_configuration in system_configuration.quota_configurations: - if quota_configuration.is_valid: - has_valid_quota = True - break - - if not has_valid_quota: - using_provider_type = ProviderType.CUSTOM else: if not custom_configuration.provider and not custom_configuration.models: - if system_configuration.enabled: - has_valid_quota = False - for quota_configuration in system_configuration.quota_configurations: - if quota_configuration.is_valid: - has_valid_quota = True - break - - if has_valid_quota: - using_provider_type = ProviderType.SYSTEM + if system_configuration.enabled and has_valid_quota: + using_provider_type = ProviderType.SYSTEM provider_configuration = ProviderConfiguration( tenant_id=tenant_id, @@ -233,30 +213,17 @@ def get_default_model(self, tenant_id: str, model_type: ModelType) -> Optional[D ) if available_models: - found = False - for available_model in available_models: - if available_model.model == "gpt-4": - default_model = TenantDefaultModel( - 
tenant_id=tenant_id, - model_type=model_type.to_origin_model_type(), - provider_name=available_model.provider.provider, - model_name=available_model.model - ) - db.session.add(default_model) - db.session.commit() - found = True - break - - if not found: - available_model = available_models[0] - default_model = TenantDefaultModel( - tenant_id=tenant_id, - model_type=model_type.to_origin_model_type(), - provider_name=available_model.provider.provider, - model_name=available_model.model - ) - db.session.add(default_model) - db.session.commit() + available_model = next((model for model in available_models if model.model == "gpt-4"), + available_models[0]) + + default_model = TenantDefaultModel( + tenant_id=tenant_id, + model_type=model_type.to_origin_model_type(), + provider_name=available_model.provider.provider, + model_name=available_model.model + ) + db.session.add(default_model) + db.session.commit() if not default_model: return None From 5b009a5afb570c23ec5633317d1d69c885e676ad Mon Sep 17 00:00:00 2001 From: naporitan Date: Wed, 22 May 2024 15:28:03 +0900 Subject: [PATCH 4/9] chore(api): Use channel from UI as API query parameter (#4562) --- api/core/tools/provider/builtin/youtube/tools/videos.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/tools/provider/builtin/youtube/tools/videos.py b/api/core/tools/provider/builtin/youtube/tools/videos.py index 86160dfa6c3c26..7a9b9fce4a921f 100644 --- a/api/core/tools/provider/builtin/youtube/tools/videos.py +++ b/api/core/tools/provider/builtin/youtube/tools/videos.py @@ -36,7 +36,7 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) \ youtube = build('youtube', 'v3', developerKey=self.runtime.credentials['google_api_key']) # try to get channel id - search_results = youtube.search().list(q='mrbeast', type='channel', order='relevance', part='id').execute() + search_results = youtube.search().list(q=channel, type='channel', order='relevance', part='id').execute() channel_id = search_results['items'][0]['id']['channelId'] start_date, end_date = time_range From 3ab19be9ea772a8cd0f8dd2d88f1505cfb99e29d Mon Sep 17 00:00:00 2001 From: Justin Wu Date: Wed, 22 May 2024 14:28:28 +0800 Subject: [PATCH 5/9] Fix bedrock claude wrong pricing (#4572) Co-authored-by: Justin Wu --- .../bedrock/llm/anthropic.claude-3-haiku-v1.yaml | 4 ++-- .../bedrock/llm/anthropic.claude-3-sonnet-v1.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-haiku-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-haiku-v1.yaml index 73fe5567fc266d..181b192769c872 100644 --- a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-haiku-v1.yaml +++ b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-haiku-v1.yaml @@ -51,7 +51,7 @@ parameter_rules: zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. 
pricing: - input: '0.003' - output: '0.015' + input: '0.00025' + output: '0.00125' unit: '0.001' currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v1.yaml index cb11df0b60183c..b782faddbaca4e 100644 --- a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v1.yaml +++ b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v1.yaml @@ -50,7 +50,7 @@ parameter_rules: zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. pricing: - input: '0.00025' - output: '0.00125' + input: '0.003' + output: '0.015' unit: '0.001' currency: USD From 24576a39e56c6d3a27914718ba9a8d9542f7c1bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=93=8D=E4=BD=9C?= Date: Wed, 22 May 2024 14:28:52 +0800 Subject: [PATCH 6/9] fix: some google search result raise exception (#4567) --- .../tools/provider/builtin/google/tools/google_search.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/api/core/tools/provider/builtin/google/tools/google_search.py b/api/core/tools/provider/builtin/google/tools/google_search.py index 0b1978ad3e4f8b..b5f15464c368eb 100644 --- a/api/core/tools/provider/builtin/google/tools/google_search.py +++ b/api/core/tools/provider/builtin/google/tools/google_search.py @@ -99,8 +99,11 @@ def _process_response(res: dict, typ: str) -> str: ): toret = res["knowledge_graph"]["description"] + "\n" if "snippet" in res["organic_results"][0].keys(): - for item in res["organic_results"]: - toret += "content: " + item["snippet"] + "\n" + "link: " + item["link"] + "\n" + toret = "\n".join( + f"content: {item['snippet']}\nlink: {item['link']}" + for item in res["organic_results"] + if "snippet" in item and "link" in item + ) if ( "images_results" in res.keys() and "thumbnail" in res["images_results"][0].keys() From 4f62541bfb540b70fb3c70cadd3d6ad1c816c227 Mon Sep 17 00:00:00 2001 From: zxhlyh Date: Wed, 22 May 2024 16:42:49 +0800 Subject: [PATCH 7/9] chore: remove model provider free token link (#4579) --- .../model-provider-page/hooks.ts | 25 --------- .../provider-added-card/index.tsx | 3 +- .../provider-added-card/quota-panel.tsx | 24 +------- .../provider-card/index.tsx | 56 +------------------ .../model-provider-page/utils.ts | 1 - web/service/common.ts | 4 -- 6 files changed, 4 insertions(+), 109 deletions(-) diff --git a/web/app/components/header/account-setting/model-provider-page/hooks.ts b/web/app/components/header/account-setting/model-provider-page/hooks.ts index 27f2b15582f8fb..d4ffe8f09b2c05 100644 --- a/web/app/components/header/account-setting/model-provider-page/hooks.ts +++ b/web/app/components/header/account-setting/model-provider-page/hooks.ts @@ -24,7 +24,6 @@ import { fetchModelProviderCredentials, fetchModelProviders, getPayUrl, - submitFreeQuota, } from '@/service/common' import { useProviderContext } from '@/context/provider-context' @@ -202,30 +201,6 @@ export const useAnthropicBuyQuota = () => { return handleGetPayUrl } -export const useFreeQuota = (onSuccess: () => void) => { - const [loading, setLoading] = useState(false) - - const handleClick = async (type: string) => { - if (loading) - return - - try { - setLoading(true) - const res = await submitFreeQuota(`/workspaces/current/model-providers/${type}/free-quota-submit`) - - if (res.type 
=== 'redirect' && res.redirect_url) - window.location.href = res.redirect_url - else if (res.type === 'submit' && res.result === 'success') - onSuccess() - } - finally { - setLoading(false) - } - } - - return handleClick -} - export const useModelProviders = () => { const { data: providersData, mutate, isLoading } = useSWR('/workspaces/current/model-providers', fetchModelProviders) diff --git a/web/app/components/header/account-setting/model-provider-page/provider-added-card/index.tsx b/web/app/components/header/account-setting/model-provider-page/provider-added-card/index.tsx index b8bce1d05c8f0c..8fa464dbe70dc1 100644 --- a/web/app/components/header/account-setting/model-provider-page/provider-added-card/index.tsx +++ b/web/app/components/header/account-setting/model-provider-page/provider-added-card/index.tsx @@ -9,7 +9,6 @@ import type { import { ConfigurateMethodEnum } from '../declarations' import { DEFAULT_BACKGROUND_COLOR, - MODEL_PROVIDER_QUOTA_GET_FREE, MODEL_PROVIDER_QUOTA_GET_PAID, modelTypeFormat, } from '../utils' @@ -43,7 +42,7 @@ const ProviderAddedCard: FC = ({ const configurateMethods = provider.configurate_methods.filter(method => method !== ConfigurateMethodEnum.fetchFromRemote) const systemConfig = provider.system_configuration const hasModelList = fetched && !!modelList.length - const showQuota = systemConfig.enabled && [...MODEL_PROVIDER_QUOTA_GET_FREE, ...MODEL_PROVIDER_QUOTA_GET_PAID].includes(provider.provider) && !IS_CE_EDITION + const showQuota = systemConfig.enabled && [...MODEL_PROVIDER_QUOTA_GET_PAID].includes(provider.provider) && !IS_CE_EDITION const getModelList = async (providerName: string) => { if (loading) diff --git a/web/app/components/header/account-setting/model-provider-page/provider-added-card/quota-panel.tsx b/web/app/components/header/account-setting/model-provider-page/provider-added-card/quota-panel.tsx index a7a9aaf09f4ac5..c00933468ff994 100644 --- a/web/app/components/header/account-setting/model-provider-page/provider-added-card/quota-panel.tsx +++ b/web/app/components/header/account-setting/model-provider-page/provider-added-card/quota-panel.tsx @@ -7,17 +7,10 @@ import { QuotaUnitEnum, } from '../declarations' import { - useAnthropicBuyQuota, - useFreeQuota, - useUpdateModelProviders, -} from '../hooks' -import { - MODEL_PROVIDER_QUOTA_GET_FREE, MODEL_PROVIDER_QUOTA_GET_PAID, } from '../utils' import PriorityUseTip from './priority-use-tip' import { InfoCircle } from '@/app/components/base/icons/src/vender/line/general' -import Button from '@/app/components/base/button' import TooltipPlus from '@/app/components/base/tooltip-plus' import { formatNumber } from '@/utils/format' @@ -28,12 +21,7 @@ const QuotaPanel: FC = ({ provider, }) => { const { t } = useTranslation() - const updateModelProviders = useUpdateModelProviders() - const handlePay = useAnthropicBuyQuota() - const handleFreeQuotaSuccess = () => { - updateModelProviders() - } - const handleFreeQuota = useFreeQuota(handleFreeQuotaSuccess) + const customConfig = provider.custom_configuration const priorityUseType = provider.preferred_provider_type const systemConfig = provider.system_configuration @@ -68,16 +56,6 @@ const QuotaPanel: FC = ({ ) } - { - !currentQuota && MODEL_PROVIDER_QUOTA_GET_FREE.includes(provider.provider) && ( - - ) - } { priorityUseType === PreferredProviderTypeEnum.system && customConfig.status === CustomConfigurationStatusEnum.active && ( diff --git a/web/app/components/header/account-setting/model-provider-page/provider-card/index.tsx 
b/web/app/components/header/account-setting/model-provider-page/provider-card/index.tsx index 0e5eb7a236c08e..8b3b3c1b67b76b 100644 --- a/web/app/components/header/account-setting/model-provider-page/provider-card/index.tsx +++ b/web/app/components/header/account-setting/model-provider-page/provider-card/index.tsx @@ -2,60 +2,34 @@ import type { FC } from 'react' import { useTranslation } from 'react-i18next' import type { ModelProvider, - TypeWithI18N, } from '../declarations' import { ConfigurateMethodEnum } from '../declarations' import { DEFAULT_BACKGROUND_COLOR, - MODEL_PROVIDER_QUOTA_GET_FREE, modelTypeFormat, } from '../utils' import { - useAnthropicBuyQuota, - useFreeQuota, useLanguage, - useUpdateModelProviders, } from '../hooks' import ModelBadge from '../model-badge' import ProviderIcon from '../provider-icon' import s from './index.module.css' import { Plus, Settings01 } from '@/app/components/base/icons/src/vender/line/general' import Button from '@/app/components/base/button' -import { IS_CE_EDITION } from '@/config' type ProviderCardProps = { provider: ModelProvider onOpenModal: (configurateMethod: ConfigurateMethodEnum) => void } -const TIP_MAP: { [k: string]: TypeWithI18N } = { - minimax: { - en_US: 'Earn 1 million tokens for free', - zh_Hans: '免费获取 100 万个 token', - }, - spark: { - en_US: 'Earn 3 million tokens (v3.0) for free', - zh_Hans: '免费获取 300 万个 token (v3.0)', - }, - zhipuai: { - en_US: 'Earn 10 million tokens for free', - zh_Hans: '免费获取 1000 万个 token', - }, -} const ProviderCard: FC = ({ provider, onOpenModal, }) => { const { t } = useTranslation() const language = useLanguage() - const updateModelProviders = useUpdateModelProviders() - const handlePay = useAnthropicBuyQuota() - const handleFreeQuotaSuccess = () => { - updateModelProviders() - } - const handleFreeQuota = useFreeQuota(handleFreeQuotaSuccess) + const configurateMethods = provider.configurate_methods.filter(method => method !== ConfigurateMethodEnum.fetchFromRemote) - const canGetFreeQuota = MODEL_PROVIDER_QUOTA_GET_FREE.includes(provider.provider) && !IS_CE_EDITION && provider.system_configuration.enabled return (
= ({ }
-
+
{ provider.supported_model_types.map(modelType => ( @@ -81,33 +55,7 @@ const ProviderCard: FC = ({ )) } - { - canGetFreeQuota && ( -
- 📣  -
- {TIP_MAP[provider.provider][language]} -
-
- ) - }
- { - canGetFreeQuota && ( -
- -
- ) - }
{ configurateMethods.map((method) => { diff --git a/web/app/components/header/account-setting/model-provider-page/utils.ts b/web/app/components/header/account-setting/model-provider-page/utils.ts index bba5fccc6d163d..afe0f6dc9a5615 100644 --- a/web/app/components/header/account-setting/model-provider-page/utils.ts +++ b/web/app/components/header/account-setting/model-provider-page/utils.ts @@ -15,7 +15,6 @@ import { validateModelProvider, } from '@/service/common' -export const MODEL_PROVIDER_QUOTA_GET_FREE = ['minimax', 'spark', 'zhipuai'] export const MODEL_PROVIDER_QUOTA_GET_PAID = ['anthropic', 'openai', 'azure_openai'] export const DEFAULT_BACKGROUND_COLOR = '#F3F4F6' diff --git a/web/service/common.ts b/web/service/common.ts index 98fe50488c119a..d7f7c6c8bcc0b8 100644 --- a/web/service/common.ts +++ b/web/service/common.ts @@ -218,10 +218,6 @@ export const fetchModelParameterRules: Fetcher<{ data: ModelParameterRule[] }, s return get<{ data: ModelParameterRule[] }>(url) } -export const submitFreeQuota: Fetcher<{ type: string; redirect_url?: string; result?: string }, string> = (url) => { - return post<{ type: string; redirect_url?: string; result?: string }>(url) -} - export const fetchFileUploadConfig: Fetcher = ({ url }) => { return get(url) } From 2988b67c24b8d10b64a1aad80f6c892a47646eb0 Mon Sep 17 00:00:00 2001 From: sino Date: Wed, 22 May 2024 16:44:20 +0800 Subject: [PATCH 8/9] fix: hide automatic button on automatic result page (#4494) --- .../app/configuration/config-prompt/simple-prompt-input.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx b/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx index c92a43515fa294..c75bbf327ecc27 100644 --- a/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx +++ b/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx @@ -148,7 +148,7 @@ const Prompt: FC = ({ )}
- {!isAgent && ( + {!isAgent && !readonly && ( )}
From 461488e9bf94ad429aa3e7481a1c4128f8a37dfe Mon Sep 17 00:00:00 2001 From: somethingwentwell <46453511+somethingwentwell@users.noreply.github.com> Date: Wed, 22 May 2024 17:43:16 +0800 Subject: [PATCH 9/9] Add Azure OpenAI API version for GPT4o support (#4569) Co-authored-by: wwwc --- .../model_providers/azure_openai/azure_openai.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml b/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml index 2f7d6fe97869b1..b9f33a8ff29d0f 100644 --- a/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml +++ b/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml @@ -59,6 +59,9 @@ model_credential_schema: - label: en_US: 2023-12-01-preview value: 2023-12-01-preview + - label: + en_US: '2024-02-01' + value: '2024-02-01' placeholder: zh_Hans: 在此选择您的 API 版本 en_US: Select your API Version here
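
Taken together, [PATCH 1/9] and [PATCH 9/9] make a gpt-4o base model selectable for Azure OpenAI deployments and add the 2024-02-01 API version to the provider credential schema. The sketch below is not part of any patch in this series; it is a minimal illustration, under assumed endpoint, key, and deployment names, of how such a deployment would be called directly with the openai Python SDK, exercising the same seed and response_format parameters that the new parameter rules expose.

```python
# Minimal sketch, not part of the patch series. Endpoint, API key and deployment
# name below are placeholders; substitute your own Azure OpenAI resource values.
from openai import AzureOpenAI

client = AzureOpenAI(
    azure_endpoint="https://my-resource.openai.azure.com",  # assumed resource endpoint
    api_key="AZURE_OPENAI_API_KEY",                         # assumed credential
    api_version="2024-02-01",                               # version added in PATCH 9/9
)

response = client.chat.completions.create(
    model="my-gpt-4o-deployment",             # assumed deployment of the gpt-4o base model (PATCH 1/9)
    messages=[{"role": "user", "content": "Reply with a JSON object containing a greeting."}],
    temperature=0.2,
    seed=42,                                  # best-effort deterministic sampling, per the seed rule
    response_format={"type": "json_object"},  # matches the 'json_object' option in the response_format rule
    max_tokens=512,                           # default of the _get_max_tokens rule (max 4096)
)
print(response.choices[0].message.content)
```

The parameter names here mirror the rules registered in _constant.py, so a request that validates against the new gpt-4o entries in Dify should map one-to-one onto this direct SDK call.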