diff --git a/.github/workflows/api-tests.yml b/.github/workflows/api-tests.yml
index 0c7a61f4b93361..e424171019a780 100644
--- a/.github/workflows/api-tests.yml
+++ b/.github/workflows/api-tests.yml
@@ -55,6 +55,14 @@ jobs:
- name: Run Tool
run: poetry run -C api bash dev/pytest/pytest_tools.sh
+ - name: Set up dotenvs
+ run: |
+ cp docker/.env.example docker/.env
+ cp docker/middleware.env.example docker/middleware.env
+
+ - name: Expose Service Ports
+ run: sh .github/workflows/expose_service_ports.sh
+
- name: Set up Sandbox
uses: hoverkraft-tech/compose-action@v2.0.0
with:
@@ -71,12 +79,7 @@ jobs:
uses: hoverkraft-tech/compose-action@v2.0.0
with:
compose-file: |
- docker/docker-compose.middleware.yaml
- docker/docker-compose.qdrant.yaml
- docker/docker-compose.milvus.yaml
- docker/docker-compose.pgvecto-rs.yaml
- docker/docker-compose.pgvector.yaml
- docker/docker-compose.chroma.yaml
+ docker/docker-compose.yaml
services: |
weaviate
qdrant
@@ -86,6 +89,5 @@ jobs:
pgvecto-rs
pgvector
chroma
-
- name: Test Vector Stores
run: poetry run -C api bash dev/pytest/pytest_vdb.sh
diff --git a/.github/workflows/db-migration-test.yml b/.github/workflows/db-migration-test.yml
index 64e8eb291c466e..67d1558dbcaaff 100644
--- a/.github/workflows/db-migration-test.yml
+++ b/.github/workflows/db-migration-test.yml
@@ -38,6 +38,11 @@ jobs:
- name: Install dependencies
run: poetry install -C api
+ - name: Prepare middleware env
+ run: |
+ cd docker
+ cp middleware.env.example middleware.env
+
- name: Set up Middlewares
uses: hoverkraft-tech/compose-action@v2.0.0
with:
diff --git a/.github/workflows/expose_service_ports.sh b/.github/workflows/expose_service_ports.sh
new file mode 100755
index 00000000000000..3418bf0c6f6688
--- /dev/null
+++ b/.github/workflows/expose_service_ports.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
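+# Append host:container port mappings to each vector-store service in
+# docker-compose.yaml; `yq -i` edits the file in place so the CI job can
+# reach the services directly from the runner.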
+yq eval '.services.weaviate.ports += ["8080:8080"]' -i docker/docker-compose.yaml
+yq eval '.services.qdrant.ports += ["6333:6333"]' -i docker/docker-compose.yaml
+yq eval '.services.chroma.ports += ["8000:8000"]' -i docker/docker-compose.yaml
+yq eval '.services["milvus-standalone"].ports += ["19530:19530"]' -i docker/docker-compose.yaml
+yq eval '.services.pgvector.ports += ["5433:5432"]' -i docker/docker-compose.yaml
+yq eval '.services["pgvecto-rs"].ports += ["5431:5432"]' -i docker/docker-compose.yaml
+
+echo "Ports exposed for sandbox, weaviate, qdrant, chroma, milvus, pgvector, pgvecto-rs."
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 66249653d717fa..0c7e5c712f008b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -144,6 +144,16 @@ api/.idea
api/.env
api/storage/*
+docker-legacy/volumes/app/storage/*
+docker-legacy/volumes/db/data/*
+docker-legacy/volumes/redis/data/*
+docker-legacy/volumes/weaviate/*
+docker-legacy/volumes/qdrant/*
+docker-legacy/volumes/etcd/*
+docker-legacy/volumes/minio/*
+docker-legacy/volumes/milvus/*
+docker-legacy/volumes/chroma/*
+
docker/volumes/app/storage/*
docker/volumes/db/data/*
docker/volumes/redis/data/*
@@ -154,6 +164,9 @@ docker/volumes/minio/*
docker/volumes/milvus/*
docker/volumes/chroma/*
+docker/nginx/conf.d/default.conf
+docker/middleware.env
+
sdks/python-client/build
sdks/python-client/dist
sdks/python-client/dify_client.egg-info
diff --git a/README.md b/README.md
index f5e06ce0fb3536..cd6dc3605a7927 100644
--- a/README.md
+++ b/README.md
@@ -174,6 +174,7 @@ The easiest way to start the Dify server is to run our [docker-compose.yml](dock
```bash
cd docker
+cp .env.example .env
docker compose up -d
```
@@ -183,7 +184,7 @@ After running, you can access the Dify dashboard in your browser at [http://loca
## Next steps
-If you need to customize the configuration, please refer to the comments in our [docker-compose.yml](docker/docker-compose.yaml) file and manually set the environment configuration. After making the changes, please run `docker-compose up -d` again. You can see the full list of environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).
+If you need to customize the configuration, please refer to the comments in our [.env.example](docker/.env.example) file and update the corresponding values in your `.env` file. Additionally, you might need to make adjustments to the `docker-compose.yaml` file itself, such as changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, please re-run `docker-compose up -d`. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).
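+
+For example, a minimal customization flow might look like this (using `vim` here as a stand-in for any editor):
+
+```bash
+cd docker
+cp .env.example .env     # create your local config from the template
+vim .env                 # adjust values such as exposed ports or image tags
+docker compose up -d     # re-create the containers with the new settings
+```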
If you'd like to configure a highly-available setup, there are community-contributed [Helm Charts](https://helm.sh/) and YAML files which allow Dify to be deployed on Kubernetes.
diff --git a/README_AR.md b/README_AR.md
index 1b0127b9e6be3d..803a9b7eeb7803 100644
--- a/README_AR.md
+++ b/README_AR.md
@@ -157,15 +157,17 @@
```bash
cd docker
+cp .env.example .env
docker compose up -d
```
+
بعد التشغيل، يمكنك الوصول إلى لوحة تحكم Dify في متصفحك على [http://localhost/install](http://localhost/install) وبدء عملية التهيئة.
> إذا كنت ترغب في المساهمة في Dify أو القيام بتطوير إضافي، فانظر إلى [دليلنا للنشر من الشفرة (code) المصدرية](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code)
## الخطوات التالية
-إذا كنت بحاجة إلى تخصيص التكوين، يرجى الرجوع إلى التعليقات في ملف [docker-compose.yml](docker/docker-compose.yaml) لدينا وتعيين التكوينات البيئية يدويًا. بعد إجراء التغييرات، يرجى تشغيل `docker-compose up -d` مرة أخرى. يمكنك رؤية قائمة كاملة بالمتغيرات البيئية [هنا](https://docs.dify.ai/getting-started/install-self-hosted/environments).
+إذا كنت بحاجة إلى تخصيص الإعدادات، فيرجى الرجوع إلى التعليقات في ملف [.env.example](docker/.env.example) وتحديث القيم المقابلة في ملف `.env`. بالإضافة إلى ذلك، قد تحتاج إلى إجراء تعديلات على ملف `docker-compose.yaml` نفسه، مثل تغيير إصدارات الصور أو تعيينات المنافذ أو نقاط تحميل وحدات التخزين، بناءً على بيئة النشر ومتطلباتك الخاصة. بعد إجراء أي تغييرات، يرجى إعادة تشغيل `docker-compose up -d`. يمكنك العثور على قائمة كاملة بمتغيرات البيئة المتاحة [هنا](https://docs.dify.ai/getting-started/install-self-hosted/environments).
يوجد مجتمع خاص بـ [Helm Charts](https://helm.sh/) وملفات YAML التي تسمح بتنفيذ Dify على Kubernetes للنظام من الإيجابيات العلوية.
diff --git a/README_CN.md b/README_CN.md
index 141dc152ec14df..66d551e254a370 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -179,11 +179,16 @@ Dify 是一个开源的 LLM 应用开发平台。其直观的界面结合了 AI
```bash
cd docker
+cp .env.example .env
docker compose up -d
```
运行后,可以在浏览器上访问 [http://localhost/install](http://localhost/install) 进入 Dify 控制台并开始初始化安装操作。
+### 自定义配置
+
+如果您需要自定义配置,请参考 [.env.example](docker/.env.example) 文件中的注释,并更新 `.env` 文件中对应的值。此外,您可能需要根据您的具体部署环境和需求对 `docker-compose.yaml` 文件本身进行调整,例如更改镜像版本、端口映射或卷挂载。完成任何更改后,请重新运行 `docker-compose up -d`。您可以在[此处](https://docs.dify.ai/getting-started/install-self-hosted/environments)找到可用环境变量的完整列表。
+
#### 使用 Helm Chart 部署
使用 [Helm Chart](https://helm.sh/) 版本或者 YAML 文件,可以在 Kubernetes 上部署 Dify。
@@ -192,10 +197,6 @@ docker compose up -d
- [Helm Chart by @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)
- [YAML 文件 by @Winson-030](https://github.com/Winson-030/dify-kubernetes)
-### 配置
-
-如果您需要自定义配置,请参考我们的 [docker-compose.yml](docker/docker-compose.yaml) 文件中的注释,并手动设置环境配置。更改后,请再次运行 `docker-compose up -d`。您可以在我们的[文档](https://docs.dify.ai/getting-started/install-self-hosted/environments)中查看所有环境变量的完整列表。
-
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
diff --git a/README_ES.md b/README_ES.md
index a26719a2f283cd..625d5ea2c7e6ce 100644
--- a/README_ES.md
+++ b/README_ES.md
@@ -179,6 +179,7 @@ La forma más fácil de iniciar el servidor de Dify es ejecutar nuestro archivo
```bash
cd docker
+cp .env.example .env
docker compose up -d
```
@@ -188,7 +189,7 @@ Después de ejecutarlo, puedes acceder al panel de control de Dify en tu navegad
## Próximos pasos
-Si necesitas personalizar la configuración, consulta los comentarios en nuestro archivo [docker-compose.yml](docker/docker-compose.yaml) y configura manualmente la configuración del entorno
+Si necesita personalizar la configuración, consulte los comentarios en nuestro archivo [.env.example](docker/.env.example) y actualice los valores correspondientes en su archivo `.env`. Además, es posible que deba realizar ajustes en el propio archivo `docker-compose.yaml`, como cambiar las versiones de las imágenes, las asignaciones de puertos o los montajes de volúmenes, según su entorno de implementación y requisitos específicos. Después de realizar cualquier cambio, vuelva a ejecutar `docker-compose up -d`. Puede encontrar la lista completa de variables de entorno disponibles [aquí](https://docs.dify.ai/getting-started/install-self-hosted/environments).
-. Después de realizar los cambios, ejecuta `docker-compose up -d` nuevamente. Puedes ver la lista completa de variables de entorno [aquí](https://docs.dify.ai/getting-started/install-self-hosted/environments).
diff --git a/README_FR.md b/README_FR.md
index b754ffaef7e66f..57931c626a1e6e 100644
--- a/README_FR.md
+++ b/README_FR.md
@@ -179,6 +179,7 @@ La manière la plus simple de démarrer le serveur Dify est d'exécuter notre fi
```bash
cd docker
+cp .env.example .env
docker compose up -d
```
@@ -188,9 +189,7 @@ Après l'exécution, vous pouvez accéder au tableau de bord Dify dans votre nav
## Prochaines étapes
-Si vous devez personnaliser la configuration, veuillez
-
- vous référer aux commentaires dans notre fichier [docker-compose.yml](docker/docker-compose.yaml) et définir manuellement la configuration de l'environnement. Après avoir apporté les modifications, veuillez exécuter à nouveau `docker-compose up -d`. Vous pouvez voir la liste complète des variables d'environnement [ici](https://docs.dify.ai/getting-started/install-self-hosted/environments).
+Si vous devez personnaliser la configuration, veuillez vous référer aux commentaires dans notre fichier [.env.example](docker/.env.example) et mettre à jour les valeurs correspondantes dans votre fichier `.env`. De plus, vous devrez peut-être apporter des modifications au fichier `docker-compose.yaml` lui-même, comme changer les versions d'image, les mappages de ports ou les montages de volumes, en fonction de votre environnement de déploiement et de vos exigences spécifiques. Après avoir effectué des modifications, veuillez réexécuter `docker-compose up -d`. Vous pouvez trouver la liste complète des variables d'environnement disponibles [ici](https://docs.dify.ai/getting-started/install-self-hosted/environments).
Si vous souhaitez configurer une configuration haute disponibilité, la communauté fournit des [Helm Charts](https://helm.sh/) et des fichiers YAML, à travers lesquels vous pouvez déployer Dify sur Kubernetes.
diff --git a/README_JA.md b/README_JA.md
index 2d78992eb32383..8fc0df7cd657ff 100644
--- a/README_JA.md
+++ b/README_JA.md
@@ -178,6 +178,7 @@ Difyサーバーを起動する最も簡単な方法は、[docker-compose.yml](d
```bash
cd docker
+cp .env.example .env
docker compose up -d
```
@@ -187,7 +188,7 @@ docker compose up -d
## 次のステップ
-環境設定をカスタマイズする場合は、[docker-compose.yml](docker/docker-compose.yaml)ファイル内のコメントを参照して、環境設定を手動で設定してください。変更を加えた後は、再び `docker-compose up -d` を実行してください。環境変数の完全なリストは[こちら](https://docs.dify.ai/getting-started/install-self-hosted/environments)をご覧ください。
+設定をカスタマイズする必要がある場合は、[.env.example](docker/.env.example) ファイルのコメントを参照し、`.env` ファイルの対応する値を更新してください。さらに、デプロイ環境や要件に応じて、`docker-compose.yaml` ファイル自体を調整する必要がある場合があります。たとえば、イメージのバージョン、ポートのマッピング、ボリュームのマウントなどを変更します。変更を加えた後は、`docker-compose up -d` を再実行してください。利用可能な環境変数の全一覧は、[こちら](https://docs.dify.ai/getting-started/install-self-hosted/environments)で確認できます。
高可用性設定を設定する必要がある場合、コミュニティは[Helm Charts](https://helm.sh/)とYAMLファイルにより、DifyをKubernetesにデプロイすることができます。
diff --git a/README_KL.md b/README_KL.md
index 033c73fb99e380..5dd4f95dff32ba 100644
--- a/README_KL.md
+++ b/README_KL.md
@@ -179,6 +179,7 @@ The easiest way to start the Dify server is to run our [docker-compose.yml](dock
```bash
cd docker
+cp .env.example .env
docker compose up -d
```
@@ -188,7 +189,7 @@ After running, you can access the Dify dashboard in your browser at [http://loca
## Next steps
-If you need to customize the configuration, please refer to the comments in our [docker-compose.yml](docker/docker-compose.yaml) file and manually set the environment configuration. After making the changes, please run `docker-compose up -d` again. You can see the full list of environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).
+If you need to customize the configuration, please refer to the comments in our [.env.example](docker/.env.example) file and update the corresponding values in your `.env` file. Additionally, you might need to make adjustments to the `docker-compose.yaml` file itself, such as changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, please re-run `docker-compose up -d`. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).
If you'd like to configure a highly-available setup, there are community-contributed [Helm Charts](https://helm.sh/) and YAML files which allow Dify to be deployed on Kubernetes.
diff --git a/README_KR.md b/README_KR.md
index 4c48faa7fb323a..a9e2a174ea7f99 100644
--- a/README_KR.md
+++ b/README_KR.md
@@ -172,6 +172,7 @@ Dify 서버를 시작하는 가장 쉬운 방법은 [docker-compose.yml](docker/
```bash
cd docker
+cp .env.example .env
docker compose up -d
```
@@ -181,8 +182,7 @@ docker compose up -d
## 다음 단계
-구성 커스터마이징이 필요한 경우, [docker-compose.yml](docker/docker-compose.yaml) 파일의 코멘트를 참조하여 환경 구성을 수동으로 설정하십시오. 변경 후 `docker-compose up -d` 를 다시 실행하십시오. 환경 변수의 전체 목록은 [여기](https://docs.dify.ai/getting-started/install-self-hosted/environments)에서 확인할 수 있습니다.
-
+구성을 사용자 정의해야 하는 경우 [.env.example](docker/.env.example) 파일의 주석을 참조하고 `.env` 파일에서 해당 값을 업데이트하십시오. 또한 특정 배포 환경 및 요구 사항에 따라 `docker-compose.yaml` 파일 자체를 조정해야 할 수도 있습니다. 예를 들어 이미지 버전, 포트 매핑 또는 볼륨 마운트를 변경합니다. 변경 한 후 `docker-compose up -d`를 다시 실행하십시오. 사용 가능한 환경 변수의 전체 목록은 [여기](https://docs.dify.ai/getting-started/install-self-hosted/environments)에서 찾을 수 있습니다.
Dify를 Kubernetes에 배포하고 프리미엄 스케일링 설정을 구성했다는 커뮤니티가 제공하는 [Helm Charts](https://helm.sh/)와 YAML 파일이 존재합니다.
diff --git a/api/Dockerfile b/api/Dockerfile
index 4f0e7f65e3f744..53c33a76591fa4 100644
--- a/api/Dockerfile
+++ b/api/Dockerfile
@@ -23,7 +23,6 @@ RUN apt-get update \
COPY pyproject.toml poetry.lock ./
RUN poetry install --sync --no-cache --no-root
-
# production stage
FROM base AS production
diff --git a/api/commands.py b/api/commands.py
index 91d77370236322..56217c898e22e2 100644
--- a/api/commands.py
+++ b/api/commands.py
@@ -12,6 +12,7 @@
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.models.document import Document
+from events.app_event import app_was_created
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from libs.helper import email as email_validate
@@ -585,6 +586,53 @@ def upgrade_db():
click.echo('Database migration skipped')
+@click.command('fix-app-site-missing', help='Fix missing site records for apps.')
+def fix_app_site_missing():
+    """
+    Fix missing site records for apps.
+    """
+    click.echo(click.style('Starting fix for missing app-related sites.', fg='green'))
+
+ failed_app_ids = []
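+    # Work through apps missing a Site record in batches of 1000, skipping apps that already failed once.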
+ while True:
+ sql = """select apps.id as id from apps left join sites on sites.app_id=apps.id
+where sites.id is null limit 1000"""
+ with db.engine.begin() as conn:
+ rs = conn.execute(db.text(sql))
+
+ processed_count = 0
+ for i in rs:
+ processed_count += 1
+ app_id = str(i.id)
+
+ if app_id in failed_app_ids:
+ continue
+
+ try:
+ app = db.session.query(App).filter(App.id == app_id).first()
+ tenant = app.tenant
+ if tenant:
+ accounts = tenant.get_accounts()
+                    if not accounts:
+                        click.echo("Fix app {} failed: its tenant has no accounts.".format(app.id))
+                        continue
+
+ account = accounts[0]
+                    click.echo("Fixing missing site for app {}.".format(app.id))
+ app_was_created.send(app, account=account)
+ except Exception as e:
+ failed_app_ids.append(app_id)
+                click.echo(click.style('Failed to fix missing site for app {}!'.format(app_id), fg='red'))
+                logging.exception(f'Failed to fix missing site for app {app_id}, error: {e}')
+ continue
+
+ if not processed_count:
+ break
+
+
+    click.echo(click.style('Congratulations! All missing app sites have been fixed!', fg='green'))
+
+
def register_commands(app):
app.cli.add_command(reset_password)
app.cli.add_command(reset_email)
@@ -594,3 +642,4 @@ def register_commands(app):
app.cli.add_command(add_qdrant_doc_id_index)
app.cli.add_command(create_tenant)
app.cli.add_command(upgrade_db)
+ app.cli.add_command(fix_app_site_missing)
diff --git a/api/configs/app_config.py b/api/configs/app_config.py
index f3bab64fb2d0be..4467b84c8666e6 100644
--- a/api/configs/app_config.py
+++ b/api/configs/app_config.py
@@ -36,7 +36,6 @@ class DifyConfig(
# read from dotenv format config file
env_file='.env',
env_file_encoding='utf-8',
- env_ignore_empty=True,
# ignore extra attributes
extra='ignore',
diff --git a/api/configs/packaging/__init__.py b/api/configs/packaging/__init__.py
index 95ccb850ed3b40..e9b389df8d3f97 100644
--- a/api/configs/packaging/__init__.py
+++ b/api/configs/packaging/__init__.py
@@ -8,7 +8,7 @@ class PackagingInfo(BaseModel):
CURRENT_VERSION: str = Field(
description='Dify version',
- default='0.6.11',
+ default='0.6.12-fix1',
)
COMMIT_SHA: str = Field(
diff --git a/api/controllers/console/setup.py b/api/controllers/console/setup.py
index a8fdde2791c426..def50212a18b82 100644
--- a/api/controllers/console/setup.py
+++ b/api/controllers/console/setup.py
@@ -3,11 +3,10 @@
from flask import current_app, request
from flask_restful import Resource, reqparse
-from extensions.ext_database import db
from libs.helper import email, get_remote_ip, str_len
from libs.password import valid_password
from models.model import DifySetup
-from services.account_service import AccountService, RegisterService, TenantService
+from services.account_service import RegisterService, TenantService
from . import api
from .error import AlreadySetupError, NotInitValidateError, NotSetupError
@@ -51,28 +50,17 @@ def post(self):
required=True, location='json')
args = parser.parse_args()
- # Register
- account = RegisterService.register(
+        # Set up the instance: create the owner account, workspace, and DifySetup record
+ RegisterService.setup(
email=args['email'],
name=args['name'],
- password=args['password']
+ password=args['password'],
+ ip_address=get_remote_ip(request)
)
- TenantService.create_owner_tenant_if_not_exist(account)
-
- setup()
- AccountService.update_last_login(account, ip_address=get_remote_ip(request))
-
return {'result': 'success'}, 201
-def setup():
- dify_setup = DifySetup(
- version=current_app.config['CURRENT_VERSION']
- )
- db.session.add(dify_setup)
-
-
def setup_required(view):
@wraps(view)
def decorated(*args, **kwargs):
diff --git a/api/core/app/task_pipeline/workflow_cycle_manage.py b/api/core/app/task_pipeline/workflow_cycle_manage.py
index e79ac05a752e4e..513fc692ffbb85 100644
--- a/api/core/app/task_pipeline/workflow_cycle_manage.py
+++ b/api/core/app/task_pipeline/workflow_cycle_manage.py
@@ -167,13 +167,14 @@ def _workflow_run_failed(
db.session.refresh(workflow_run)
db.session.close()
- trace_manager.add_trace_task(
- TraceTask(
- TraceTaskName.WORKFLOW_TRACE,
- workflow_run=workflow_run,
- conversation_id=conversation_id,
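+        # trace_manager may be None when tracing is not configured; guard before enqueueing the task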
+ if trace_manager:
+ trace_manager.add_trace_task(
+ TraceTask(
+ TraceTaskName.WORKFLOW_TRACE,
+ workflow_run=workflow_run,
+ conversation_id=conversation_id,
+ )
)
- )
return workflow_run
diff --git a/api/core/model_runtime/model_providers/moonshot/llm/llm.py b/api/core/model_runtime/model_providers/moonshot/llm/llm.py
index ef301b0f6c3897..17cf65dc3adf70 100644
--- a/api/core/model_runtime/model_providers/moonshot/llm/llm.py
+++ b/api/core/model_runtime/model_providers/moonshot/llm/llm.py
@@ -93,7 +93,7 @@ def _add_function_call(self, model: str, credentials: dict) -> None:
}.intersection(model_schema.features or []):
credentials['function_calling_type'] = 'tool_call'
- def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict:
+ def _convert_prompt_message_to_dict(self, message: PromptMessage, credentials: Optional[dict] = None) -> dict:
"""
Convert PromptMessage to dict for OpenAI API format
"""
diff --git a/api/core/model_runtime/model_providers/nvidia/llm/llm.py b/api/core/model_runtime/model_providers/nvidia/llm/llm.py
index 4b2dbf3d3a5242..11252b92115df7 100644
--- a/api/core/model_runtime/model_providers/nvidia/llm/llm.py
+++ b/api/core/model_runtime/model_providers/nvidia/llm/llm.py
@@ -200,7 +200,7 @@ def _generate(self, model: str, credentials: dict, prompt_messages: list[PromptM
endpoint_url = str(URL(endpoint_url) / 'chat' / 'completions')
elif 'server_url' in credentials:
endpoint_url = server_url
- data['messages'] = [self._convert_prompt_message_to_dict(m) for m in prompt_messages]
+ data['messages'] = [self._convert_prompt_message_to_dict(m, credentials) for m in prompt_messages]
elif completion_type is LLMMode.COMPLETION:
data['prompt'] = 'ping'
if 'endpoint_url' in credentials:
diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py b/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py
index 36eae2042d1cc0..b76f460737ba6a 100644
--- a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py
+++ b/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py
@@ -582,7 +582,7 @@ def _handle_generate_response(self, model: str, credentials: dict, response: req
return result
- def _convert_prompt_message_to_dict(self, message: PromptMessage, credentials: dict = None) -> dict:
+ def _convert_prompt_message_to_dict(self, message: PromptMessage, credentials: Optional[dict] = None) -> dict:
"""
Convert PromptMessage to dict for OpenAI API format
"""
diff --git a/api/core/model_runtime/model_providers/spark/llm/_client.py b/api/core/model_runtime/model_providers/spark/llm/_client.py
index 4c8790141d26a4..10da265701a423 100644
--- a/api/core/model_runtime/model_providers/spark/llm/_client.py
+++ b/api/core/model_runtime/model_providers/spark/llm/_client.py
@@ -38,6 +38,10 @@ def __init__(self, model: str, app_id: str, api_key: str, api_secret: str, api_d
'spark-3.5': {
'version': 'v3.5',
'chat_domain': 'generalv3.5'
+ },
+ 'spark-4': {
+ 'version': 'v4.0',
+ 'chat_domain': '4.0Ultra'
}
}
diff --git a/api/core/model_runtime/model_providers/spark/llm/_position.yaml b/api/core/model_runtime/model_providers/spark/llm/_position.yaml
index 64c2db77ce86c6..e49ee97db7cf56 100644
--- a/api/core/model_runtime/model_providers/spark/llm/_position.yaml
+++ b/api/core/model_runtime/model_providers/spark/llm/_position.yaml
@@ -1,3 +1,4 @@
+- spark-4
- spark-3.5
- spark-3
- spark-1.5
diff --git a/api/core/model_runtime/model_providers/spark/llm/spark-1.5.yaml b/api/core/model_runtime/model_providers/spark/llm/spark-1.5.yaml
index effbe45e27e8c8..41b8765fe6c4f1 100644
--- a/api/core/model_runtime/model_providers/spark/llm/spark-1.5.yaml
+++ b/api/core/model_runtime/model_providers/spark/llm/spark-1.5.yaml
@@ -28,6 +28,6 @@ parameter_rules:
min: 1
max: 6
help:
- zh_Hans: 从 k 个候选中随机选择⼀个(⾮等概率)。
+ zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
en_US: Randomly select one from k candidates (non-equal probability).
required: false
diff --git a/api/core/model_runtime/model_providers/spark/llm/spark-2.yaml b/api/core/model_runtime/model_providers/spark/llm/spark-2.yaml
index 2afd1fc538fcd6..2db6805a2e2af0 100644
--- a/api/core/model_runtime/model_providers/spark/llm/spark-2.yaml
+++ b/api/core/model_runtime/model_providers/spark/llm/spark-2.yaml
@@ -29,6 +29,6 @@ parameter_rules:
min: 1
max: 6
help:
- zh_Hans: 从 k 个候选中随机选择⼀个(⾮等概率)。
+ zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
en_US: Randomly select one from k candidates (non-equal probability).
required: false
diff --git a/api/core/model_runtime/model_providers/spark/llm/spark-3.5.yaml b/api/core/model_runtime/model_providers/spark/llm/spark-3.5.yaml
index 650eff5d9801ac..6d24932ea83076 100644
--- a/api/core/model_runtime/model_providers/spark/llm/spark-3.5.yaml
+++ b/api/core/model_runtime/model_providers/spark/llm/spark-3.5.yaml
@@ -28,6 +28,6 @@ parameter_rules:
min: 1
max: 6
help:
- zh_Hans: 从 k 个候选中随机选择⼀个(⾮等概率)。
+ zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
en_US: Randomly select one from k candidates (non-equal probability).
required: false
diff --git a/api/core/model_runtime/model_providers/spark/llm/spark-3.yaml b/api/core/model_runtime/model_providers/spark/llm/spark-3.yaml
index dc0f66f670ce78..2ef9e10f453f6b 100644
--- a/api/core/model_runtime/model_providers/spark/llm/spark-3.yaml
+++ b/api/core/model_runtime/model_providers/spark/llm/spark-3.yaml
@@ -28,6 +28,6 @@ parameter_rules:
min: 1
max: 6
help:
- zh_Hans: 从 k 个候选中随机选择⼀个(⾮等概率)。
+ zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
en_US: Randomly select one from k candidates (non-equal probability).
required: false
diff --git a/api/core/model_runtime/model_providers/spark/llm/spark-4.yaml b/api/core/model_runtime/model_providers/spark/llm/spark-4.yaml
new file mode 100644
index 00000000000000..4b0bf27029ff76
--- /dev/null
+++ b/api/core/model_runtime/model_providers/spark/llm/spark-4.yaml
@@ -0,0 +1,33 @@
+model: spark-4
+label:
+ en_US: Spark V4.0
+model_type: llm
+model_properties:
+ mode: chat
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ default: 0.5
+ help:
+ zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
+ en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
+ - name: max_tokens
+ use_template: max_tokens
+ default: 4096
+ min: 1
+ max: 8192
+ help:
+ zh_Hans: 模型回答的tokens的最大长度。
+      en_US: The maximum length of tokens in the model response.
+ - name: top_k
+ label:
+ zh_Hans: 取样数量
+ en_US: Top k
+ type: int
+ default: 4
+ min: 1
+ max: 6
+ help:
+ zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
+ en_US: Randomly select one from k candidates (non-equal probability).
+ required: false
diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py
index 46795c8c3cc4aa..5b5d5def1bd541 100644
--- a/api/core/ops/langfuse_trace/langfuse_trace.py
+++ b/api/core/ops/langfuse_trace/langfuse_trace.py
@@ -121,7 +121,9 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
node_type = node_execution.node_type
status = node_execution.status
if node_type == "llm":
- inputs = json.loads(node_execution.process_data).get("prompts", {})
+ inputs = json.loads(node_execution.process_data).get(
+ "prompts", {}
+ ) if node_execution.process_data else {}
else:
inputs = json.loads(node_execution.inputs) if node_execution.inputs else {}
outputs = (
@@ -213,7 +215,9 @@ def message_trace(
end_user_data: EndUser = db.session.query(EndUser).filter(
EndUser.id == message_data.from_end_user_id
).first()
- user_id = end_user_data.session_id
+ if end_user_data is not None:
+ user_id = end_user_data.session_id
+ metadata["user_id"] = user_id
trace_data = LangfuseTrace(
id=message_id,
diff --git a/api/core/ops/langsmith_trace/langsmith_trace.py b/api/core/ops/langsmith_trace/langsmith_trace.py
index 422830fb1e4df4..0fee076d55a47b 100644
--- a/api/core/ops/langsmith_trace/langsmith_trace.py
+++ b/api/core/ops/langsmith_trace/langsmith_trace.py
@@ -114,7 +114,9 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
node_type = node_execution.node_type
status = node_execution.status
if node_type == "llm":
- inputs = json.loads(node_execution.process_data).get("prompts", {})
+ inputs = json.loads(node_execution.process_data).get(
+ "prompts", {}
+ ) if node_execution.process_data else {}
else:
inputs = json.loads(node_execution.inputs) if node_execution.inputs else {}
outputs = (
@@ -181,13 +183,15 @@ def message_trace(self, trace_info: MessageTraceInfo):
message_id = message_data.id
user_id = message_data.from_account_id
+ metadata["user_id"] = user_id
+
if message_data.from_end_user_id:
end_user_data: EndUser = db.session.query(EndUser).filter(
EndUser.id == message_data.from_end_user_id
- ).first().session_id
- end_user_id = end_user_data.session_id
- metadata["end_user_id"] = end_user_id
- metadata["user_id"] = user_id
+ ).first()
+ if end_user_data is not None:
+ end_user_id = end_user_data.session_id
+ metadata["end_user_id"] = end_user_id
message_run = LangSmithRunModel(
input_tokens=trace_info.message_tokens,
diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py
index 2ce12f28d12325..ff15aa999b4b46 100644
--- a/api/core/ops/ops_trace_manager.py
+++ b/api/core/ops/ops_trace_manager.py
@@ -352,10 +352,17 @@ def workflow_trace(self, workflow_run: WorkflowRun, conversation_id):
query = workflow_run_inputs.get("query") or workflow_run_inputs.get("sys.query") or ""
# get workflow_app_log_id
- workflow_app_log_data = db.session.query(WorkflowAppLog).filter_by(workflow_run_id=workflow_run.id).first()
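+        # Scope the lookup to this run's tenant and app instead of filtering on workflow_run_id alone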
+ workflow_app_log_data = db.session.query(WorkflowAppLog).filter_by(
+ tenant_id=tenant_id,
+ app_id=workflow_run.app_id,
+ workflow_run_id=workflow_run.id
+ ).first()
workflow_app_log_id = str(workflow_app_log_data.id) if workflow_app_log_data else None
# get message_id
- message_data = db.session.query(Message.id).filter_by(workflow_run_id=workflow_run_id).first()
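+        # Filtering on (conversation_id, workflow_run_id) matches the new message_workflow_run_id_idx index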
+ message_data = db.session.query(Message.id).filter_by(
+ conversation_id=conversation_id,
+ workflow_run_id=workflow_run_id
+ ).first()
message_id = str(message_data.id) if message_data else None
metadata = {
@@ -660,7 +667,7 @@ def generate_name_trace(self, conversation_id, timer, **kwargs):
trace_manager_timer = None
trace_manager_queue = queue.Queue()
-trace_manager_interval = int(os.getenv("TRACE_QUEUE_MANAGER_INTERVAL", 1))
+trace_manager_interval = int(os.getenv("TRACE_QUEUE_MANAGER_INTERVAL", 5))
trace_manager_batch_size = int(os.getenv("TRACE_QUEUE_MANAGER_BATCH_SIZE", 100))
diff --git a/api/core/tools/provider/builtin/json_process/_assets/icon.svg b/api/core/tools/provider/builtin/json_process/_assets/icon.svg
new file mode 100644
index 00000000000000..b123983836962a
--- /dev/null
+++ b/api/core/tools/provider/builtin/json_process/_assets/icon.svg
@@ -0,0 +1,358 @@
+
\ No newline at end of file
diff --git a/api/core/tools/provider/builtin/json_process/json_process.py b/api/core/tools/provider/builtin/json_process/json_process.py
new file mode 100644
index 00000000000000..f6eed3c6282314
--- /dev/null
+++ b/api/core/tools/provider/builtin/json_process/json_process.py
@@ -0,0 +1,17 @@
+from typing import Any
+
+from core.tools.errors import ToolProviderCredentialValidationError
+from core.tools.provider.builtin.json_process.tools.parse import JSONParseTool
+from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
+
+
+class JsonExtractProvider(BuiltinToolProviderController):
+ def _validate_credentials(self, credentials: dict[str, Any]) -> None:
+ try:
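+            # Smoke-test the provider by extracting "$.name" from a small sample document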
+ JSONParseTool().invoke(user_id='',
+ tool_parameters={
+ 'content': '{"name": "John", "age": 30, "city": "New York"}',
+ 'json_filter': '$.name'
+ })
+ except Exception as e:
+ raise ToolProviderCredentialValidationError(str(e))
\ No newline at end of file
diff --git a/api/core/tools/provider/builtin/json_process/json_process.yaml b/api/core/tools/provider/builtin/json_process/json_process.yaml
new file mode 100644
index 00000000000000..c7896bbea7a69f
--- /dev/null
+++ b/api/core/tools/provider/builtin/json_process/json_process.yaml
@@ -0,0 +1,14 @@
+identity:
+ author: Mingwei Zhang
+ name: json_process
+ label:
+ en_US: JSON Process
+ zh_Hans: JSON 处理
+ pt_BR: JSON Process
+ description:
+ en_US: Tools for processing JSON content using jsonpath_ng
+ zh_Hans: 利用 jsonpath_ng 处理 JSON 内容的工具
+ pt_BR: Tools for processing JSON content using jsonpath_ng
+ icon: icon.svg
+ tags:
+ - utilities
diff --git a/api/core/tools/provider/builtin/json_process/tools/delete.py b/api/core/tools/provider/builtin/json_process/tools/delete.py
new file mode 100644
index 00000000000000..b09e4948813f5b
--- /dev/null
+++ b/api/core/tools/provider/builtin/json_process/tools/delete.py
@@ -0,0 +1,59 @@
+import json
+from typing import Any, Union
+
+from jsonpath_ng import parse
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+
+
+class JSONDeleteTool(BuiltinTool):
+ def _invoke(self,
+ user_id: str,
+ tool_parameters: dict[str, Any],
+ ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
+ """
+ Invoke the JSON delete tool
+ """
+ # Get content
+ content = tool_parameters.get('content', '')
+ if not content:
+ return self.create_text_message('Invalid parameter content')
+
+ # Get query
+ query = tool_parameters.get('query', '')
+ if not query:
+ return self.create_text_message('Invalid parameter query')
+
+ try:
+ result = self._delete(content, query)
+ return self.create_text_message(str(result))
+ except Exception as e:
+ return self.create_text_message(f'Failed to delete JSON content: {str(e)}')
+
+ def _delete(self, origin_json: str, query: str) -> str:
+ try:
+ input_data = json.loads(origin_json)
+ expr = parse('$.' + query.lstrip('$.')) # Ensure query path starts with $
+
+ matches = expr.find(input_data)
+
+ if not matches:
+ return json.dumps(input_data, ensure_ascii=True) # No changes if no matches found
+
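+            # match.context.value is the parent container (dict or list) of the matched element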
+ for match in matches:
+ if isinstance(match.context.value, dict):
+ # Delete key from dictionary
+ del match.context.value[match.path.fields[-1]]
+ elif isinstance(match.context.value, list):
+ # Remove item from list
+ match.context.value.remove(match.value)
+ else:
+ # For other cases, we might want to set to None or remove the parent key
+ parent = match.context.parent
+ if parent:
+ del parent.value[match.path.fields[-1]]
+
+ return json.dumps(input_data, ensure_ascii=True)
+ except Exception as e:
+ raise Exception(f"Delete operation failed: {str(e)}")
\ No newline at end of file
diff --git a/api/core/tools/provider/builtin/json_process/tools/delete.yaml b/api/core/tools/provider/builtin/json_process/tools/delete.yaml
new file mode 100644
index 00000000000000..4cfa90b861d48f
--- /dev/null
+++ b/api/core/tools/provider/builtin/json_process/tools/delete.yaml
@@ -0,0 +1,40 @@
+identity:
+ name: json_delete
+ author: Mingwei Zhang
+ label:
+ en_US: JSON Delete
+ zh_Hans: JSON 删除
+ pt_BR: JSON Delete
+description:
+ human:
+ en_US: A tool for deleting JSON content
+ zh_Hans: 一个删除 JSON 内容的工具
+ pt_BR: A tool for deleting JSON content
+ llm: A tool for deleting JSON content
+parameters:
+ - name: content
+ type: string
+ required: true
+ label:
+ en_US: JSON content
+ zh_Hans: JSON 内容
+ pt_BR: JSON content
+ human_description:
+ en_US: JSON content to be processed
+ zh_Hans: 待处理的 JSON 内容
+ pt_BR: JSON content to be processed
+ llm_description: JSON content to be processed
+ form: llm
+ - name: query
+ type: string
+ required: true
+ label:
+ en_US: Query
+ zh_Hans: 查询
+ pt_BR: Query
+ human_description:
+ en_US: JSONPath query to locate the element to delete
+ zh_Hans: 用于定位要删除元素的 JSONPath 查询
+ pt_BR: JSONPath query to locate the element to delete
+ llm_description: JSONPath query to locate the element to delete
+ form: llm
diff --git a/api/core/tools/provider/builtin/json_process/tools/insert.py b/api/core/tools/provider/builtin/json_process/tools/insert.py
new file mode 100644
index 00000000000000..aa5986e2b45860
--- /dev/null
+++ b/api/core/tools/provider/builtin/json_process/tools/insert.py
@@ -0,0 +1,97 @@
+import json
+from typing import Any, Union
+
+from jsonpath_ng import parse
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+
+
+class JSONInsertTool(BuiltinTool):
+ def _invoke(self,
+ user_id: str,
+ tool_parameters: dict[str, Any],
+ ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
+ """
+        Invoke the JSON insert tool
+ """
+ # get content
+ content = tool_parameters.get('content', '')
+ if not content:
+ return self.create_text_message('Invalid parameter content')
+
+ # get query
+ query = tool_parameters.get('query', '')
+ if not query:
+ return self.create_text_message('Invalid parameter query')
+
+ # get new value
+ new_value = tool_parameters.get('new_value', '')
+ if not new_value:
+ return self.create_text_message('Invalid parameter new_value')
+
+ # get insert position
+ index = tool_parameters.get('index')
+
+ # get create path
+ create_path = tool_parameters.get('create_path', False)
+
+ try:
+ result = self._insert(content, query, new_value, index, create_path)
+ return self.create_text_message(str(result))
+ except Exception:
+ return self.create_text_message('Failed to insert JSON content')
+
+
+ def _insert(self, origin_json, query, new_value, index=None, create_path=False):
+ try:
+ input_data = json.loads(origin_json)
+ expr = parse(query)
+ try:
+ new_value = json.loads(new_value)
+ except json.JSONDecodeError:
+                pass  # not valid JSON; keep new_value as a plain string
+
+ matches = expr.find(input_data)
+
+ if not matches and create_path:
+ # create new path
+ path_parts = query.strip('$').strip('.').split('.')
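+            # e.g. "$.a.b[0].c" -> ["a", "b[0]", "c"]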
+ current = input_data
+ for i, part in enumerate(path_parts):
+ if '[' in part and ']' in part:
+ # process array index
+ array_name, index = part.split('[')
+ index = int(index.rstrip(']'))
+ if array_name not in current:
+ current[array_name] = []
+ while len(current[array_name]) <= index:
+ current[array_name].append({})
+ current = current[array_name][index]
+ else:
+ if i == len(path_parts) - 1:
+ current[part] = new_value
+ elif part not in current:
+ current[part] = {}
+ current = current[part]
+ else:
+ for match in matches:
+ if isinstance(match.value, dict):
+ # insert new value into dict
+ if isinstance(new_value, dict):
+ match.value.update(new_value)
+ else:
+ raise ValueError("Cannot insert non-dict value into dict")
+ elif isinstance(match.value, list):
+ # insert new value into list
+ if index is None:
+ match.value.append(new_value)
+ else:
+ match.value.insert(int(index), new_value)
+ else:
+ # replace old value with new value
+ match.full_path.update(input_data, new_value)
+
+ return json.dumps(input_data, ensure_ascii=True)
+ except Exception as e:
+ return str(e)
\ No newline at end of file
diff --git a/api/core/tools/provider/builtin/json_process/tools/insert.yaml b/api/core/tools/provider/builtin/json_process/tools/insert.yaml
new file mode 100644
index 00000000000000..66a6ff99291cdb
--- /dev/null
+++ b/api/core/tools/provider/builtin/json_process/tools/insert.yaml
@@ -0,0 +1,77 @@
+identity:
+ name: json_insert
+ author: Mingwei Zhang
+ label:
+ en_US: JSON Insert
+ zh_Hans: JSON 插入
+ pt_BR: JSON Insert
+description:
+ human:
+ en_US: A tool for inserting JSON content
+ zh_Hans: 一个插入 JSON 内容的工具
+ pt_BR: A tool for inserting JSON content
+ llm: A tool for inserting JSON content
+parameters:
+ - name: content
+ type: string
+ required: true
+ label:
+ en_US: JSON content
+ zh_Hans: JSON 内容
+ pt_BR: JSON content
+ human_description:
+ en_US: JSON content
+ zh_Hans: JSON 内容
+ pt_BR: JSON content
+ llm_description: JSON content to be processed
+ form: llm
+ - name: query
+ type: string
+ required: true
+ label:
+ en_US: Query
+ zh_Hans: 查询
+ pt_BR: Query
+ human_description:
+      en_US: JSONPath query to locate the element to insert
+      zh_Hans: 用于定位插入元素的 JSONPath 查询
+      pt_BR: JSONPath query to locate the element to insert
+ llm_description: JSONPath query to locate the element to insert
+ form: llm
+ - name: new_value
+ type: string
+ required: true
+ label:
+ en_US: New Value
+ zh_Hans: 新值
+ pt_BR: New Value
+ human_description:
+ en_US: New Value
+ zh_Hans: 新值
+ pt_BR: New Value
+ llm_description: New Value to insert
+ form: llm
+ - name: create_path
+ type: select
+ required: true
+ default: "False"
+ label:
+ en_US: Whether to create a path
+ zh_Hans: 是否创建路径
+ pt_BR: Whether to create a path
+ human_description:
+ en_US: Whether to create a path when the path does not exist
+ zh_Hans: 查询路径不存在时是否创建路径
+ pt_BR: Whether to create a path when the path does not exist
+ options:
+ - value: "True"
+ label:
+ en_US: "Yes"
+ zh_Hans: 是
+ pt_BR: "Yes"
+ - value: "False"
+ label:
+ en_US: "No"
+ zh_Hans: 否
+ pt_BR: "No"
+ form: form
diff --git a/api/core/tools/provider/builtin/json_process/tools/parse.py b/api/core/tools/provider/builtin/json_process/tools/parse.py
new file mode 100644
index 00000000000000..b246afc07e8b12
--- /dev/null
+++ b/api/core/tools/provider/builtin/json_process/tools/parse.py
@@ -0,0 +1,51 @@
+import json
+from typing import Any, Union
+
+from jsonpath_ng import parse
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+
+
+class JSONParseTool(BuiltinTool):
+ def _invoke(self,
+ user_id: str,
+ tool_parameters: dict[str, Any],
+ ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
+ """
+        Invoke the JSON parse tool
+ """
+ # get content
+ content = tool_parameters.get('content', '')
+ if not content:
+ return self.create_text_message('Invalid parameter content')
+
+ # get json filter
+ json_filter = tool_parameters.get('json_filter', '')
+ if not json_filter:
+ return self.create_text_message('Invalid parameter json_filter')
+
+ try:
+ result = self._extract(content, json_filter)
+ return self.create_text_message(str(result))
+ except Exception:
+ return self.create_text_message('Failed to extract JSON content')
+
+ # Extract data from JSON content
+ def _extract(self, content: str, json_filter: str) -> str:
+ try:
+ input_data = json.loads(content)
+ expr = parse(json_filter)
+ result = [match.value for match in expr.find(input_data)]
+
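+            # Unwrap single matches so a query like "$.name" returns the bare value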
+ if len(result) == 1:
+ result = result[0]
+
+ if isinstance(result, dict | list):
+ return json.dumps(result, ensure_ascii=True)
+ elif isinstance(result, str | int | float | bool) or result is None:
+ return str(result)
+ else:
+ return repr(result)
+ except Exception as e:
+ return str(e)
\ No newline at end of file
diff --git a/api/core/tools/provider/builtin/json_process/tools/parse.yaml b/api/core/tools/provider/builtin/json_process/tools/parse.yaml
new file mode 100644
index 00000000000000..b619dcde94171d
--- /dev/null
+++ b/api/core/tools/provider/builtin/json_process/tools/parse.yaml
@@ -0,0 +1,40 @@
+identity:
+ name: parse
+ author: Mingwei Zhang
+ label:
+ en_US: JSON Parse
+ zh_Hans: JSON 解析
+ pt_BR: JSON Parse
+description:
+ human:
+ en_US: A tool for extracting JSON objects
+ zh_Hans: 一个解析JSON对象的工具
+ pt_BR: A tool for extracting JSON objects
+ llm: A tool for extracting JSON objects
+parameters:
+ - name: content
+ type: string
+ required: true
+ label:
+ en_US: JSON data
+ zh_Hans: JSON数据
+ pt_BR: JSON data
+ human_description:
+ en_US: JSON data
+ zh_Hans: JSON数据
+      pt_BR: JSON data
+ llm_description: JSON data to be processed
+ form: llm
+ - name: json_filter
+ type: string
+ required: true
+ label:
+ en_US: JSON filter
+ zh_Hans: JSON解析对象
+ pt_BR: JSON filter
+ human_description:
+ en_US: JSON fields to be parsed
+ zh_Hans: 需要解析的 JSON 字段
+ pt_BR: JSON fields to be parsed
+ llm_description: JSON fields to be parsed
+ form: llm
diff --git a/api/core/tools/provider/builtin/json_process/tools/replace.py b/api/core/tools/provider/builtin/json_process/tools/replace.py
new file mode 100644
index 00000000000000..9f127b9d067319
--- /dev/null
+++ b/api/core/tools/provider/builtin/json_process/tools/replace.py
@@ -0,0 +1,106 @@
+import json
+from typing import Any, Union
+
+from jsonpath_ng import parse
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+
+
+class JSONReplaceTool(BuiltinTool):
+ def _invoke(self,
+ user_id: str,
+ tool_parameters: dict[str, Any],
+ ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
+ """
+        Invoke the JSON replace tool
+ """
+ # get content
+ content = tool_parameters.get('content', '')
+ if not content:
+ return self.create_text_message('Invalid parameter content')
+
+ # get query
+ query = tool_parameters.get('query', '')
+ if not query:
+ return self.create_text_message('Invalid parameter query')
+
+ # get replace value
+ replace_value = tool_parameters.get('replace_value', '')
+ if not replace_value:
+ return self.create_text_message('Invalid parameter replace_value')
+
+ # get replace model
+ replace_model = tool_parameters.get('replace_model', '')
+ if not replace_model:
+ return self.create_text_message('Invalid parameter replace_model')
+
+ try:
+ if replace_model == 'pattern':
+ # get replace pattern
+ replace_pattern = tool_parameters.get('replace_pattern', '')
+ if not replace_pattern:
+ return self.create_text_message('Invalid parameter replace_pattern')
+ result = self._replace_pattern(content, query, replace_pattern, replace_value)
+ elif replace_model == 'key':
+ result = self._replace_key(content, query, replace_value)
+ elif replace_model == 'value':
+ result = self._replace_value(content, query, replace_value)
+ return self.create_text_message(str(result))
+ except Exception:
+ return self.create_text_message('Failed to replace JSON content')
+
+ # Replace pattern
+ def _replace_pattern(self, content: str, query: str, replace_pattern: str, replace_value: str) -> str:
+ try:
+ input_data = json.loads(content)
+ expr = parse(query)
+
+ matches = expr.find(input_data)
+
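+            # Replace the substring in each matched string value, then write it back at the matched path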
+ for match in matches:
+ new_value = match.value.replace(replace_pattern, replace_value)
+ match.full_path.update(input_data, new_value)
+
+ return json.dumps(input_data, ensure_ascii=True)
+ except Exception as e:
+ return str(e)
+
+ # Replace key
+ def _replace_key(self, content: str, query: str, replace_value: str) -> str:
+ try:
+ input_data = json.loads(content)
+ expr = parse(query)
+
+ matches = expr.find(input_data)
+
+            for match in matches:
+                parent = match.context.value
+                # match.path exposes .fields only for dict keys; skip list-index matches
+                old_key = match.path.fields[0] if hasattr(match.path, 'fields') else None
+                if old_key is None:
+                    continue
+                if isinstance(parent, dict):
+                    if old_key in parent:
+                        value = parent.pop(old_key)
+                        parent[replace_value] = value
+                elif isinstance(parent, list):
+                    for item in parent:
+                        if isinstance(item, dict) and old_key in item:
+                            value = item.pop(old_key)
+                            item[replace_value] = value
+ return json.dumps(input_data, ensure_ascii=True)
+ except Exception as e:
+ return str(e)
+
+ # Replace value
+ def _replace_value(self, content: str, query: str, replace_value: str) -> str:
+ try:
+ input_data = json.loads(content)
+ expr = parse(query)
+
+ matches = expr.find(input_data)
+
+ for match in matches:
+ match.full_path.update(input_data, replace_value)
+
+ return json.dumps(input_data, ensure_ascii=True)
+ except Exception as e:
+ return str(e)
\ No newline at end of file
diff --git a/api/core/tools/provider/builtin/json_process/tools/replace.yaml b/api/core/tools/provider/builtin/json_process/tools/replace.yaml
new file mode 100644
index 00000000000000..556be5e8b2bf66
--- /dev/null
+++ b/api/core/tools/provider/builtin/json_process/tools/replace.yaml
@@ -0,0 +1,95 @@
+identity:
+ name: json_replace
+ author: Mingwei Zhang
+ label:
+ en_US: JSON Replace
+ zh_Hans: JSON 替换
+ pt_BR: JSON Replace
+description:
+ human:
+ en_US: A tool for replacing JSON content
+ zh_Hans: 一个替换 JSON 内容的工具
+ pt_BR: A tool for replacing JSON content
+ llm: A tool for replacing JSON content
+parameters:
+ - name: content
+ type: string
+ required: true
+ label:
+ en_US: JSON content
+ zh_Hans: JSON 内容
+ pt_BR: JSON content
+ human_description:
+ en_US: JSON content
+ zh_Hans: JSON 内容
+ pt_BR: JSON content
+ llm_description: JSON content to be processed
+ form: llm
+ - name: query
+ type: string
+ required: true
+ label:
+ en_US: Query
+ zh_Hans: 查询
+ pt_BR: Query
+ human_description:
+ en_US: Query
+ zh_Hans: 查询
+ pt_BR: Query
+ llm_description: JSONPath query to locate the element to replace
+ form: llm
+ - name: replace_pattern
+ type: string
+ required: false
+ label:
+ en_US: String to be replaced
+ zh_Hans: 待替换字符串
+ pt_BR: String to be replaced
+ human_description:
+ en_US: String to be replaced
+ zh_Hans: 待替换字符串
+ pt_BR: String to be replaced
+ llm_description: String to be replaced
+ form: llm
+ - name: replace_value
+ type: string
+ required: true
+ label:
+ en_US: Replace Value
+ zh_Hans: 替换值
+ pt_BR: Replace Value
+ human_description:
+ en_US: New Value
+      zh_Hans: 新值
+ pt_BR: New Value
+ llm_description: New Value to replace
+ form: llm
+ - name: replace_model
+ type: select
+ required: true
+ default: pattern
+ label:
+ en_US: Replace Model
+ zh_Hans: 替换模式
+ pt_BR: Replace Model
+ human_description:
+ en_US: Replace Model
+ zh_Hans: 替换模式
+ pt_BR: Replace Model
+ options:
+ - value: key
+ label:
+ en_US: replace key
+ zh_Hans: 键替换
+ pt_BR: replace key
+ - value: value
+ label:
+ en_US: replace value
+ zh_Hans: 值替换
+ pt_BR: replace value
+ - value: pattern
+ label:
+ en_US: replace string
+ zh_Hans: 字符串替换
+ pt_BR: replace string
+ form: form
diff --git a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py
index ea0cdf96e7d0f3..2fb96679e4283a 100644
--- a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py
+++ b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py
@@ -131,7 +131,7 @@ def _run(self, variable_pool: VariablePool) -> NodeRunResult:
return NodeRunResult(
status=WorkflowNodeExecutionStatus.FAILED,
inputs=inputs,
- process_data={},
+ process_data=process_data,
outputs={
'__is_success': 0,
'__reason': str(e)
diff --git a/api/migrations/versions/b2602e131636_add_workflow_run_id_index_for_message.py b/api/migrations/versions/b2602e131636_add_workflow_run_id_index_for_message.py
new file mode 100644
index 00000000000000..c9a6a5a5a7d90f
--- /dev/null
+++ b/api/migrations/versions/b2602e131636_add_workflow_run_id_index_for_message.py
@@ -0,0 +1,32 @@
+"""add workflow_run_id index for message
+
+Revision ID: b2602e131636
+Revises: 63f9175e515b
+Create Date: 2024-06-29 12:16:51.646346
+
+"""
+from alembic import op
+
+import models as models
+
+# revision identifiers, used by Alembic.
+revision = 'b2602e131636'
+down_revision = '63f9175e515b'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ with op.batch_alter_table('messages', schema=None) as batch_op:
+ batch_op.create_index('message_workflow_run_id_idx', ['conversation_id', 'workflow_run_id'], unique=False)
+
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ with op.batch_alter_table('messages', schema=None) as batch_op:
+ batch_op.drop_index('message_workflow_run_id_idx')
+
+ # ### end Alembic commands ###
diff --git a/api/models/account.py b/api/models/account.py
index 784c760df3b3b4..9187d053133fc9 100644
--- a/api/models/account.py
+++ b/api/models/account.py
@@ -169,8 +169,7 @@ class Tenant(db.Model):
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
- def get_accounts(self) -> list[db.Model]:
- Account = db.Model
+ def get_accounts(self) -> list[Account]:
return db.session.query(Account).filter(
Account.id == TenantAccountJoin.account_id,
TenantAccountJoin.tenant_id == self.id
diff --git a/api/models/model.py b/api/models/model.py
index 07d7f6d8917f8e..f59e8ebb7c0f58 100644
--- a/api/models/model.py
+++ b/api/models/model.py
@@ -626,6 +626,7 @@ class Message(db.Model):
db.Index('message_conversation_id_idx', 'conversation_id'),
db.Index('message_end_user_idx', 'app_id', 'from_source', 'from_end_user_id'),
db.Index('message_account_idx', 'app_id', 'from_source', 'from_account_id'),
+ db.Index('message_workflow_run_id_idx', 'conversation_id', 'workflow_run_id')
)
id = db.Column(StringUUID, server_default=db.text('uuid_generate_v4()'))
diff --git a/api/poetry.lock b/api/poetry.lock
index 2fdc13bb969d70..ce2adec183ff53 100644
--- a/api/poetry.lock
+++ b/api/poetry.lock
@@ -3702,6 +3702,20 @@ files = [
{file = "joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e"},
]
+[[package]]
+name = "jsonpath-ng"
+version = "1.6.1"
+description = "A final implementation of JSONPath for Python that aims to be standard compliant, including arithmetic and binary comparison operators and providing clear AST for metaprogramming."
+optional = false
+python-versions = "*"
+files = [
+ {file = "jsonpath-ng-1.6.1.tar.gz", hash = "sha256:086c37ba4917304850bd837aeab806670224d3f038fe2833ff593a672ef0a5fa"},
+ {file = "jsonpath_ng-1.6.1-py3-none-any.whl", hash = "sha256:8f22cd8273d7772eea9aaa84d922e0841aa36fdb8a2c6b7f6c3791a16a9bc0be"},
+]
+
+[package.dependencies]
+ply = "*"
+
[[package]]
name = "kaleido"
version = "0.2.1"
@@ -9081,4 +9095,4 @@ testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
-content-hash = "5f6f7d8114ece4f7e865fdf9a1fc38a86238d6bc80333b878d50c95a7885b9f5"
+content-hash = "d40bed69caecf3a2bcd5ec054288d7cb36a9a231fff210d4f1a42745dd3bf604"
diff --git a/api/pyproject.toml b/api/pyproject.toml
index ed85e79d1ab4a6..89539f93d8f0f5 100644
--- a/api/pyproject.toml
+++ b/api/pyproject.toml
@@ -187,6 +187,7 @@ arxiv = "2.1.0"
matplotlib = "~3.8.2"
newspaper3k = "0.2.8"
duckduckgo-search = "~6.1.5"
+jsonpath-ng = "1.6.1"
numexpr = "~2.9.0"
opensearch-py = "2.4.0"
qrcode = "~7.4.2"
@@ -246,4 +247,4 @@ optional = true
[tool.poetry.group.lint.dependencies]
ruff = "~0.4.8"
-dotenv-linter = "~0.5.0"
+dotenv-linter = "~0.5.0"
\ No newline at end of file
diff --git a/api/services/account_service.py b/api/services/account_service.py
index aec8227e4beb08..3112ad80a8b871 100644
--- a/api/services/account_service.py
+++ b/api/services/account_service.py
@@ -17,6 +17,7 @@
from libs.password import compare_password, hash_password, valid_password
from libs.rsa import generate_key_pair
from models.account import *
+from models.model import DifySetup
from services.errors.account import (
AccountAlreadyInTenantError,
AccountLoginError,
@@ -119,10 +120,11 @@ def update_account_password(account, password, new_password):
return account
@staticmethod
- def create_account(email: str, name: str, interface_language: str,
- password: str = None,
- interface_theme: str = 'light',
- timezone: str = 'America/New_York', ) -> Account:
+ def create_account(email: str,
+ name: str,
+ interface_language: str,
+ password: Optional[str] = None,
+ interface_theme: str = 'light') -> Account:
"""create account"""
account = Account()
account.email = email
@@ -200,7 +202,6 @@ def update_last_login(account: Account, *, ip_address: str) -> None:
account.last_login_ip = ip_address
db.session.add(account)
db.session.commit()
- logging.info(f'Account {account.id} logged in successfully.')
@staticmethod
def login(account: Account, *, ip_address: Optional[str] = None):
@@ -466,8 +467,51 @@ def _get_invitation_token_key(cls, token: str) -> str:
return f'member_invite:token:{token}'
@classmethod
- def register(cls, email, name, password: str = None, open_id: str = None, provider: str = None,
- language: str = None, status: AccountStatus = None) -> Account:
+ def setup(cls, email: str, name: str, password: str, ip_address: str) -> None:
+ """
+ Setup dify
+
+ :param email: email
+ :param name: username
+ :param password: password
+ :param ip_address: ip address
+ """
+ try:
+ # Register
+ account = AccountService.create_account(
+ email=email,
+ name=name,
+ interface_language=languages[0],
+ password=password,
+ )
+
+ account.last_login_ip = ip_address
+ account.initialized_at = datetime.now(timezone.utc).replace(tzinfo=None)
+
+ TenantService.create_owner_tenant_if_not_exist(account)
+
+ dify_setup = DifySetup(
+ version=current_app.config['CURRENT_VERSION']
+ )
+ db.session.add(dify_setup)
+ db.session.commit()
+ except Exception as e:
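+            # Roll back everything created so far so that setup can be retried cleanly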
+ db.session.query(DifySetup).delete()
+ db.session.query(TenantAccountJoin).delete()
+ db.session.query(Account).delete()
+ db.session.query(Tenant).delete()
+ db.session.commit()
+
+ logging.exception(f'Setup failed: {e}')
+ raise ValueError(f'Setup failed: {e}')
+
+ @classmethod
+ def register(cls, email, name,
+ password: Optional[str] = None,
+ open_id: Optional[str] = None,
+ provider: Optional[str] = None,
+ language: Optional[str] = None,
+ status: Optional[AccountStatus] = None) -> Account:
db.session.begin_nested()
"""Register account"""
try:
diff --git a/docker/docker-compose.chroma.yaml b/docker-legacy/docker-compose.chroma.yaml
similarity index 100%
rename from docker/docker-compose.chroma.yaml
rename to docker-legacy/docker-compose.chroma.yaml
diff --git a/docker-legacy/docker-compose.middleware.yaml b/docker-legacy/docker-compose.middleware.yaml
new file mode 100644
index 00000000000000..38760901b186ca
--- /dev/null
+++ b/docker-legacy/docker-compose.middleware.yaml
@@ -0,0 +1,109 @@
+version: '3'
+services:
+ # The postgres database.
+ db:
+ image: postgres:15-alpine
+ restart: always
+ environment:
+ # The password for the default postgres user.
+ POSTGRES_PASSWORD: difyai123456
+ # The name of the default postgres database.
+ POSTGRES_DB: dify
+ # postgres data directory
+ PGDATA: /var/lib/postgresql/data/pgdata
+ volumes:
+ - ./volumes/db/data:/var/lib/postgresql/data
+ ports:
+ - "5432:5432"
+
+ # The redis cache.
+ redis:
+ image: redis:6-alpine
+ restart: always
+ volumes:
+ # Mount the redis data directory to the container.
+ - ./volumes/redis/data:/data
+ # Set the redis password when starting the redis server.
+ command: redis-server --requirepass difyai123456
+ ports:
+ - "6379:6379"
+
+ # The Weaviate vector store.
+ weaviate:
+ image: semitechnologies/weaviate:1.19.0
+ restart: always
+ volumes:
+ # Mount the Weaviate data directory to the container.
+ - ./volumes/weaviate:/var/lib/weaviate
+ environment:
+ # The Weaviate configurations
+ # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
+ QUERY_DEFAULTS_LIMIT: 25
+ AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'false'
+ PERSISTENCE_DATA_PATH: '/var/lib/weaviate'
+ DEFAULT_VECTORIZER_MODULE: 'none'
+ CLUSTER_HOSTNAME: 'node1'
+ AUTHENTICATION_APIKEY_ENABLED: 'true'
+ AUTHENTICATION_APIKEY_ALLOWED_KEYS: 'WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih'
+ AUTHENTICATION_APIKEY_USERS: 'hello@dify.ai'
+ AUTHORIZATION_ADMINLIST_ENABLED: 'true'
+ AUTHORIZATION_ADMINLIST_USERS: 'hello@dify.ai'
+ ports:
+ - "8080:8080"
+
+ # The DifySandbox
+ sandbox:
+ image: langgenius/dify-sandbox:0.2.1
+ restart: always
+ environment:
+ # The DifySandbox configurations
+ # Make sure you are changing this key for your deployment with a strong key.
+ # You can generate a strong key using `openssl rand -base64 42`.
+ API_KEY: dify-sandbox
+ GIN_MODE: 'release'
+ WORKER_TIMEOUT: 15
+ ENABLE_NETWORK: 'true'
+ HTTP_PROXY: 'http://ssrf_proxy:3128'
+ HTTPS_PROXY: 'http://ssrf_proxy:3128'
+ SANDBOX_PORT: 8194
+ volumes:
+ - ./volumes/sandbox/dependencies:/dependencies
+ networks:
+ - ssrf_proxy_network
+
+ # ssrf_proxy server
+ # for more information, please refer to
+ # https://docs.dify.ai/getting-started/install-self-hosted/install-faq#id-16.-why-is-ssrf_proxy-needed
+ ssrf_proxy:
+ image: ubuntu/squid:latest
+ restart: always
+ ports:
+ - "3128:3128"
+ - "8194:8194"
+ volumes:
+ # Please modify the squid.conf file to fit your network environment.
+ - ./volumes/ssrf_proxy/squid.conf:/etc/squid/squid.conf
+ networks:
+ - ssrf_proxy_network
+ - default
+ # Qdrant vector store.
+ # uncomment to use qdrant as vector store.
+ # (if uncommented, you need to comment out the weaviate service above,
+ # and set VECTOR_STORE to qdrant in the api & worker service.)
+ # qdrant:
+ # image: qdrant/qdrant:1.7.3
+ # restart: always
+ # volumes:
+ # - ./volumes/qdrant:/qdrant/storage
+ # environment:
+ # QDRANT_API_KEY: 'difyai123456'
+ # ports:
+ # - "6333:6333"
+ # - "6334:6334"
+
+
+networks:
+ # Create a network shared by sandbox, api and ssrf_proxy; it has no access to the outside network.
+ ssrf_proxy_network:
+ driver: bridge
+ internal: true
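For reference, the legacy middleware stack defined above would be brought up with something like this sketch (run from the repository root; not a command taken from this diff):

```bash
# Start only the middleware services (db, redis, weaviate, sandbox, ssrf_proxy)
# from the legacy directory.
cd docker-legacy
docker compose -f docker-compose.middleware.yaml up -d
```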
diff --git a/docker/docker-compose.milvus.yaml b/docker-legacy/docker-compose.milvus.yaml
similarity index 100%
rename from docker/docker-compose.milvus.yaml
rename to docker-legacy/docker-compose.milvus.yaml
diff --git a/docker/docker-compose.opensearch.yml b/docker-legacy/docker-compose.opensearch.yml
similarity index 100%
rename from docker/docker-compose.opensearch.yml
rename to docker-legacy/docker-compose.opensearch.yml
diff --git a/docker/docker-compose.oracle.yaml b/docker-legacy/docker-compose.oracle.yaml
similarity index 100%
rename from docker/docker-compose.oracle.yaml
rename to docker-legacy/docker-compose.oracle.yaml
diff --git a/docker/docker-compose.pgvecto-rs.yaml b/docker-legacy/docker-compose.pgvecto-rs.yaml
similarity index 100%
rename from docker/docker-compose.pgvecto-rs.yaml
rename to docker-legacy/docker-compose.pgvecto-rs.yaml
diff --git a/docker/docker-compose.pgvector.yaml b/docker-legacy/docker-compose.pgvector.yaml
similarity index 100%
rename from docker/docker-compose.pgvector.yaml
rename to docker-legacy/docker-compose.pgvector.yaml
diff --git a/docker-legacy/docker-compose.png b/docker-legacy/docker-compose.png
new file mode 100644
index 00000000000000..bdac113086d870
Binary files /dev/null and b/docker-legacy/docker-compose.png differ
diff --git a/docker/docker-compose.qdrant.yaml b/docker-legacy/docker-compose.qdrant.yaml
similarity index 100%
rename from docker/docker-compose.qdrant.yaml
rename to docker-legacy/docker-compose.qdrant.yaml
diff --git a/docker-legacy/docker-compose.yaml b/docker-legacy/docker-compose.yaml
new file mode 100644
index 00000000000000..eadaaced2c583d
--- /dev/null
+++ b/docker-legacy/docker-compose.yaml
@@ -0,0 +1,588 @@
+version: '3'
+services:
+ # API service
+ api:
+ image: langgenius/dify-api:0.6.12-fix1
+ restart: always
+ environment:
+ # Startup mode, 'api' starts the API server.
+ MODE: api
+ # The log level for the application. Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
+ LOG_LEVEL: INFO
+ # enable DEBUG mode to output more logs
+ # DEBUG : true
+ # A secret key that is used for securely signing the session cookie and encrypting sensitive information on the database. You can generate a strong key using `openssl rand -base64 42`.
+ SECRET_KEY: sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
+ # The base URL of console application web frontend, refers to the Console base URL of WEB service if console domain is
+ # different from api or web app domain.
+ # example: http://cloud.dify.ai
+ CONSOLE_WEB_URL: ''
+ # Password for admin user initialization.
+ # If left unset, admin user will not be prompted for a password when creating the initial admin account.
+ INIT_PASSWORD: ''
+ # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
+ # different from api or web app domain.
+ # example: http://cloud.dify.ai
+ CONSOLE_API_URL: ''
+ # The URL prefix for Service API endpoints, refers to the base URL of the current API service if api domain is
+ # different from console domain.
+ # example: http://api.dify.ai
+ SERVICE_API_URL: ''
+ # The URL prefix for Web APP frontend, refers to the Web App base URL of WEB service if web app domain is different from
+ # console or api domain.
+ # example: http://udify.app
+ APP_WEB_URL: ''
+ # File preview or download Url prefix.
+ # used to display File preview or download Url to the front-end or as multi-modal inputs;
+ # Url is signed and has expiration time.
+ FILES_URL: ''
+ # File Access Time specifies a time interval in seconds for the file to be accessed.
+ # The default value is 300 seconds.
+ FILES_ACCESS_TIMEOUT: 300
+ # When enabled, migrations will be executed prior to application startup and the application will start after the migrations have completed.
+ MIGRATION_ENABLED: 'true'
+ # The configurations of postgres database connection.
+ # It is consistent with the configuration in the 'db' service below.
+ DB_USERNAME: postgres
+ DB_PASSWORD: difyai123456
+ DB_HOST: db
+ DB_PORT: 5432
+ DB_DATABASE: dify
+ # The configurations of redis connection.
+ # It is consistent with the configuration in the 'redis' service below.
+ REDIS_HOST: redis
+ REDIS_PORT: 6379
+ REDIS_USERNAME: ''
+ REDIS_PASSWORD: difyai123456
+ REDIS_USE_SSL: 'false'
+ # use redis db 0 for redis cache
+ REDIS_DB: 0
+ # The configurations of celery broker.
+ # Use redis as the broker, and redis db 1 for celery broker.
+ CELERY_BROKER_URL: redis://:difyai123456@redis:6379/1
+ # Specifies the allowed origins for cross-origin requests to the Web API, e.g. https://dify.app or * for all origins.
+ WEB_API_CORS_ALLOW_ORIGINS: '*'
+ # Specifies the allowed origins for cross-origin requests to the console API, e.g. https://cloud.dify.ai or * for all origins.
+ CONSOLE_CORS_ALLOW_ORIGINS: '*'
+ # CSRF Cookie settings
+ # Controls whether a cookie is sent with cross-site requests,
+ # providing some protection against cross-site request forgery attacks
+ #
+ # Default: `SameSite=Lax, Secure=false, HttpOnly=true`
+ # This default configuration supports same-origin requests using either HTTP or HTTPS,
+ # but does not support cross-origin requests. It is suitable for local debugging purposes.
+ #
+ # If you want to enable cross-origin support,
+ # you must use the HTTPS protocol and set the configuration to `SameSite=None, Secure=true, HttpOnly=true`.
+ #
+ # The type of storage to use for storing user files. Supported values are `local`, `s3`, `azure-blob` and `google-storage`. Default: `local`
+ STORAGE_TYPE: local
+ # The path to the local storage directory, either relative to the root path of the API service code or an absolute path. Default: `storage` (relative), e.g. `/home/john/storage` (absolute).
+ # only available when STORAGE_TYPE is `local`.
+ STORAGE_LOCAL_PATH: storage
+ # The S3 storage configurations, only available when STORAGE_TYPE is `s3`.
+ S3_USE_AWS_MANAGED_IAM: 'false'
+ S3_ENDPOINT: 'https://xxx.r2.cloudflarestorage.com'
+ S3_BUCKET_NAME: 'difyai'
+ S3_ACCESS_KEY: 'ak-difyai'
+ S3_SECRET_KEY: 'sk-difyai'
+ S3_REGION: 'us-east-1'
+ # The Azure Blob storage configurations, only available when STORAGE_TYPE is `azure-blob`.
+ AZURE_BLOB_ACCOUNT_NAME: 'difyai'
+ AZURE_BLOB_ACCOUNT_KEY: 'difyai'
+ AZURE_BLOB_CONTAINER_NAME: 'difyai-container'
+ AZURE_BLOB_ACCOUNT_URL: 'https://.blob.core.windows.net'
+ # The Google storage configurations, only available when STORAGE_TYPE is `google-storage`.
+ GOOGLE_STORAGE_BUCKET_NAME: 'your-bucket-name'
+ # if you want to use Application Default Credentials, you can leave GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64 empty.
+ GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: 'your-google-service-account-json-base64-string'
+ # The Alibaba Cloud OSS configurations, only available when STORAGE_TYPE is `aliyun-oss`
+ ALIYUN_OSS_BUCKET_NAME: 'your-bucket-name'
+ ALIYUN_OSS_ACCESS_KEY: 'your-access-key'
+ ALIYUN_OSS_SECRET_KEY: 'your-secret-key'
+ ALIYUN_OSS_ENDPOINT: 'https://oss-ap-southeast-1-internal.aliyuncs.com'
+ ALIYUN_OSS_REGION: 'ap-southeast-1'
+ ALIYUN_OSS_AUTH_VERSION: 'v4'
+ # The Tencent COS storage configurations, only available when STORAGE_TYPE is `tencent-cos`.
+ TENCENT_COS_BUCKET_NAME: 'your-bucket-name'
+ TENCENT_COS_SECRET_KEY: 'your-secret-key'
+ TENCENT_COS_SECRET_ID: 'your-secret-id'
+ TENCENT_COS_REGION: 'your-region'
+ TENCENT_COS_SCHEME: 'your-scheme'
+ # The type of vector store to use. Supported values are `weaviate`, `qdrant`, `milvus`, `relyt`,`pgvector`, `chroma`, 'opensearch', 'tidb_vector'.
+ VECTOR_STORE: weaviate
+ # The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
+ WEAVIATE_ENDPOINT: http://weaviate:8080
+ # The Weaviate API key.
+ WEAVIATE_API_KEY: WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
+ # The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
+ QDRANT_URL: http://qdrant:6333
+ # The Qdrant API key.
+ QDRANT_API_KEY: difyai123456
+ # The Qdrant client timeout setting.
+ QDRANT_CLIENT_TIMEOUT: 20
+ # The Qdrant client enable gRPC mode.
+ QDRANT_GRPC_ENABLED: 'false'
+ # The Qdrant server gRPC mode PORT.
+ QDRANT_GRPC_PORT: 6334
+ # Milvus configuration Only available when VECTOR_STORE is `milvus`.
+ # The milvus host.
+ MILVUS_HOST: 127.0.0.1
+ # The milvus port.
+ MILVUS_PORT: 19530
+ # The milvus username.
+ MILVUS_USER: root
+ # The milvus password.
+ MILVUS_PASSWORD: Milvus
+ # The milvus tls switch.
+ MILVUS_SECURE: 'false'
+ # relyt configurations
+ RELYT_HOST: db
+ RELYT_PORT: 5432
+ RELYT_USER: postgres
+ RELYT_PASSWORD: difyai123456
+ RELYT_DATABASE: postgres
+ # pgvector configurations
+ PGVECTOR_HOST: pgvector
+ PGVECTOR_PORT: 5432
+ PGVECTOR_USER: postgres
+ PGVECTOR_PASSWORD: difyai123456
+ PGVECTOR_DATABASE: dify
+ # tidb vector configurations
+ TIDB_VECTOR_HOST: tidb
+ TIDB_VECTOR_PORT: 4000
+ TIDB_VECTOR_USER: xxx.root
+ TIDB_VECTOR_PASSWORD: xxxxxx
+ TIDB_VECTOR_DATABASE: dify
+ # oracle configurations
+ ORACLE_HOST: oracle
+ ORACLE_PORT: 1521
+ ORACLE_USER: dify
+ ORACLE_PASSWORD: dify
+ ORACLE_DATABASE: FREEPDB1
+ # Chroma configuration
+ CHROMA_HOST: 127.0.0.1
+ CHROMA_PORT: 8000
+ CHROMA_TENANT: default_tenant
+ CHROMA_DATABASE: default_database
+ CHROMA_AUTH_PROVIDER: chromadb.auth.token_authn.TokenAuthClientProvider
+ CHROMA_AUTH_CREDENTIALS: xxxxxx
+ # Mail configuration, support: resend, smtp
+ MAIL_TYPE: ''
+ # Default sender email address, used when none is specified
+ MAIL_DEFAULT_SEND_FROM: 'YOUR EMAIL FROM (eg: no-reply <no-reply@dify.ai>)'
+ SMTP_SERVER: ''
+ SMTP_PORT: 465
+ SMTP_USERNAME: ''
+ SMTP_PASSWORD: ''
+ SMTP_USE_TLS: 'true'
+ SMTP_OPPORTUNISTIC_TLS: 'false'
+ # the api-key for resend (https://resend.com)
+ RESEND_API_KEY: ''
+ RESEND_API_URL: https://api.resend.com
+ # The DSN for Sentry error reporting. If not set, Sentry error reporting will be disabled.
+ SENTRY_DSN: ''
+ # The sample rate for Sentry events. Default: `1.0`
+ SENTRY_TRACES_SAMPLE_RATE: 1.0
+ # The sample rate for Sentry profiles. Default: `1.0`
+ SENTRY_PROFILES_SAMPLE_RATE: 1.0
+ # Notion import configuration, support public and internal
+ NOTION_INTEGRATION_TYPE: public
+ NOTION_CLIENT_SECRET: your-client-secret
+ NOTION_CLIENT_ID: your-client-id
+ NOTION_INTERNAL_SECRET: your-internal-secret
+ # The sandbox service endpoint.
+ CODE_EXECUTION_ENDPOINT: "http://sandbox:8194"
+ CODE_EXECUTION_API_KEY: dify-sandbox
+ CODE_MAX_NUMBER: 9223372036854775807
+ CODE_MIN_NUMBER: -9223372036854775808
+ CODE_MAX_STRING_LENGTH: 80000
+ TEMPLATE_TRANSFORM_MAX_LENGTH: 80000
+ CODE_MAX_STRING_ARRAY_LENGTH: 30
+ CODE_MAX_OBJECT_ARRAY_LENGTH: 30
+ CODE_MAX_NUMBER_ARRAY_LENGTH: 1000
+ # SSRF Proxy server
+ SSRF_PROXY_HTTP_URL: 'http://ssrf_proxy:3128'
+ SSRF_PROXY_HTTPS_URL: 'http://ssrf_proxy:3128'
+ # Indexing configuration
+ INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: 1000
+ depends_on:
+ - db
+ - redis
+ volumes:
+ # Mount the storage directory to the container, for storing user files.
+ - ./volumes/app/storage:/app/api/storage
+ # uncomment to expose dify-api port to host
+ # ports:
+ # - "5001:5001"
+ networks:
+ - ssrf_proxy_network
+ - default
+
+ # worker service
+ # The Celery worker for processing the queue.
+ worker:
+ image: langgenius/dify-api:0.6.12-fix1
+ restart: always
+ environment:
+ CONSOLE_WEB_URL: ''
+ # Startup mode, 'worker' starts the Celery worker for processing the queue.
+ MODE: worker
+
+ # --- All the configurations below are the same as those in the 'api' service. ---
+
+ # The log level for the application. Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
+ LOG_LEVEL: INFO
+ # A secret key that is used for securely signing the session cookie and encrypting sensitive information on the database. You can generate a strong key using `openssl rand -base64 42`.
+ # same as the API service
+ SECRET_KEY: sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
+ # The configurations of postgres database connection.
+ # It is consistent with the configuration in the 'db' service below.
+ DB_USERNAME: postgres
+ DB_PASSWORD: difyai123456
+ DB_HOST: db
+ DB_PORT: 5432
+ DB_DATABASE: dify
+ # The configurations of redis cache connection.
+ REDIS_HOST: redis
+ REDIS_PORT: 6379
+ REDIS_USERNAME: ''
+ REDIS_PASSWORD: difyai123456
+ REDIS_DB: 0
+ REDIS_USE_SSL: 'false'
+ # The configurations of celery broker.
+ CELERY_BROKER_URL: redis://:difyai123456@redis:6379/1
+ # The type of storage to use for storing user files. Supported values are `local`, `s3`, `azure-blob` and `google-storage`. Default: `local`
+ STORAGE_TYPE: local
+ STORAGE_LOCAL_PATH: storage
+ # The S3 storage configurations, only available when STORAGE_TYPE is `s3`.
+ S3_USE_AWS_MANAGED_IAM: 'false'
+ S3_ENDPOINT: 'https://xxx.r2.cloudflarestorage.com'
+ S3_BUCKET_NAME: 'difyai'
+ S3_ACCESS_KEY: 'ak-difyai'
+ S3_SECRET_KEY: 'sk-difyai'
+ S3_REGION: 'us-east-1'
+ # The Azure Blob storage configurations, only available when STORAGE_TYPE is `azure-blob`.
+ AZURE_BLOB_ACCOUNT_NAME: 'difyai'
+ AZURE_BLOB_ACCOUNT_KEY: 'difyai'
+ AZURE_BLOB_CONTAINER_NAME: 'difyai-container'
+ AZURE_BLOB_ACCOUNT_URL: 'https://.blob.core.windows.net'
+ # The Google storage configurations, only available when STORAGE_TYPE is `google-storage`.
+ GOOGLE_STORAGE_BUCKET_NAME: 'your-bucket-name'
+ # if you want to use Application Default Credentials, you can leave GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64 empty.
+ GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: 'your-google-service-account-json-base64-string'
+ # The Alibaba Cloud OSS configurations, only available when STORAGE_TYPE is `aliyun-oss`
+ ALIYUN_OSS_BUCKET_NAME: 'your-bucket-name'
+ ALIYUN_OSS_ACCESS_KEY: 'your-access-key'
+ ALIYUN_OSS_SECRET_KEY: 'your-secret-key'
+ ALIYUN_OSS_ENDPOINT: 'https://oss-ap-southeast-1-internal.aliyuncs.com'
+ ALIYUN_OSS_REGION: 'ap-southeast-1'
+ ALIYUN_OSS_AUTH_VERSION: 'v4'
+ # The Tencent COS storage configurations, only available when STORAGE_TYPE is `tencent-cos`.
+ TENCENT_COS_BUCKET_NAME: 'your-bucket-name'
+ TENCENT_COS_SECRET_KEY: 'your-secret-key'
+ TENCENT_COS_SECRET_ID: 'your-secret-id'
+ TENCENT_COS_REGION: 'your-region'
+ TENCENT_COS_SCHEME: 'your-scheme'
+ # The type of vector store to use. Supported values are `weaviate`, `qdrant`, `milvus`, `relyt`, `pgvector`, `chroma`, 'opensearch', 'tidb_vector'.
+ VECTOR_STORE: weaviate
+ # The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
+ WEAVIATE_ENDPOINT: http://weaviate:8080
+ # The Weaviate API key.
+ WEAVIATE_API_KEY: WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
+ # The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
+ QDRANT_URL: http://qdrant:6333
+ # The Qdrant API key.
+ QDRANT_API_KEY: difyai123456
+ # The Qdrant client timeout setting.
+ QDRANT_CLIENT_TIMEOUT: 20
+ # The Qdrant client enable gRPC mode.
+ QDRANT_GRPC_ENABLED: 'false'
+ # The Qdrant server gRPC mode PORT.
+ QDRANT_GRPC_PORT: 6334
+ # Milvus configuration Only available when VECTOR_STORE is `milvus`.
+ # The milvus host.
+ MILVUS_HOST: 127.0.0.1
+ # The milvus port.
+ MILVUS_PORT: 19530
+ # The milvus username.
+ MILVUS_USER: root
+ # The milvus password.
+ MILVUS_PASSWORD: Milvus
+ # The milvus tls switch.
+ MILVUS_SECURE: 'false'
+ # Mail configuration, support: resend
+ MAIL_TYPE: ''
+ # Default sender email address, used when none is specified
+ MAIL_DEFAULT_SEND_FROM: 'YOUR EMAIL FROM (eg: no-reply <no-reply@dify.ai>)'
+ SMTP_SERVER: ''
+ SMTP_PORT: 465
+ SMTP_USERNAME: ''
+ SMTP_PASSWORD: ''
+ SMTP_USE_TLS: 'true'
+ SMTP_OPPORTUNISTIC_TLS: 'false'
+ # the api-key for resend (https://resend.com)
+ RESEND_API_KEY: ''
+ RESEND_API_URL: https://api.resend.com
+ # relyt configurations
+ RELYT_HOST: db
+ RELYT_PORT: 5432
+ RELYT_USER: postgres
+ RELYT_PASSWORD: difyai123456
+ RELYT_DATABASE: postgres
+ # tencent configurations
+ TENCENT_VECTOR_DB_URL: http://127.0.0.1
+ TENCENT_VECTOR_DB_API_KEY: dify
+ TENCENT_VECTOR_DB_TIMEOUT: 30
+ TENCENT_VECTOR_DB_USERNAME: dify
+ TENCENT_VECTOR_DB_DATABASE: dify
+ TENCENT_VECTOR_DB_SHARD: 1
+ TENCENT_VECTOR_DB_REPLICAS: 2
+ # OpenSearch configuration
+ OPENSEARCH_HOST: 127.0.0.1
+ OPENSEARCH_PORT: 9200
+ OPENSEARCH_USER: admin
+ OPENSEARCH_PASSWORD: admin
+ OPENSEARCH_SECURE: 'true'
+ # pgvector configurations
+ PGVECTOR_HOST: pgvector
+ PGVECTOR_PORT: 5432
+ PGVECTOR_USER: postgres
+ PGVECTOR_PASSWORD: difyai123456
+ PGVECTOR_DATABASE: dify
+ # tidb vector configurations
+ TIDB_VECTOR_HOST: tidb
+ TIDB_VECTOR_PORT: 4000
+ TIDB_VECTOR_USER: xxx.root
+ TIDB_VECTOR_PASSWORD: xxxxxx
+ TIDB_VECTOR_DATABASE: dify
+ # oracle configurations
+ ORACLE_HOST: oracle
+ ORACLE_PORT: 1521
+ ORACLE_USER: dify
+ ORACLE_PASSWORD: dify
+ ORACLE_DATABASE: FREEPDB1
+ # Chroma configuration
+ CHROMA_HOST: 127.0.0.1
+ CHROMA_PORT: 8000
+ CHROMA_TENANT: default_tenant
+ CHROMA_DATABASE: default_database
+ CHROMA_AUTH_PROVIDER: chromadb.auth.token_authn.TokenAuthClientProvider
+ CHROMA_AUTH_CREDENTIALS: xxxxxx
+ # Notion import configuration, support public and internal
+ NOTION_INTEGRATION_TYPE: public
+ NOTION_CLIENT_SECRET: your-client-secret
+ NOTION_CLIENT_ID: your-client-id
+ NOTION_INTERNAL_SECRET: your-internal-secret
+ # Indexing configuration
+ INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: 1000
+ depends_on:
+ - db
+ - redis
+ volumes:
+ # Mount the storage directory to the container, for storing user files.
+ - ./volumes/app/storage:/app/api/storage
+ networks:
+ - ssrf_proxy_network
+ - default
+
+ # Frontend web application.
+ web:
+ image: langgenius/dify-web:0.6.12-fix1
+ restart: always
+ environment:
+ # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
+ # different from api or web app domain.
+ # example: http://cloud.dify.ai
+ CONSOLE_API_URL: ''
+ # The URL for Web APP api server, refers to the Web App base URL of WEB service if web app domain is different from
+ # console or api domain.
+ # example: http://udify.app
+ APP_API_URL: ''
+ # The DSN for Sentry error reporting. If not set, Sentry error reporting will be disabled.
+ SENTRY_DSN: ''
+ # uncomment to expose dify-web port to host
+ # ports:
+ # - "3000:3000"
+
+ # The postgres database.
+ db:
+ image: postgres:15-alpine
+ restart: always
+ environment:
+ PGUSER: postgres
+ # The password for the default postgres user.
+ POSTGRES_PASSWORD: difyai123456
+ # The name of the default postgres database.
+ POSTGRES_DB: dify
+ # postgres data directory
+ PGDATA: /var/lib/postgresql/data/pgdata
+ volumes:
+ - ./volumes/db/data:/var/lib/postgresql/data
+ # Notice: if you use windows-wsl2, postgres may not work properly due to the NTFS issue. You can use a named volume to mount the data directory to the host.
+ # if you use the following config, you need to uncomment the volumes configuration below at the end of the file.
+ # - postgres:/var/lib/postgresql/data
+ # uncomment to expose db(postgresql) port to host
+ # ports:
+ # - "5432:5432"
+ healthcheck:
+ test: [ "CMD", "pg_isready" ]
+ interval: 1s
+ timeout: 3s
+ retries: 30
+
+ # The redis cache.
+ redis:
+ image: redis:6-alpine
+ restart: always
+ volumes:
+ # Mount the redis data directory to the container.
+ - ./volumes/redis/data:/data
+ # Set the redis password when starting the redis server.
+ command: redis-server --requirepass difyai123456
+ healthcheck:
+ test: [ "CMD", "redis-cli", "ping" ]
+ # uncomment to expose redis port to host
+ # ports:
+ # - "6379:6379"
+
+ # The Weaviate vector store.
+ weaviate:
+ image: semitechnologies/weaviate:1.19.0
+ restart: always
+ volumes:
+ # Mount the Weaviate data directory to the container.
+ - ./volumes/weaviate:/var/lib/weaviate
+ environment:
+ # The Weaviate configurations
+ # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
+ QUERY_DEFAULTS_LIMIT: 25
+ AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'false'
+ PERSISTENCE_DATA_PATH: '/var/lib/weaviate'
+ DEFAULT_VECTORIZER_MODULE: 'none'
+ CLUSTER_HOSTNAME: 'node1'
+ AUTHENTICATION_APIKEY_ENABLED: 'true'
+ AUTHENTICATION_APIKEY_ALLOWED_KEYS: 'WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih'
+ AUTHENTICATION_APIKEY_USERS: 'hello@dify.ai'
+ AUTHORIZATION_ADMINLIST_ENABLED: 'true'
+ AUTHORIZATION_ADMINLIST_USERS: 'hello@dify.ai'
+ # uncomment to expose weaviate port to host
+ # ports:
+ # - "8080:8080"
+
+ # The DifySandbox
+ sandbox:
+ image: langgenius/dify-sandbox:0.2.1
+ restart: always
+ environment:
+ # The DifySandbox configurations
+ # Make sure you are changing this key for your deployment with a strong key.
+ # You can generate a strong key using `openssl rand -base64 42`.
+ API_KEY: dify-sandbox
+ GIN_MODE: 'release'
+ WORKER_TIMEOUT: 15
+ ENABLE_NETWORK: 'true'
+ HTTP_PROXY: 'http://ssrf_proxy:3128'
+ HTTPS_PROXY: 'http://ssrf_proxy:3128'
+ SANDBOX_PORT: 8194
+ volumes:
+ - ./volumes/sandbox/dependencies:/dependencies
+ networks:
+ - ssrf_proxy_network
+
+ # ssrf_proxy server
+ # for more information, please refer to
+ # https://docs.dify.ai/getting-started/install-self-hosted/install-faq#id-16.-why-is-ssrf_proxy-needed
+ ssrf_proxy:
+ image: ubuntu/squid:latest
+ restart: always
+ volumes:
+ # Please modify the squid.conf file to fit your network environment.
+ - ./volumes/ssrf_proxy/squid.conf:/etc/squid/squid.conf
+ networks:
+ - ssrf_proxy_network
+ - default
+ # Qdrant vector store.
+ # uncomment to use qdrant as vector store.
+ # (if uncommented, you need to comment out the weaviate service above,
+ # and set VECTOR_STORE to qdrant in the api & worker service.)
+ # qdrant:
+ # image: langgenius/qdrant:v1.7.3
+ # restart: always
+ # volumes:
+ # - ./volumes/qdrant:/qdrant/storage
+ # environment:
+ # QDRANT_API_KEY: 'difyai123456'
+ # # uncomment to expose qdrant port to host
+ # # ports:
+ # # - "6333:6333"
+ # # - "6334:6334"
+
+ # The pgvector vector database.
+ # Uncomment to use pgvector as vector store.
+ # pgvector:
+ # image: pgvector/pgvector:pg16
+ # restart: always
+ # environment:
+ # PGUSER: postgres
+ # # The password for the default postgres user.
+ # POSTGRES_PASSWORD: difyai123456
+ # # The name of the default postgres database.
+ # POSTGRES_DB: dify
+ # # postgres data directory
+ # PGDATA: /var/lib/postgresql/data/pgdata
+ # volumes:
+ # - ./volumes/pgvector/data:/var/lib/postgresql/data
+ # # uncomment to expose db(postgresql) port to host
+ # # ports:
+ # # - "5433:5432"
+ # healthcheck:
+ # test: [ "CMD", "pg_isready" ]
+ # interval: 1s
+ # timeout: 3s
+ # retries: 30
+
+ # The oracle vector database.
+ # Uncomment to use oracle23ai as vector store. You also need to uncomment the volumes block at the end of this file.
+ # oracle:
+ # image: container-registry.oracle.com/database/free:latest
+ # restart: always
+ # ports:
+ # - 1521:1521
+ # volumes:
+ # - type: volume
+ # source: oradata
+ # target: /opt/oracle/oradata
+ # - ./startupscripts:/opt/oracle/scripts/startup
+ # environment:
+ # - ORACLE_PWD=Dify123456
+ # - ORACLE_CHARACTERSET=AL32UTF8
+
+
+ # The nginx reverse proxy.
+ # used for reverse proxying the API service and Web service.
+ nginx:
+ image: nginx:latest
+ restart: always
+ volumes:
+ - ./nginx/nginx.conf:/etc/nginx/nginx.conf
+ - ./nginx/proxy.conf:/etc/nginx/proxy.conf
+ - ./nginx/conf.d:/etc/nginx/conf.d
+ #- ./nginx/ssl:/etc/ssl
+ depends_on:
+ - api
+ - web
+ ports:
+ - "80:80"
+ #- "443:443"
+# Notice: if you use windows-wsl2, postgres may not work properly due to the NTFS issue. You can use a named volume to mount the data directory to the host.
+# volumes:
+# postgres:
+networks:
+ # Create a network shared by sandbox, api and ssrf_proxy; it has no access to the outside network.
+ ssrf_proxy_network:
+ driver: bridge
+ internal: true
+
+#volumes:
+# oradata:
diff --git a/docker/nginx/conf.d/default.conf b/docker-legacy/nginx/conf.d/default.conf
similarity index 100%
rename from docker/nginx/conf.d/default.conf
rename to docker-legacy/nginx/conf.d/default.conf
diff --git a/docker/nginx/nginx.conf b/docker-legacy/nginx/nginx.conf
similarity index 100%
rename from docker/nginx/nginx.conf
rename to docker-legacy/nginx/nginx.conf
diff --git a/docker/nginx/proxy.conf b/docker-legacy/nginx/proxy.conf
similarity index 100%
rename from docker/nginx/proxy.conf
rename to docker-legacy/nginx/proxy.conf
diff --git a/docker-legacy/nginx/ssl/.gitkeep b/docker-legacy/nginx/ssl/.gitkeep
new file mode 100644
index 00000000000000..8b137891791fe9
--- /dev/null
+++ b/docker-legacy/nginx/ssl/.gitkeep
@@ -0,0 +1 @@
+
diff --git a/docker-legacy/startupscripts/create_user.sql b/docker-legacy/startupscripts/create_user.sql
new file mode 100755
index 00000000000000..b80e19c3b05a06
--- /dev/null
+++ b/docker-legacy/startupscripts/create_user.sql
@@ -0,0 +1,5 @@
+show pdbs;
+ALTER SYSTEM SET PROCESSES=500 SCOPE=SPFILE;
+alter session set container= freepdb1;
+create user dify identified by dify DEFAULT TABLESPACE users quota unlimited on users;
+grant DB_DEVELOPER_ROLE to dify;
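Oracle's official database image executes scripts mounted under `/opt/oracle/scripts/startup` once the instance is ready, which is how the legacy compose file consumes this script. To re-run it by hand, a hedged sketch (the `oracle` service is commented out by default in the legacy compose file):

```bash
# Execute the user-creation script inside a running Oracle container.
docker compose -f docker-legacy/docker-compose.yaml exec oracle \
  bash -c 'sqlplus -S / as sysdba @/opt/oracle/scripts/startup/create_user.sql'
```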
diff --git a/docker-legacy/volumes/opensearch/opensearch_dashboards.yml b/docker-legacy/volumes/opensearch/opensearch_dashboards.yml
new file mode 100644
index 00000000000000..f50d63bbb9f425
--- /dev/null
+++ b/docker-legacy/volumes/opensearch/opensearch_dashboards.yml
@@ -0,0 +1,222 @@
+---
+# Copyright OpenSearch Contributors
+# SPDX-License-Identifier: Apache-2.0
+
+# Description:
+# Default configuration for OpenSearch Dashboards
+
+# OpenSearch Dashboards is served by a back end server. This setting specifies the port to use.
+# server.port: 5601
+
+# Specifies the address to which the OpenSearch Dashboards server will bind. IP addresses and host names are both valid values.
+# The default is 'localhost', which usually means remote machines will not be able to connect.
+# To allow connections from remote users, set this parameter to a non-loopback address.
+# server.host: "localhost"
+
+# Enables you to specify a path to mount OpenSearch Dashboards at if you are running behind a proxy.
+# Use the `server.rewriteBasePath` setting to tell OpenSearch Dashboards if it should remove the basePath
+# from requests it receives, and to prevent a deprecation warning at startup.
+# This setting cannot end in a slash.
+# server.basePath: ""
+
+# Specifies whether OpenSearch Dashboards should rewrite requests that are prefixed with
+# `server.basePath` or require that they are rewritten by your reverse proxy.
+# server.rewriteBasePath: false
+
+# The maximum payload size in bytes for incoming server requests.
+# server.maxPayloadBytes: 1048576
+
+# The OpenSearch Dashboards server's name. This is used for display purposes.
+# server.name: "your-hostname"
+
+# The URLs of the OpenSearch instances to use for all your queries.
+# opensearch.hosts: ["http://localhost:9200"]
+
+# OpenSearch Dashboards uses an index in OpenSearch to store saved searches, visualizations and
+# dashboards. OpenSearch Dashboards creates a new index if the index doesn't already exist.
+# opensearchDashboards.index: ".opensearch_dashboards"
+
+# The default application to load.
+# opensearchDashboards.defaultAppId: "home"
+
+# Setting for an optimized healthcheck that only uses the local OpenSearch node to do Dashboards healthcheck.
+# This settings should be used for large clusters or for clusters with ingest heavy nodes.
+# It allows Dashboards to only healthcheck using the local OpenSearch node rather than fan out requests across all nodes.
+#
+# It requires the user to create an OpenSearch node attribute with the same name as the value used in the setting
+# This node attribute should assign all nodes of the same cluster an integer value that increments with each new cluster that is spun up
+# e.g. in opensearch.yml file you would set the value to a setting using node.attr.cluster_id:
+# Should only be enabled if there is a corresponding node attribute created in your OpenSearch config that matches the value here
+# opensearch.optimizedHealthcheckId: "cluster_id"
+
+# If your OpenSearch is protected with basic authentication, these settings provide
+# the username and password that the OpenSearch Dashboards server uses to perform maintenance on the OpenSearch Dashboards
+# index at startup. Your OpenSearch Dashboards users still need to authenticate with OpenSearch, which
+# is proxied through the OpenSearch Dashboards server.
+# opensearch.username: "opensearch_dashboards_system"
+# opensearch.password: "pass"
+
+# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
+# These settings enable SSL for outgoing requests from the OpenSearch Dashboards server to the browser.
+# server.ssl.enabled: false
+# server.ssl.certificate: /path/to/your/server.crt
+# server.ssl.key: /path/to/your/server.key
+
+# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
+# These files are used to verify the identity of OpenSearch Dashboards to OpenSearch and are required when
+# xpack.security.http.ssl.client_authentication in OpenSearch is set to required.
+# opensearch.ssl.certificate: /path/to/your/client.crt
+# opensearch.ssl.key: /path/to/your/client.key
+
+# Optional setting that enables you to specify a path to the PEM file for the certificate
+# authority for your OpenSearch instance.
+# opensearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]
+
+# To disregard the validity of SSL certificates, change this setting's value to 'none'.
+# opensearch.ssl.verificationMode: full
+
+# Time in milliseconds to wait for OpenSearch to respond to pings. Defaults to the value of
+# the opensearch.requestTimeout setting.
+# opensearch.pingTimeout: 1500
+
+# Time in milliseconds to wait for responses from the back end or OpenSearch. This value
+# must be a positive integer.
+# opensearch.requestTimeout: 30000
+
+# List of OpenSearch Dashboards client-side headers to send to OpenSearch. To send *no* client-side
+# headers, set this value to [] (an empty list).
+# opensearch.requestHeadersWhitelist: [ authorization ]
+
+# Header names and values that are sent to OpenSearch. Any custom headers cannot be overwritten
+# by client-side headers, regardless of the opensearch.requestHeadersWhitelist configuration.
+# opensearch.customHeaders: {}
+
+# Time in milliseconds for OpenSearch to wait for responses from shards. Set to 0 to disable.
+# opensearch.shardTimeout: 30000
+
+# Logs queries sent to OpenSearch. Requires logging.verbose set to true.
+# opensearch.logQueries: false
+
+# Specifies the path where OpenSearch Dashboards creates the process ID file.
+# pid.file: /var/run/opensearchDashboards.pid
+
+# Enables you to specify a file where OpenSearch Dashboards stores log output.
+# logging.dest: stdout
+
+# Set the value of this setting to true to suppress all logging output.
+# logging.silent: false
+
+# Set the value of this setting to true to suppress all logging output other than error messages.
+# logging.quiet: false
+
+# Set the value of this setting to true to log all events, including system usage information
+# and all requests.
+# logging.verbose: false
+
+# Set the interval in milliseconds to sample system and process performance
+# metrics. Minimum is 100ms. Defaults to 5000.
+# ops.interval: 5000
+
+# Specifies locale to be used for all localizable strings, dates and number formats.
+# Supported languages are the following: English - en , by default , Chinese - zh-CN .
+# i18n.locale: "en"
+
+# Set the allowlist to check input graphite Url. Allowlist is the default check list.
+# vis_type_timeline.graphiteAllowedUrls: ['https://www.hostedgraphite.com/UID/ACCESS_KEY/graphite']
+
+# Set the blocklist to check input graphite Url. Blocklist is an IP list.
+# Below is an example for reference
+# vis_type_timeline.graphiteBlockedIPs: [
+# //Loopback
+# '127.0.0.0/8',
+# '::1/128',
+# //Link-local Address for IPv6
+# 'fe80::/10',
+# //Private IP address for IPv4
+# '10.0.0.0/8',
+# '172.16.0.0/12',
+# '192.168.0.0/16',
+# //Unique local address (ULA)
+# 'fc00::/7',
+# //Reserved IP address
+# '0.0.0.0/8',
+# '100.64.0.0/10',
+# '192.0.0.0/24',
+# '192.0.2.0/24',
+# '198.18.0.0/15',
+# '192.88.99.0/24',
+# '198.51.100.0/24',
+# '203.0.113.0/24',
+# '224.0.0.0/4',
+# '240.0.0.0/4',
+# '255.255.255.255/32',
+# '::/128',
+# '2001:db8::/32',
+# 'ff00::/8',
+# ]
+# vis_type_timeline.graphiteBlockedIPs: []
+
+# opensearchDashboards.branding:
+# logo:
+# defaultUrl: ""
+# darkModeUrl: ""
+# mark:
+# defaultUrl: ""
+# darkModeUrl: ""
+# loadingLogo:
+# defaultUrl: ""
+# darkModeUrl: ""
+# faviconUrl: ""
+# applicationTitle: ""
+
+# Set the value of this setting to true to capture region blocked warnings and errors
+# for your map rendering services.
+# map.showRegionBlockedWarning: false
+
+# Set the value of this setting to false to suppress search usage telemetry
+# for reducing the load of OpenSearch cluster.
+# data.search.usageTelemetry.enabled: false
+
+# 2.4 renames 'wizard.enabled: false' to 'vis_builder.enabled: false'
+# Set the value of this setting to false to disable VisBuilder
+# functionality in Visualization.
+# vis_builder.enabled: false
+
+# 2.4 New Experimental Feature
+# Set the value of this setting to true to enable the experimental multiple data source
+# support feature. Use with caution.
+# data_source.enabled: false
+# Set the value of these settings to customize crypto materials to encryption saved credentials
+# in data sources.
+# data_source.encryption.wrappingKeyName: 'changeme'
+# data_source.encryption.wrappingKeyNamespace: 'changeme'
+# data_source.encryption.wrappingKey: [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+
+# 2.6 New ML Commons Dashboards Feature
+# Set the value of this setting to true to enable the ml commons dashboards
+# ml_commons_dashboards.enabled: false
+
+# 2.12 New experimental Assistant Dashboards Feature
+# Set the value of this setting to true to enable the assistant dashboards
+# assistant.chat.enabled: false
+
+# 2.13 New Query Assistant Feature
+# Set the value of this setting to false to disable the query assistant
+# observability.query_assist.enabled: false
+
+# 2.14 Enable Ui Metric Collectors in Usage Collector
+# Set the value of this setting to true to enable UI Metric collections
+# usageCollection.uiMetric.enabled: false
+
+opensearch.hosts: [https://localhost:9200]
+opensearch.ssl.verificationMode: none
+opensearch.username: admin
+opensearch.password: 'Qazwsxedc!@#123'
+opensearch.requestHeadersWhitelist: [authorization, securitytenant]
+
+opensearch_security.multitenancy.enabled: true
+opensearch_security.multitenancy.tenants.preferred: [Private, Global]
+opensearch_security.readonly_mode.roles: [kibana_read_only]
+# Use this setting if you are running opensearch-dashboards without https
+opensearch_security.cookie.secure: false
+server.host: '0.0.0.0'
diff --git a/docker-legacy/volumes/sandbox/dependencies/python-requirements.txt b/docker-legacy/volumes/sandbox/dependencies/python-requirements.txt
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/docker/volumes/ssrf_proxy/squid.conf b/docker-legacy/volumes/ssrf_proxy/squid.conf
similarity index 100%
rename from docker/volumes/ssrf_proxy/squid.conf
rename to docker-legacy/volumes/ssrf_proxy/squid.conf
diff --git a/docker/.env.example b/docker/.env.example
new file mode 100644
index 00000000000000..eb4a04351f1356
--- /dev/null
+++ b/docker/.env.example
@@ -0,0 +1,604 @@
+# ------------------------------
+# Environment Variables for API service & worker
+# ------------------------------
+
+# ------------------------------
+# Common Variables
+# ------------------------------
+
+# The backend URL of the console API,
+# used to concatenate the authorization callback.
+# If empty, it is the same domain.
+# Example: https://api.console.dify.ai
+CONSOLE_API_URL=
+
+# The front-end URL of the console web,
+# used to concatenate some front-end addresses and for CORS configuration.
+# If empty, it is the same domain.
+# Example: https://console.dify.ai
+CONSOLE_WEB_URL=
+
+# Service API Url,
+# used to display Service API Base Url to the front-end.
+# If empty, it is the same domain.
+# Example: https://api.dify.ai
+SERVICE_API_URL=
+
+# WebApp API backend Url,
+# used to declare the back-end URL for the front-end API.
+# If empty, it is the same domain.
+# Example: https://api.app.dify.ai
+APP_API_URL=
+
+# WebApp Url,
+# used to display WebAPP API Base Url to the front-end.
+# If empty, it is the same domain.
+# Example: https://app.dify.ai
+APP_WEB_URL=
+
+# File preview or download Url prefix.
+# used to display File preview or download Url to the front-end or as multi-modal inputs;
+# Url is signed and has expiration time.
+FILES_URL=
+
+# ------------------------------
+# Server Configuration
+# ------------------------------
+
+# The log level for the application.
+# Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
+LOG_LEVEL=INFO
+
+# Debug mode, default is false.
+# It is recommended to turn on this configuration for local development
+# to prevent some problems caused by monkey patch.
+DEBUG=false
+
+# Flask debug mode. When enabled, it outputs trace information in API responses,
+# which is convenient for debugging.
+FLASK_DEBUG=false
+
+# A secret key that is used for securely signing the session cookie
+# and encrypting sensitive information on the database.
+# You can generate a strong key using `openssl rand -base64 42`.
+SECRET_KEY=sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
+
+# Password for admin user initialization.
+# If left unset, admin user will not be prompted for a password
+# when creating the initial admin account.
+INIT_PASSWORD=
+
+# Deployment environment.
+# Supported values are `PRODUCTION`, `TESTING`. Default is `PRODUCTION`.
+# In the `TESTING` environment, a distinct color label is shown on the
+# front-end page, indicating that this environment is a testing environment.
+DEPLOY_ENV=PRODUCTION
+
+# Whether to enable the version check policy.
+# If set to false, https://updates.dify.ai will not be called to check for new versions.
+CHECK_UPDATE_URL=false
+
+# Used to change the OpenAI base address; the default is https://api.openai.com/v1.
+# Replace it with a mirror address when api.openai.com cannot be reached directly,
+# or with the endpoint of a local model that provides an OpenAI-compatible API.
+OPENAI_API_BASE=https://api.openai.com/v1
+
+# When enabled, migrations will be executed prior to application startup
+# and the application will start after the migrations have completed.
+MIGRATION_ENABLED=true
+
+# File Access Time specifies a time interval in seconds for the file to be accessed.
+# The default value is 300 seconds.
+FILES_ACCESS_TIMEOUT=300
+
+# ------------------------------
+# Container Startup Related Configuration
+# Only effective when starting with docker image or docker-compose.
+# ------------------------------
+
+# API service binding address, default: 0.0.0.0, i.e., listen on all addresses.
+DIFY_BIND_ADDRESS=
+
+# API service binding port number, default 5001.
+DIFY_PORT=
+
+# The number of API server workers, i.e., the number of gevent workers.
+# Formula: number of cpu cores x 2 + 1
+# Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers
+SERVER_WORKER_AMOUNT=
+
+# Defaults to gevent. If using Windows, it can be switched to sync or solo.
+SERVER_WORKER_CLASS=
+
+# Similar to SERVER_WORKER_CLASS. Default is gevent.
+# If using Windows, it can be switched to sync or solo.
+CELERY_WORKER_CLASS=
+
+# Request handling timeout in seconds. The default is 200;
+# it is recommended to set it to 360 to support longer SSE connections.
+GUNICORN_TIMEOUT=360
+
+# The number of Celery workers. The default is 1, and can be set as needed.
+CELERY_WORKER_AMOUNT=
+
+# ------------------------------
+# Database Configuration
+# The database uses PostgreSQL. Please use the public schema.
+# It is consistent with the configuration in the 'db' service below.
+# ------------------------------
+
+DB_USERNAME=postgres
+DB_PASSWORD=difyai123456
+DB_HOST=db
+DB_PORT=5432
+DB_DATABASE=dify
+# The size of the database connection pool.
+# The default is 30 connections, which can be appropriately increased.
+SQLALCHEMY_POOL_SIZE=30
+# Database connection pool recycling time, the default is 3600 seconds.
+SQLALCHEMY_POOL_RECYCLE=3600
+# Whether to print SQL, default is false.
+SQLALCHEMY_ECHO=false
+
+# ------------------------------
+# Redis Configuration
+# This Redis configuration is used for caching and for pub/sub during conversation.
+# ------------------------------
+
+REDIS_HOST=redis
+REDIS_PORT=6379
+REDIS_USERNAME=
+REDIS_PASSWORD=difyai123456
+REDIS_USE_SSL=false
+
+# ------------------------------
+# Celery Configuration
+# ------------------------------
+
+# Use redis as the broker, and redis db 1 for celery broker.
+# Format as follows: `redis://<redis_username>:<redis_password>@<redis_host>:<redis_port>/<redis_database>`
+# Example: redis://:difyai123456@redis:6379/1
+CELERY_BROKER_URL=redis://:difyai123456@redis:6379/1
+BROKER_USE_SSL=false
+
+# ------------------------------
+# CORS Configuration
+# Used to set the front-end cross-domain access policy.
+# ------------------------------
+
+# Specifies the allowed origins for cross-origin requests to the Web API,
+# e.g. https://dify.app or * for all origins.
+WEB_API_CORS_ALLOW_ORIGINS=*
+
+# Specifies the allowed origins for cross-origin requests to the console API,
+# e.g. https://cloud.dify.ai or * for all origins.
+CONSOLE_CORS_ALLOW_ORIGINS=*
+
+# ------------------------------
+# File Storage Configuration
+# ------------------------------
+
+# The type of storage to use for storing user files.
+# Supported values are `local`, `s3`, `azure-blob`, `google-storage` and `tencent-cos`.
+# Default: `local`
+STORAGE_TYPE=local
+
+# S3 Configuration
+# Whether to use AWS managed IAM roles for authenticating with the S3 service.
+# If set to false, the access key and secret key must be provided.
+S3_USE_AWS_MANAGED_IAM=false
+# The endpoint of the S3 service.
+S3_ENDPOINT=
+# The region of the S3 service.
+S3_REGION=us-east-1
+# The name of the S3 bucket to use for storing files.
+S3_BUCKET_NAME=difyai
+# The access key to use for authenticating with the S3 service.
+S3_ACCESS_KEY=
+# The secret key to use for authenticating with the S3 service.
+S3_SECRET_KEY=
+
+# Azure Blob Configuration
+# The name of the Azure Blob Storage account to use for storing files.
+AZURE_BLOB_ACCOUNT_NAME=difyai
+# The access key to use for authenticating with the Azure Blob Storage account.
+AZURE_BLOB_ACCOUNT_KEY=difyai
+# The name of the Azure Blob Storage container to use for storing files.
+AZURE_BLOB_CONTAINER_NAME=difyai-container
+# The URL of the Azure Blob Storage account.
+AZURE_BLOB_ACCOUNT_URL=https://.blob.core.windows.net
+
+# Google Storage Configuration
+# The name of the Google Storage bucket to use for storing files.
+GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name
+# The service account JSON key to use for authenticating with the Google Storage service.
+GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64=your-google-service-account-json-base64-string
+
+# The Alibaba Cloud OSS configurations,
+# only available when STORAGE_TYPE is `aliyun-oss`
+ALIYUN_OSS_BUCKET_NAME=your-bucket-name
+ALIYUN_OSS_ACCESS_KEY=your-access-key
+ALIYUN_OSS_SECRET_KEY=your-secret-key
+ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com
+ALIYUN_OSS_REGION=ap-southeast-1
+ALIYUN_OSS_AUTH_VERSION=v4
+
+# Tencent COS Configuration
+# The name of the Tencent COS bucket to use for storing files.
+TENCENT_COS_BUCKET_NAME=your-bucket-name
+# The secret key to use for authenticating with the Tencent COS service.
+TENCENT_COS_SECRET_KEY=your-secret-key
+# The secret id to use for authenticating with the Tencent COS service.
+TENCENT_COS_SECRET_ID=your-secret-id
+# The region of the Tencent COS service.
+TENCENT_COS_REGION=your-region
+# The scheme of the Tencent COS service.
+TENCENT_COS_SCHEME=your-scheme
+
+# ------------------------------
+# Vector Database Configuration
+# ------------------------------
+
+# The type of vector store to use.
+# Supported values are `weaviate`, `qdrant`, `milvus`, `relyt`, `pgvector`, `chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`.
+VECTOR_STORE=weaviate
+
+# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
+WEAVIATE_ENDPOINT=http://weaviate:8080
+# The Weaviate API key.
+WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
+
+# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
+QDRANT_URL=http://qdrant:6333
+# The Qdrant API key.
+QDRANT_API_KEY=difyai123456
+# The Qdrant client timeout setting.
+QDRANT_CLIENT_TIMEOUT=20
+# The Qdrant client enable gRPC mode.
+QDRANT_GRPC_ENABLED=false
+# The Qdrant server gRPC mode PORT.
+QDRANT_GRPC_PORT=6334
+
+# Milvus configuration Only available when VECTOR_STORE is `milvus`.
+# The milvus host.
+MILVUS_HOST=127.0.0.1
+# The milvus port.
+MILVUS_PORT=19530
+# The milvus username.
+MILVUS_USER=root
+# The milvus password.
+MILVUS_PASSWORD=Milvus
+# The milvus tls switch.
+MILVUS_SECURE=false
+
+# pgvector configurations, only available when VECTOR_STORE is `pgvecto-rs` or `pgvector`
+PGVECTOR_HOST=pgvector
+PGVECTOR_PORT=5432
+PGVECTOR_USER=postgres
+PGVECTOR_PASSWORD=difyai123456
+PGVECTOR_DATABASE=dify
+
+# TiDB vector configurations, only available when VECTOR_STORE is `tidb_vector`
+TIDB_VECTOR_HOST=tidb
+TIDB_VECTOR_PORT=4000
+TIDB_VECTOR_USER=xxx.root
+TIDB_VECTOR_PASSWORD=xxxxxx
+TIDB_VECTOR_DATABASE=dify
+
+# Chroma configuration, only available when VECTOR_STORE is `chroma`
+CHROMA_HOST=127.0.0.1
+CHROMA_PORT=8000
+CHROMA_TENANT=default_tenant
+CHROMA_DATABASE=default_database
+CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider
+CHROMA_AUTH_CREDENTIALS=xxxxxx
+
+# Oracle configuration, only available when VECTOR_STORE is `oracle`
+ORACLE_HOST=oracle
+ORACLE_PORT=1521
+ORACLE_USER=dify
+ORACLE_PASSWORD=dify
+ORACLE_DATABASE=FREEPDB1
+
+# relyt configurations, only available when VECTOR_STORE is `relyt`
+RELYT_HOST=db
+RELYT_PORT=5432
+RELYT_USER=postgres
+RELYT_PASSWORD=difyai123456
+RELYT_DATABASE=postgres
+
+# OpenSearch configuration, only available when VECTOR_STORE is `opensearch`
+OPENSEARCH_HOST=127.0.0.1
+OPENSEARCH_PORT=9200
+OPENSEARCH_USER=admin
+OPENSEARCH_PASSWORD=admin
+OPENSEARCH_SECURE=true
+
+# tencent vector configurations, only available when VECTOR_STORE is `tencent`
+TENCENT_VECTOR_DB_URL=http://127.0.0.1
+TENCENT_VECTOR_DB_API_KEY=dify
+TENCENT_VECTOR_DB_TIMEOUT=30
+TENCENT_VECTOR_DB_USERNAME=dify
+TENCENT_VECTOR_DB_DATABASE=dify
+TENCENT_VECTOR_DB_SHARD=1
+TENCENT_VECTOR_DB_REPLICAS=2
+
+# ------------------------------
+# Knowledge Configuration
+# ------------------------------
+
+# Upload file size limit, default 15M.
+UPLOAD_FILE_SIZE_LIMIT=15
+
+# The maximum number of files that can be uploaded at a time, default 5.
+UPLOAD_FILE_BATCH_LIMIT=5
+
+# ETL type, support: `dify`, `Unstructured`
+# `dify`: Dify's proprietary file extraction scheme
+# `Unstructured`: Unstructured.io file extraction scheme
+ETL_TYPE=dify
+
+# Unstructured API path, needs to be configured when ETL_TYPE is Unstructured.
+# For example: http://unstructured:8000/general/v0/general
+UNSTRUCTURED_API_URL=
+
+# ------------------------------
+# Multi-modal Configuration
+# ------------------------------
+
+# The format of the image sent when the multi-modal model is input,
+# the default is base64, optional url.
+# Calls in url mode have lower latency than calls in base64 mode.
+# It is generally recommended to use the more compatible base64 mode.
+# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image.
+MULTIMODAL_SEND_IMAGE_FORMAT=base64
+
+# Upload image file size limit, default 10M.
+UPLOAD_IMAGE_FILE_SIZE_LIMIT=10
+
+# ------------------------------
+# Sentry Configuration
+# Used for application monitoring and error log tracking.
+# ------------------------------
+
+# Sentry DSN address. Default is empty; when empty,
+# all monitoring information is not reported to Sentry.
+# If not set, Sentry error reporting will be disabled.
+SENTRY_DSN=
+
+# The sample rate for Sentry events; 0.01 means 1% of events are reported.
+SENTRY_TRACES_SAMPLE_RATE=1.0
+
+# The sample rate for Sentry profiles; 0.01 means 1% of profiles are reported.
+SENTRY_PROFILES_SAMPLE_RATE=1.0
+
+# ------------------------------
+# Notion Integration Configuration
+# Variables can be obtained by applying for Notion integration: https://www.notion.so/my-integrations
+# ------------------------------
+
+# Configure as "public" or "internal".
+# Since Notion's OAuth redirect URL only supports HTTPS,
+# if deploying locally, please use Notion's internal integration.
+NOTION_INTEGRATION_TYPE=public
+# Notion OAuth client secret (used for public integration type)
+NOTION_CLIENT_SECRET=
+# Notion OAuth client id (used for public integration type)
+NOTION_CLIENT_ID=
+# Notion internal integration secret.
+# If the value of NOTION_INTEGRATION_TYPE is "internal",
+# you need to configure this variable.
+NOTION_INTERNAL_SECRET=
+
+# ------------------------------
+# Mail related configuration
+# ------------------------------
+
+# Mail type, support: resend, smtp
+MAIL_TYPE=resend
+
+# Default sender email address, used when none is specified
+MAIL_DEFAULT_SEND_FROM=
+
+# API-Key for the Resend email provider, used when MAIL_TYPE is `resend`.
+RESEND_API_KEY=your-resend-api-key
+
+# SMTP server configuration, used when MAIL_TYPE is `smtp`
+SMTP_SERVER=
+SMTP_PORT=465
+SMTP_USERNAME=
+SMTP_PASSWORD=
+SMTP_USE_TLS=true
+SMTP_OPPORTUNISTIC_TLS=false
+
+# ------------------------------
+# Others Configuration
+# ------------------------------
+
+# Maximum length of segmentation tokens for indexing
+INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=1000
+
+# Member invitation link valid time (hours),
+# Default: 72.
+INVITE_EXPIRY_HOURS=72
+
+# The sandbox service endpoint.
+CODE_EXECUTION_ENDPOINT=http://sandbox:8194
+CODE_EXECUTION_API_KEY=dify-sandbox
+CODE_MAX_NUMBER=9223372036854775807
+CODE_MIN_NUMBER=-9223372036854775808
+CODE_MAX_STRING_LENGTH=80000
+TEMPLATE_TRANSFORM_MAX_LENGTH=80000
+CODE_MAX_STRING_ARRAY_LENGTH=30
+CODE_MAX_OBJECT_ARRAY_LENGTH=30
+CODE_MAX_NUMBER_ARRAY_LENGTH=1000
+
+# SSRF Proxy server HTTP URL
+SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128
+# SSRF Proxy server HTTPS URL
+SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128
+
+# ------------------------------
+# Environment Variables for db Service
+# ------------------------------
+
+PGUSER=${DB_USERNAME}
+# The password for the default postgres user.
+POSTGRES_PASSWORD=${DB_PASSWORD}
+# The name of the default postgres database.
+POSTGRES_DB=${DB_DATABASE}
+# postgres data directory
+PGDATA=/var/lib/postgresql/data/pgdata
+
+# ------------------------------
+# Environment Variables for sandbox Service
+# ------------------------------
+
+# The API key for the sandbox service
+API_KEY=dify-sandbox
+# The mode in which the Gin framework runs
+GIN_MODE=release
+# The timeout for the worker in seconds
+WORKER_TIMEOUT=15
+# Enable network for the sandbox service
+ENABLE_NETWORK=true
+# HTTP proxy URL for SSRF protection
+HTTP_PROXY=http://ssrf_proxy:3128
+# HTTPS proxy URL for SSRF protection
+HTTPS_PROXY=http://ssrf_proxy:3128
+# The port on which the sandbox service runs
+SANDBOX_PORT=8194
+
+# ------------------------------
+# Environment Variables for qdrant Service
+# (only used when VECTOR_STORE is qdrant)
+# ------------------------------
+QDRANT_API_KEY=difyai123456
+
+# ------------------------------
+# Environment Variables for weaviate Service
+# (only used when VECTOR_STORE is weaviate)
+# ------------------------------
+PERSISTENCE_DATA_PATH='/var/lib/weaviate'
+QUERY_DEFAULTS_LIMIT=25
+AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true
+DEFAULT_VECTORIZER_MODULE=none
+CLUSTER_HOSTNAME=node1
+AUTHENTICATION_APIKEY_ENABLED=true
+AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
+AUTHENTICATION_APIKEY_USERS=hello@dify.ai
+AUTHORIZATION_ADMINLIST_ENABLED=true
+AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai
+
+# ------------------------------
+# Environment Variables for Chroma
+# (only used when VECTOR_STORE is chroma)
+# ------------------------------
+
+# Authentication credentials for Chroma server
+CHROMA_SERVER_AUTHN_CREDENTIALS=difyai123456
+# Authentication provider for Chroma server
+CHROMA_SERVER_AUTHN_PROVIDER=chromadb.auth.token_authn.TokenAuthenticationServerProvider
+# Persistence setting for Chroma server
+IS_PERSISTENT=TRUE
+
+# ------------------------------
+# Environment Variables for Oracle Service
+# (only used when VECTOR_STORE is Oracle)
+# ------------------------------
+ORACLE_PWD=Dify123456
+ORACLE_CHARACTERSET=AL32UTF8
+
+# ------------------------------
+# Environment Variables for milvus Service
+# (only used when VECTOR_STORE is milvus)
+# ------------------------------
+# ETCD configuration for auto compaction mode
+ETCD_AUTO_COMPACTION_MODE=revision
+# ETCD configuration for auto compaction retention in terms of number of revisions
+ETCD_AUTO_COMPACTION_RETENTION=1000
+# ETCD configuration for backend quota in bytes
+ETCD_QUOTA_BACKEND_BYTES=4294967296
+# ETCD configuration for the number of changes before triggering a snapshot
+ETCD_SNAPSHOT_COUNT=50000
+# MinIO access key for authentication
+MINIO_ACCESS_KEY=minioadmin
+# MinIO secret key for authentication
+MINIO_SECRET_KEY=minioadmin
+# ETCD service endpoints
+ETCD_ENDPOINTS=etcd:2379
+# MinIO service address
+MINIO_ADDRESS=minio:9000
+# Enable or disable security authorization
+MILVUS_AUTHORIZATION_ENABLED=true
+
+# ------------------------------
+# Environment Variables for pgvector / pgvecto-rs Service
+# (only used when VECTOR_STORE is pgvector / pgvecto-rs)
+# ------------------------------
+PGVECTOR_PGUSER=postgres
+# The password for the default postgres user.
+PGVECTOR_POSTGRES_PASSWORD=difyai123456
+# The name of the default postgres database.
+PGVECTOR_POSTGRES_DB=dify
+# postgres data directory
+PGVECTOR_PGDATA=/var/lib/postgresql/data/pgdata
+
+# ------------------------------
+# Environment Variables for opensearch
+# (only used when VECTOR_STORE is opensearch)
+# ------------------------------
+OPENSEARCH_DISCOVERY_TYPE=single-node
+OPENSEARCH_BOOTSTRAP_MEMORY_LOCK=true
+OPENSEARCH_JAVA_OPTS_MIN=512m
+OPENSEARCH_JAVA_OPTS_MAX=1024m
+OPENSEARCH_INITIAL_ADMIN_PASSWORD=Qazwsxedc!@#123
+OPENSEARCH_MEMLOCK_SOFT=-1
+OPENSEARCH_MEMLOCK_HARD=-1
+OPENSEARCH_NOFILE_SOFT=65536
+OPENSEARCH_NOFILE_HARD=65536
+
+# ------------------------------
+# Environment Variables for Nginx reverse proxy
+# ------------------------------
+NGINX_SERVER_NAME=_
+HTTPS_ENABLED=false
+# HTTP port
+NGINX_PORT=80
+# SSL settings are only applied when HTTPS_ENABLED is true
+NGINX_SSL_PORT=443
+# if HTTPS_ENABLED is true, you're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
+# and modify the env vars below accordingly.
+NGINX_SSL_CERT_FILENAME=dify.crt
+NGINX_SSL_CERT_KEY_FILENAME=dify.key
+NGINX_SSL_PROTOCOLS=TLSv1.1 TLSv1.2 TLSv1.3
+
+# Nginx performance tuning
+NGINX_WORKER_PROCESSES=auto
+NGINX_CLIENT_MAX_BODY_SIZE=15M
+NGINX_KEEPALIVE_TIMEOUT=65
+
+# Proxy settings
+NGINX_PROXY_READ_TIMEOUT=3600s
+NGINX_PROXY_SEND_TIMEOUT=3600s
+
+# ------------------------------
+# Environment Variables for SSRF Proxy
+# ------------------------------
+SSRF_HTTP_PORT=3128
+COREDUMP_DIR=/var/spool/squid
+REVERSE_PROXY_PORT=8194
+SANDBOX_HOST=sandbox
+
+# ------------------------------
+# docker env var for specifying vector db type at startup
+# (based on the vector db type, the corresponding docker
+# compose profile will be used)
+# ------------------------------
+COMPOSE_PROFILES=${VECTOR_STORE:-weaviate}
+
+# ------------------------------
+# Docker Compose Service Expose Host Port Configurations
+# ------------------------------
+EXPOSE_NGINX_PORT=80
+EXPOSE_NGINX_SSL_PORT=443
diff --git a/docker/.gitignore b/docker/.gitignore
new file mode 100644
index 00000000000000..c2e500f5e29091
--- /dev/null
+++ b/docker/.gitignore
@@ -0,0 +1 @@
+nginx/conf.d/default.conf
\ No newline at end of file
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 00000000000000..6bff8bc3142ccc
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,88 @@
+## README for docker Deployment
+
+Welcome to the new `docker` directory for deploying Dify using Docker Compose. This README outlines the updates, deployment instructions, and migration details for existing users.
+
+### What's Updated
+- **Persistent Environment Variables**: Environment variables are now managed through a `.env` file, ensuring that your configurations persist across deployments.
+
+ > What is `.env`?
+ > The `.env` file is a central configuration file in Docker and Docker Compose environments: it defines environment variables that are available to the containers at runtime. Keeping these settings in one place simplifies configuration management across development, testing, and production, and keeps deployments consistent.
+
+- **Unified Vector Database Services**: All vector database services are now managed from a single Docker Compose file `docker-compose.yaml`. You can switch between different vector databases by setting the `VECTOR_STORE` environment variable in your `.env` file.
+- **Mandatory .env File**: A `.env` file is now required to run `docker compose up`. This file configures your deployment and ensures that custom settings persist through upgrades.
+- **Legacy Support**: Previous deployment files are now located in the `docker-legacy` directory and will no longer be maintained.
+
+### How to Deploy Dify with `docker-compose.yaml`
+1. **Prerequisites**: Ensure Docker and Docker Compose are installed on your system.
+2. **Environment Setup**:
+ - Navigate to the `docker` directory.
+ - Copy the `.env.example` file to a new file named `.env` by running `cp .env.example .env`.
+ - Customize the `.env` file as needed. Refer to the `.env.example` file for detailed configuration options.
+3. **Running the Services**:
+ - Execute `docker compose up` from the `docker` directory to start the services.
+   - To specify a vector database, set the `VECTOR_STORE` variable in your `.env` file to the desired vector database service, such as `milvus`, `weaviate`, or `opensearch`, as shown in the sketch below.
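+
+A minimal sketch of this flow, assuming you are in the repository root and want Milvus (the matching compose profile is derived from `VECTOR_STORE` via `COMPOSE_PROFILES=${VECTOR_STORE:-weaviate}` in `.env`):
+
+```bash
+cd docker
+cp .env.example .env
+# Edit .env and set VECTOR_STORE=milvus, then bring the stack up;
+# the milvus profile's services start alongside the core services.
+docker compose up -d
+```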
+
+### How to Deploy Middleware for Developing Dify
+1. **Middleware Setup**:
+ - Use the `docker-compose.middleware.yaml` for setting up essential middleware services like databases and caches.
+ - Navigate to the `docker` directory.
+   - Create the `middleware.env` file by running `cp middleware.env.example middleware.env` (see `middleware.env.example` for configuration options).
+2. **Running Middleware Services**:
+   - Execute `docker compose -f docker-compose.middleware.yaml up -d` to start the middleware services, as in the sketch below.
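+
+A minimal sketch of the middleware workflow, run from the repository root:
+
+```bash
+cd docker
+cp middleware.env.example middleware.env
+# Adjust middleware.env (ports, passwords) as needed, then start the services.
+docker compose -f docker-compose.middleware.yaml up -d
+```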
+
+### Migration for Existing Users
+For users migrating from the `docker-legacy` setup:
+1. **Review Changes**: Familiarize yourself with the new `.env` configuration and Docker Compose setup.
+2. **Transfer Customizations**:
+   - If you have customized files such as `docker-compose.yaml`, `ssrf_proxy/squid.conf`, or `nginx/conf.d/default.conf`, reflect those changes in the `.env` file you create.
+3. **Data Migration**:
+   - Back up data from services such as databases and caches, and migrate it to the new structure where necessary; a rough sketch follows.
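+
+As a rough sketch only — the paths assume the default layout where legacy data lives under `docker-legacy/volumes` and the legacy compose file is `docker-legacy/docker-compose.yaml`; verify both before copying:
+
+```bash
+# Stop the legacy stack first so data files are quiescent.
+docker compose -f docker-legacy/docker-compose.yaml down
+# Copy persisted volume data into the new docker directory.
+cp -r docker-legacy/volumes/. docker/volumes/
+```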
+
+### Overview of `.env`
+
+#### Key Modules and Customization
+
+- **Vector Database Services**: Depending on the type of vector database used (`VECTOR_STORE`), users can set specific endpoints, ports, and authentication details.
+- **Storage Services**: Depending on the storage type (`STORAGE_TYPE`), users can configure specific settings for S3, Azure Blob, Google Storage, etc.
+- **API and Web Services**: Users can define URLs and other settings that affect how the API and web frontends operate.
+
+#### Other notable variables
+The `.env.example` file provided in the Docker setup covers a wide range of configuration options, organized into sections that each address a different aspect of the application and its services. Key sections and variables include:
+
+1. **Common Variables**:
+ - `CONSOLE_API_URL`, `SERVICE_API_URL`: URLs for different API services.
+ - `APP_WEB_URL`: Frontend application URL.
+ - `FILES_URL`: Base URL for file downloads and previews.
+
+2. **Server Configuration**:
+ - `LOG_LEVEL`, `DEBUG`, `FLASK_DEBUG`: Logging and debug settings.
+ - `SECRET_KEY`: A key for encrypting session cookies and other sensitive data.
+
+3. **Database Configuration**:
+ - `DB_USERNAME`, `DB_PASSWORD`, `DB_HOST`, `DB_PORT`, `DB_DATABASE`: PostgreSQL database credentials and connection details.
+
+4. **Redis Configuration**:
+ - `REDIS_HOST`, `REDIS_PORT`, `REDIS_PASSWORD`: Redis server connection settings.
+
+5. **Celery Configuration**:
+ - `CELERY_BROKER_URL`: Configuration for Celery message broker.
+
+6. **Storage Configuration**:
+ - `STORAGE_TYPE`, `S3_BUCKET_NAME`, `AZURE_BLOB_ACCOUNT_NAME`: Settings for file storage options like local, S3, Azure Blob, etc.
+
+7. **Vector Database Configuration**:
+ - `VECTOR_STORE`: Type of vector database (e.g., `weaviate`, `milvus`).
+ - Specific settings for each vector store like `WEAVIATE_ENDPOINT`, `MILVUS_HOST`.
+
+8. **CORS Configuration**:
+ - `WEB_API_CORS_ALLOW_ORIGINS`, `CONSOLE_CORS_ALLOW_ORIGINS`: Settings for cross-origin resource sharing.
+
+9. **Other Service-Specific Environment Variables**:
+   - Services such as `nginx`, `redis`, `db`, and the vector databases each have specific environment variables that are referenced directly in `docker-compose.yaml`; a short example follows.
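+
+A minimal `.env` sketch touching a few of these sections (values are illustrative placeholders, not recommendations):
+
+```bash
+# Server: generate a strong key, e.g. with `openssl rand -base64 42`.
+SECRET_KEY=your-strong-secret-key
+# Database (must match the db service settings).
+DB_USERNAME=postgres
+DB_PASSWORD=difyai123456
+DB_HOST=db
+DB_PORT=5432
+DB_DATABASE=dify
+# Vector store selection; COMPOSE_PROFILES is derived from this value.
+VECTOR_STORE=weaviate
+WEAVIATE_ENDPOINT=http://weaviate:8080
+# CORS: * allows all origins.
+WEB_API_CORS_ALLOW_ORIGINS=*
+CONSOLE_CORS_ALLOW_ORIGINS=*
+```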
+
+
+### Additional Information
+- **Continuous Improvement Phase**: We are actively seeking feedback from the community to refine and enhance the deployment process. As more users adopt this new method, we will continue to make improvements based on your experiences and suggestions.
+- **Support**: For detailed configuration options and environment variable settings, refer to the `.env.example` file and the Docker Compose configuration files in the `docker` directory.
+
+This README aims to guide you through the deployment process using the new Docker Compose setup. For any issues or further assistance, please refer to the official documentation or contact support.
\ No newline at end of file
diff --git a/docker/docker-compose.middleware.yaml b/docker/docker-compose.middleware.yaml
index 751344010d3422..ec0f5ba5e76137 100644
--- a/docker/docker-compose.middleware.yaml
+++ b/docker/docker-compose.middleware.yaml
@@ -3,17 +3,16 @@ services:
db:
image: postgres:15-alpine
restart: always
+ env_file:
+ - ./middleware.env
environment:
- # The password for the default postgres user.
- POSTGRES_PASSWORD: difyai123456
- # The name of the default postgres database.
- POSTGRES_DB: dify
- # postgres data directory
- PGDATA: /var/lib/postgresql/data/pgdata
+ POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
+ POSTGRES_DB: ${POSTGRES_DB:-dify}
+ PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
volumes:
- ./volumes/db/data:/var/lib/postgresql/data
ports:
- - "5432:5432"
+ - "${EXPOSE_POSTGRES_PORT:-5432}:5432"
# The redis cache.
redis:
@@ -25,30 +24,7 @@ services:
# Set the redis password when startup redis server.
command: redis-server --requirepass difyai123456
ports:
- - "6379:6379"
-
- # The Weaviate vector store.
- weaviate:
- image: semitechnologies/weaviate:1.19.0
- restart: always
- volumes:
- # Mount the Weaviate data directory to the container.
- - ./volumes/weaviate:/var/lib/weaviate
- environment:
- # The Weaviate configurations
- # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
- QUERY_DEFAULTS_LIMIT: 25
- AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'false'
- PERSISTENCE_DATA_PATH: '/var/lib/weaviate'
- DEFAULT_VECTORIZER_MODULE: 'none'
- CLUSTER_HOSTNAME: 'node1'
- AUTHENTICATION_APIKEY_ENABLED: 'true'
- AUTHENTICATION_APIKEY_ALLOWED_KEYS: 'WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih'
- AUTHENTICATION_APIKEY_USERS: 'hello@dify.ai'
- AUTHORIZATION_ADMINLIST_ENABLED: 'true'
- AUTHORIZATION_ADMINLIST_USERS: 'hello@dify.ai'
- ports:
- - "8080:8080"
+ - "${EXPOSE_REDIS_PORT:-6379}:6379"
# The DifySandbox
sandbox:
@@ -58,13 +34,13 @@ services:
# The DifySandbox configurations
# Make sure you are changing this key for your deployment with a strong key.
# You can generate a strong key using `openssl rand -base64 42`.
- API_KEY: dify-sandbox
- GIN_MODE: 'release'
- WORKER_TIMEOUT: 15
- ENABLE_NETWORK: 'true'
- HTTP_PROXY: 'http://ssrf_proxy:3128'
- HTTPS_PROXY: 'http://ssrf_proxy:3128'
- SANDBOX_PORT: 8194
+ API_KEY: ${API_KEY:-dify-sandbox}
+ GIN_MODE: ${GIN_MODE:-release}
+ WORKER_TIMEOUT: ${WORKER_TIMEOUT:-15}
+ ENABLE_NETWORK: ${ENABLE_NETWORK:-true}
+ HTTP_PROXY: ${HTTP_PROXY:-http://ssrf_proxy:3128}
+ HTTPS_PROXY: ${HTTPS_PROXY:-http://ssrf_proxy:3128}
+ SANDBOX_PORT: ${SANDBOX_PORT:-8194}
volumes:
- ./volumes/sandbox/dependencies:/dependencies
networks:
@@ -76,30 +52,50 @@ services:
ssrf_proxy:
image: ubuntu/squid:latest
restart: always
- ports:
- - "3128:3128"
- - "8194:8194"
volumes:
- # pls clearly modify the squid.conf file to fit your network environment.
- - ./volumes/ssrf_proxy/squid.conf:/etc/squid/squid.conf
+ - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
+ - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint.sh
+ entrypoint: /docker-entrypoint.sh
+ environment:
+      # please modify the squid env vars below to fit your network environment.
+ HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
+ COREDUMP_DIR: ${COREDUMP_DIR:-/var/spool/squid}
+ REVERSE_PROXY_PORT: ${REVERSE_PROXY_PORT:-8194}
+ SANDBOX_HOST: ${SANDBOX_HOST:-sandbox}
+ SANDBOX_PORT: ${SANDBOX_PORT:-8194}
+ ports:
+ - "${EXPOSE_SSRF_PROXY_PORT:-3128}:${SSRF_HTTP_PORT:-3128}"
+ - "${EXPOSE_SANDBOX_PORT:-8194}:${SANDBOX_PORT:-8194}"
networks:
- ssrf_proxy_network
- default
- # Qdrant vector store.
- # uncomment to use qdrant as vector store.
- # (if uncommented, you need to comment out the weaviate service above,
- # and set VECTOR_STORE to qdrant in the api & worker service.)
- # qdrant:
- # image: qdrant/qdrant:1.7.3
- # restart: always
- # volumes:
- # - ./volumes/qdrant:/qdrant/storage
- # environment:
- # QDRANT_API_KEY: 'difyai123456'
- # ports:
- # - "6333:6333"
- # - "6334:6334"
+ # The Weaviate vector store.
+ weaviate:
+ image: semitechnologies/weaviate:1.19.0
+ profiles:
+ - weaviate
+ restart: always
+ volumes:
+ # Mount the Weaviate data directory to the container.
+ - ./volumes/weaviate:/var/lib/weaviate
+ env_file:
+ - ./middleware.env
+ environment:
+ # The Weaviate configurations
+ # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
+      PERSISTENCE_DATA_PATH: ${PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
+ QUERY_DEFAULTS_LIMIT: ${QUERY_DEFAULTS_LIMIT:-25}
+ AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
+ DEFAULT_VECTORIZER_MODULE: ${DEFAULT_VECTORIZER_MODULE:-none}
+ CLUSTER_HOSTNAME: ${CLUSTER_HOSTNAME:-node1}
+ AUTHENTICATION_APIKEY_ENABLED: ${AUTHENTICATION_APIKEY_ENABLED:-true}
+ AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
+ AUTHENTICATION_APIKEY_USERS: ${AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
+ AUTHORIZATION_ADMINLIST_ENABLED: ${AUTHORIZATION_ADMINLIST_ENABLED:-true}
+ AUTHORIZATION_ADMINLIST_USERS: ${AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
+ ports:
+ - "${EXPOSE_WEAVIATE_PORT:-8080}:8080"
networks:
# create a network between sandbox, api and ssrf_proxy, and can not access outside.
diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml
index e5f1db8500ad4f..0fbd8e24df26e2 100644
--- a/docker/docker-compose.yaml
+++ b/docker/docker-compose.yaml
@@ -1,219 +1,277 @@
+x-shared-env: &shared-api-worker-env
+ # The log level for the application. Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
+ LOG_LEVEL: ${LOG_LEVEL:-INFO}
+ # Debug mode, default is false. It is recommended to turn on this configuration for local development to prevent some problems caused by monkey patch.
+ DEBUG: ${DEBUG:-false}
+ # Flask debug mode, it can output trace information at the interface when turned on, which is convenient for debugging.
+ FLASK_DEBUG: ${FLASK_DEBUG:-false}
+ # A secretkey that is used for securely signing the session cookie and encrypting sensitive information on the database. You can generate a strong key using `openssl rand -base64 42`.
+ SECRET_KEY: ${SECRET_KEY}
+ # Password for admin user initialization.
+  # If left unset, the admin user will not be prompted for a password when creating the initial admin account.
+ INIT_PASSWORD: ${INIT_PASSWORD}
+ # The base URL of console application web frontend, refers to the Console base URL of WEB service if console domain is
+ # different from api or web app domain.
+ # example: http://cloud.dify.ai
+ CONSOLE_WEB_URL: ${CONSOLE_WEB_URL}
+ # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
+ # different from api or web app domain.
+ # example: http://cloud.dify.ai
+ CONSOLE_API_URL: ${CONSOLE_API_URL}
+ # The URL prefix for Service API endpoints, refers to the base URL of the current API service if api domain is
+ # different from console domain.
+ # example: http://api.dify.ai
+ SERVICE_API_URL: ${SERVICE_API_URL}
+ # The URL prefix for Web APP frontend, refers to the Web App base URL of WEB service if web app domain is different from
+ # console or api domain.
+ # example: http://udify.app
+ APP_WEB_URL: ${APP_WEB_URL}
+  # The URL for the version check. If left empty, https://updates.dify.ai will not be called for the version check.
+ CHECK_UPDATE_URL: ${CHECK_UPDATE_URL}
+ # Used to change the OpenAI base address, default is https://api.openai.com/v1.
+  # When OpenAI cannot be accessed in China, replace it with a domestic mirror address;
+  # it can also point to a local model that provides an OpenAI-compatible API.
+ OPENAI_API_BASE: ${OPENAI_API_BASE}
+  # File preview or download URL prefix,
+  # used to display file preview or download URLs to the front-end or as multi-modal inputs;
+  # the URL is signed and has an expiration time.
+ FILES_URL: ${FILES_URL}
+ # File Access Time specifies a time interval in seconds for the file to be accessed.
+ # The default value is 300 seconds.
+ FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300}
+ # When enabled, migrations will be executed prior to application startup and the application will start after the migrations have completed.
+ MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true}
+ # Deployment environment.
+ # Supported values are `PRODUCTION`, `TESTING`. Default is `PRODUCTION`.
+  # In the `TESTING` environment, a distinct color label is shown on the
+  # front-end page to indicate that it is a testing environment.
+ DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION}
+ # API service binding address, default: 0.0.0.0, i.e., all addresses can be accessed.
+ DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS}
+ # API service binding port number, default 5001.
+ DIFY_PORT: ${DIFY_PORT}
+ # The number of API server workers, i.e., the number of gevent workers.
+ # Formula: number of cpu cores x 2 + 1
+ # Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers
+ SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT}
+  # Defaults to gevent. If using Windows, it can be switched to sync or solo.
+  SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS}
+  # Similar to SERVER_WORKER_CLASS. Default is gevent.
+  # If using Windows, it can be switched to sync or solo.
+  CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS}
+  # Request handling timeout in seconds. The default is 200;
+  # setting it to 360 is recommended to support longer SSE connections.
+ GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT}
+ # The number of Celery workers. The default is 1, and can be set as needed.
+ CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT}
+ # The configurations of postgres database connection.
+ # It is consistent with the configuration in the 'db' service below.
+ DB_USERNAME: ${DB_USERNAME}
+ DB_PASSWORD: ${DB_PASSWORD}
+ DB_HOST: ${DB_HOST}
+ DB_PORT: ${DB_PORT}
+ DB_DATABASE: ${DB_DATABASE}
+ # The size of the database connection pool.
+ # The default is 30 connections, which can be appropriately increased.
+ SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE}
+ # Database connection pool recycling time, the default is 3600 seconds.
+ SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE}
+ # Whether to print SQL, default is false.
+ SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO}
+ # The configurations of redis connection.
+ # It is consistent with the configuration in the 'redis' service below.
+ REDIS_HOST: ${REDIS_HOST}
+ REDIS_PORT: ${REDIS_PORT:-6379}
+ REDIS_USERNAME: ${REDIS_USERNAME}
+ REDIS_PASSWORD: ${REDIS_PASSWORD}
+ REDIS_USE_SSL: ${REDIS_USE_SSL}
+ # Redis Database, default is 0. Please use a different Database from Session Redis and Celery Broker.
+ REDIS_DB: 0
+ # The configurations of celery broker.
+ # Use redis as the broker, and redis db 1 for celery broker.
+ CELERY_BROKER_URL: ${CELERY_BROKER_URL}
+ BROKER_USE_SSL: ${BROKER_USE_SSL}
+ # Specifies the allowed origins for cross-origin requests to the Web API, e.g. https://dify.app or * for all origins.
+ WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS}
+ # Specifies the allowed origins for cross-origin requests to the console API, e.g. https://cloud.dify.ai or * for all origins.
+ CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS}
+  # The type of storage to use for storing user files. Supported values are `local`, `s3`, `azure-blob`, and `google-storage`. Default: `local`.
+ STORAGE_TYPE: ${STORAGE_TYPE}
+  # The path to the local storage directory, either relative to the root of the API service code or an absolute path. Default: `storage` (relative) or e.g. `/home/john/storage` (absolute).
+ # only available when STORAGE_TYPE is `local`.
+ STORAGE_LOCAL_PATH: storage
+ # The S3 storage configurations, only available when STORAGE_TYPE is `s3`.
+ S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM}
+ S3_ENDPOINT: ${S3_ENDPOINT}
+ S3_BUCKET_NAME: ${S3_BUCKET_NAME}
+ S3_ACCESS_KEY: ${S3_ACCESS_KEY}
+ S3_SECRET_KEY: ${S3_SECRET_KEY}
+ S3_REGION: ${S3_REGION}
+ # The Azure Blob storage configurations, only available when STORAGE_TYPE is `azure-blob`.
+ AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME}
+ AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY}
+ AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME}
+ AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL}
+ # The Google storage configurations, only available when STORAGE_TYPE is `google-storage`.
+ GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME}
+ # if you want to use Application Default Credentials, you can leave GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64 empty.
+ GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64}
+ # The Alibaba Cloud OSS configurations, only available when STORAGE_TYPE is `aliyun-oss`
+ ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME}
+ ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY}
+ ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY}
+ ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT}
+ ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION}
+ ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION}
+ # The Tencent COS storage configurations, only available when STORAGE_TYPE is `tencent-cos`.
+ TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME}
+ TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY}
+ TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID}
+ TENCENT_COS_REGION: ${TENCENT_COS_REGION}
+ TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME}
+  # The type of vector store to use. Supported values are `weaviate`, `qdrant`, `milvus`, `relyt`, `pgvector`, `chroma`, `opensearch`, `tidb_vector`.
+ VECTOR_STORE: ${VECTOR_STORE}
+ # The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
+ WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT}
+ # The Weaviate API key.
+ WEAVIATE_API_KEY: ${WEAVIATE_API_KEY}
+ # The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
+ QDRANT_URL: ${QDRANT_URL}
+ # The Qdrant API key.
+ QDRANT_API_KEY: ${QDRANT_API_KEY}
+ # The Qdrant client timeout setting.
+ QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT}
+ # The Qdrant client enable gRPC mode.
+ QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED}
+ # The Qdrant server gRPC mode PORT.
+ QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT}
+  # Milvus configuration. Only available when VECTOR_STORE is `milvus`.
+  # The milvus host.
+  MILVUS_HOST: ${MILVUS_HOST}
+  # The milvus port.
+  MILVUS_PORT: ${MILVUS_PORT}
+ # The milvus username.
+ MILVUS_USER: ${MILVUS_USER}
+ # The milvus password.
+ MILVUS_PASSWORD: ${MILVUS_PASSWORD}
+ # The milvus tls switch.
+ MILVUS_SECURE: ${MILVUS_SECURE}
+ # relyt configurations
+ RELYT_HOST: ${RELYT_HOST}
+ RELYT_PORT: ${RELYT_PORT}
+ RELYT_USER: ${RELYT_USER}
+ RELYT_PASSWORD: ${RELYT_PASSWORD}
+ RELYT_DATABASE: ${RELYT_DATABASE}
+ # pgvector configurations
+ PGVECTOR_HOST: ${PGVECTOR_HOST}
+ PGVECTOR_PORT: ${PGVECTOR_PORT}
+ PGVECTOR_USER: ${PGVECTOR_USER}
+ PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD}
+ PGVECTOR_DATABASE: ${PGVECTOR_DATABASE}
+ # tidb vector configurations
+ TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST}
+ TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT}
+ TIDB_VECTOR_USER: ${TIDB_VECTOR_USER}
+ TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD}
+ TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE}
+ # oracle configurations
+ ORACLE_HOST: ${ORACLE_HOST}
+ ORACLE_PORT: ${ORACLE_PORT}
+ ORACLE_USER: ${ORACLE_USER}
+ ORACLE_PASSWORD: ${ORACLE_PASSWORD}
+ ORACLE_DATABASE: ${ORACLE_DATABASE}
+ # Chroma configuration
+ CHROMA_HOST: ${CHROMA_HOST}
+ CHROMA_PORT: ${CHROMA_PORT}
+ CHROMA_TENANT: ${CHROMA_TENANT}
+ CHROMA_DATABASE: ${CHROMA_DATABASE}
+ CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER}
+ CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS}
+ # OpenSearch configuration
+ OPENSEARCH_HOST: ${OPENSEARCH_HOST}
+ OPENSEARCH_PORT: ${OPENSEARCH_PORT}
+ OPENSEARCH_USER: ${OPENSEARCH_USER}
+ OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD}
+ OPENSEARCH_SECURE: ${OPENSEARCH_SECURE}
+ # tencent configurations
+ TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL}
+ TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY}
+ TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT}
+ TENCENT_VECTOR_DB_USERNAME: ${TENCENT_VECTOR_DB_USERNAME}
+ TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE}
+ TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD}
+ TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS}
+ # Knowledge Configuration
+ # Upload file size limit, default 15M.
+ UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT}
+ # The maximum number of files that can be uploaded at a time, default 5.
+ UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT}
+  # The file extraction scheme: `dify` uses Dify's proprietary extraction, `Unstructured` uses the Unstructured.io service.
+  ETL_TYPE: ${ETL_TYPE}
+  # The Unstructured API URL; must be configured when ETL_TYPE is `Unstructured`.
+  UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL}
+ # Multi-modal Configuration
+  # The format of images sent as multi-modal model inputs. Default is `base64`; `url` is also supported.
+ MULTIMODAL_SEND_IMAGE_FORMAT: ${MULTIMODAL_SEND_IMAGE_FORMAT}
+ # Upload image file size limit, default 10M.
+ UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT}
+ # The DSN for Sentry error reporting. If not set, Sentry error reporting will be disabled.
+ SENTRY_DSN: ${SENTRY_DSN}
+ # The sample rate for Sentry events. Default: `1.0`
+ SENTRY_TRACES_SAMPLE_RATE: ${SENTRY_TRACES_SAMPLE_RATE}
+ # The sample rate for Sentry profiles. Default: `1.0`
+ SENTRY_PROFILES_SAMPLE_RATE: ${SENTRY_PROFILES_SAMPLE_RATE}
+  # Notion import configuration. Supported integration types: `public`, `internal`.
+ NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE}
+ NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET}
+ NOTION_CLIENT_ID: ${NOTION_CLIENT_ID}
+ NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET}
+  # Mail configuration. Supported types: `resend`, `smtp`.
+  MAIL_TYPE: ${MAIL_TYPE}
+  # Default sender email address, used if no sender is specified.
+ MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM}
+ SMTP_SERVER: ${SMTP_SERVER}
+ SMTP_PORT: ${SMTP_PORT:-465}
+ SMTP_USERNAME: ${SMTP_USERNAME}
+ SMTP_PASSWORD: ${SMTP_PASSWORD}
+ SMTP_USE_TLS: ${SMTP_USE_TLS}
+ SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS}
+ # the api-key for resend (https://resend.com)
+ RESEND_API_KEY: ${RESEND_API_KEY}
+ RESEND_API_URL: https://api.resend.com
+ # Indexing configuration
+ INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH}
+ # Other configurations
+ INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS}
+ CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194}
+ CODE_EXECUTION_API_KEY: ${CODE_EXECUTION_API_KEY:-dify-sandbox}
+ CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807}
+ CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:- -9223372036854775808}
+ CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-80000}
+ TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000}
+ CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30}
+ CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30}
+ CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000}
+  SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128}
+  SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128}
+
services:
# API service
api:
- image: langgenius/dify-api:0.6.11
+ image: langgenius/dify-api:0.6.12-fix1
restart: always
environment:
+ # Use the shared environment variables.
+ <<: *shared-api-worker-env
# Startup mode, 'api' starts the API server.
MODE: api
- # The log level for the application. Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
- LOG_LEVEL: INFO
- # enable DEBUG mode to output more logs
- # DEBUG : true
- # A secret key that is used for securely signing the session cookie and encrypting sensitive information on the database. You can generate a strong key using `openssl rand -base64 42`.
- SECRET_KEY: sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
- # The base URL of console application web frontend, refers to the Console base URL of WEB service if console domain is
- # different from api or web app domain.
- # example: http://cloud.dify.ai
- CONSOLE_WEB_URL: ''
- # Password for admin user initialization.
- # If left unset, admin user will not be prompted for a password when creating the initial admin account.
- INIT_PASSWORD: ''
- # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
- # different from api or web app domain.
- # example: http://cloud.dify.ai
- CONSOLE_API_URL: ''
- # The URL prefix for Service API endpoints, refers to the base URL of the current API service if api domain is
- # different from console domain.
- # example: http://api.dify.ai
- SERVICE_API_URL: ''
- # The URL prefix for Web APP frontend, refers to the Web App base URL of WEB service if web app domain is different from
- # console or api domain.
- # example: http://udify.app
- APP_WEB_URL: ''
- # File preview or download Url prefix.
- # used to display File preview or download Url to the front-end or as Multi-model inputs;
- # Url is signed and has expiration time.
- FILES_URL: ''
- # File Access Time specifies a time interval in seconds for the file to be accessed.
- # The default value is 300 seconds.
- FILES_ACCESS_TIMEOUT: 300
- # When enabled, migrations will be executed prior to application startup and the application will start after the migrations have completed.
- MIGRATION_ENABLED: 'true'
- # The configurations of postgres database connection.
- # It is consistent with the configuration in the 'db' service below.
- DB_USERNAME: postgres
- DB_PASSWORD: difyai123456
- DB_HOST: db
- DB_PORT: 5432
- DB_DATABASE: dify
- # The configurations of redis connection.
- # It is consistent with the configuration in the 'redis' service below.
- REDIS_HOST: redis
- REDIS_PORT: 6379
- REDIS_USERNAME: ''
- REDIS_PASSWORD: difyai123456
- REDIS_USE_SSL: 'false'
- # use redis db 0 for redis cache
- REDIS_DB: 0
- # The configurations of celery broker.
- # Use redis as the broker, and redis db 1 for celery broker.
- CELERY_BROKER_URL: redis://:difyai123456@redis:6379/1
- # Specifies the allowed origins for cross-origin requests to the Web API, e.g. https://dify.app or * for all origins.
- WEB_API_CORS_ALLOW_ORIGINS: '*'
- # Specifies the allowed origins for cross-origin requests to the console API, e.g. https://cloud.dify.ai or * for all origins.
- CONSOLE_CORS_ALLOW_ORIGINS: '*'
- # CSRF Cookie settings
- # Controls whether a cookie is sent with cross-site requests,
- # providing some protection against cross-site request forgery attacks
- #
- # Default: `SameSite=Lax, Secure=false, HttpOnly=true`
- # This default configuration supports same-origin requests using either HTTP or HTTPS,
- # but does not support cross-origin requests. It is suitable for local debugging purposes.
- #
- # If you want to enable cross-origin support,
- # you must use the HTTPS protocol and set the configuration to `SameSite=None, Secure=true, HttpOnly=true`.
- #
- # The type of storage to use for storing user files. Supported values are `local` and `s3` and `azure-blob` and `google-storage`, Default: `local`
- STORAGE_TYPE: local
- # The path to the local storage directory, the directory relative the root path of API service codes or absolute path. Default: `storage` or `/home/john/storage`.
- # only available when STORAGE_TYPE is `local`.
- STORAGE_LOCAL_PATH: storage
- # The S3 storage configurations, only available when STORAGE_TYPE is `s3`.
- S3_USE_AWS_MANAGED_IAM: 'false'
- S3_ENDPOINT: 'https://xxx.r2.cloudflarestorage.com'
- S3_BUCKET_NAME: 'difyai'
- S3_ACCESS_KEY: 'ak-difyai'
- S3_SECRET_KEY: 'sk-difyai'
- S3_REGION: 'us-east-1'
- # The Azure Blob storage configurations, only available when STORAGE_TYPE is `azure-blob`.
- AZURE_BLOB_ACCOUNT_NAME: 'difyai'
- AZURE_BLOB_ACCOUNT_KEY: 'difyai'
- AZURE_BLOB_CONTAINER_NAME: 'difyai-container'
- AZURE_BLOB_ACCOUNT_URL: 'https://.blob.core.windows.net'
- # The Google storage configurations, only available when STORAGE_TYPE is `google-storage`.
- GOOGLE_STORAGE_BUCKET_NAME: 'yout-bucket-name'
- # if you want to use Application Default Credentials, you can leave GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64 empty.
- GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: 'your-google-service-account-json-base64-string'
- # The Alibaba Cloud OSS configurations, only available when STORAGE_TYPE is `aliyun-oss`
- ALIYUN_OSS_BUCKET_NAME: 'your-bucket-name'
- ALIYUN_OSS_ACCESS_KEY: 'your-access-key'
- ALIYUN_OSS_SECRET_KEY: 'your-secret-key'
- ALIYUN_OSS_ENDPOINT: 'https://oss-ap-southeast-1-internal.aliyuncs.com'
- ALIYUN_OSS_REGION: 'ap-southeast-1'
- ALIYUN_OSS_AUTH_VERSION: 'v4'
- # The Tencent COS storage configurations, only available when STORAGE_TYPE is `tencent-cos`.
- TENCENT_COS_BUCKET_NAME: 'your-bucket-name'
- TENCENT_COS_SECRET_KEY: 'your-secret-key'
- TENCENT_COS_SECRET_ID: 'your-secret-id'
- TENCENT_COS_REGION: 'your-region'
- TENCENT_COS_SCHEME: 'your-scheme'
- # The type of vector store to use. Supported values are `weaviate`, `qdrant`, `milvus`, `relyt`,`pgvector`, `chroma`, 'opensearch', 'tidb_vector'.
- VECTOR_STORE: weaviate
- # The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
- WEAVIATE_ENDPOINT: http://weaviate:8080
- # The Weaviate API key.
- WEAVIATE_API_KEY: WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
- # The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
- QDRANT_URL: http://qdrant:6333
- # The Qdrant API key.
- QDRANT_API_KEY: difyai123456
- # The Qdrant client timeout setting.
- QDRANT_CLIENT_TIMEOUT: 20
- # The Qdrant client enable gRPC mode.
- QDRANT_GRPC_ENABLED: 'false'
- # The Qdrant server gRPC mode PORT.
- QDRANT_GRPC_PORT: 6334
- # Milvus configuration Only available when VECTOR_STORE is `milvus`.
- # The milvus host.
- MILVUS_HOST: 127.0.0.1
- # The milvus host.
- MILVUS_PORT: 19530
- # The milvus username.
- MILVUS_USER: root
- # The milvus password.
- MILVUS_PASSWORD: Milvus
- # The milvus tls switch.
- MILVUS_SECURE: 'false'
- # relyt configurations
- RELYT_HOST: db
- RELYT_PORT: 5432
- RELYT_USER: postgres
- RELYT_PASSWORD: difyai123456
- RELYT_DATABASE: postgres
- # pgvector configurations
- PGVECTOR_HOST: pgvector
- PGVECTOR_PORT: 5432
- PGVECTOR_USER: postgres
- PGVECTOR_PASSWORD: difyai123456
- PGVECTOR_DATABASE: dify
- # tidb vector configurations
- TIDB_VECTOR_HOST: tidb
- TIDB_VECTOR_PORT: 4000
- TIDB_VECTOR_USER: xxx.root
- TIDB_VECTOR_PASSWORD: xxxxxx
- TIDB_VECTOR_DATABASE: dify
- # oracle configurations
- ORACLE_HOST: oracle
- ORACLE_PORT: 1521
- ORACLE_USER: dify
- ORACLE_PASSWORD: dify
- ORACLE_DATABASE: FREEPDB1
- # Chroma configuration
- CHROMA_HOST: 127.0.0.1
- CHROMA_PORT: 8000
- CHROMA_TENANT: default_tenant
- CHROMA_DATABASE: default_database
- CHROMA_AUTH_PROVIDER: chromadb.auth.token_authn.TokenAuthClientProvider
- CHROMA_AUTH_CREDENTIALS: xxxxxx
- # Mail configuration, support: resend, smtp
- MAIL_TYPE: ''
- # default send from email address, if not specified
- MAIL_DEFAULT_SEND_FROM: 'YOUR EMAIL FROM (eg: no-reply )'
- SMTP_SERVER: ''
- SMTP_PORT: 465
- SMTP_USERNAME: ''
- SMTP_PASSWORD: ''
- SMTP_USE_TLS: 'true'
- SMTP_OPPORTUNISTIC_TLS: 'false'
- # the api-key for resend (https://resend.com)
- RESEND_API_KEY: ''
- RESEND_API_URL: https://api.resend.com
- # The DSN for Sentry error reporting. If not set, Sentry error reporting will be disabled.
- SENTRY_DSN: ''
- # The sample rate for Sentry events. Default: `1.0`
- SENTRY_TRACES_SAMPLE_RATE: 1.0
- # The sample rate for Sentry profiles. Default: `1.0`
- SENTRY_PROFILES_SAMPLE_RATE: 1.0
- # Notion import configuration, support public and internal
- NOTION_INTEGRATION_TYPE: public
- NOTION_CLIENT_SECRET: you-client-secret
- NOTION_CLIENT_ID: you-client-id
- NOTION_INTERNAL_SECRET: you-internal-secret
- # The sandbox service endpoint.
- CODE_EXECUTION_ENDPOINT: "http://sandbox:8194"
- CODE_EXECUTION_API_KEY: dify-sandbox
- CODE_MAX_NUMBER: 9223372036854775807
- CODE_MIN_NUMBER: -9223372036854775808
- CODE_MAX_STRING_LENGTH: 80000
- TEMPLATE_TRANSFORM_MAX_LENGTH: 80000
- CODE_MAX_STRING_ARRAY_LENGTH: 30
- CODE_MAX_OBJECT_ARRAY_LENGTH: 30
- CODE_MAX_NUMBER_ARRAY_LENGTH: 1000
- # SSRF Proxy server
- SSRF_PROXY_HTTP_URL: 'http://ssrf_proxy:3128'
- SSRF_PROXY_HTTPS_URL: 'http://ssrf_proxy:3128'
- # Indexing configuration
- INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: 1000
depends_on:
- db
- redis
volumes:
# Mount the storage directory to the container, for storing user files.
- ./volumes/app/storage:/app/api/storage
- # uncomment to expose dify-api port to host
- # ports:
- # - "5001:5001"
networks:
- ssrf_proxy_network
- default
@@ -221,160 +279,13 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
- image: langgenius/dify-api:0.6.11
+ image: langgenius/dify-api:0.6.12-fix1
restart: always
environment:
- CONSOLE_WEB_URL: ''
+ # Use the shared environment variables.
+ <<: *shared-api-worker-env
# Startup mode, 'worker' starts the Celery worker for processing the queue.
MODE: worker
-
- # --- All the configurations below are the same as those in the 'api' service. ---
-
- # The log level for the application. Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
- LOG_LEVEL: INFO
- # A secret key that is used for securely signing the session cookie and encrypting sensitive information on the database. You can generate a strong key using `openssl rand -base64 42`.
- # same as the API service
- SECRET_KEY: sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
- # The configurations of postgres database connection.
- # It is consistent with the configuration in the 'db' service below.
- DB_USERNAME: postgres
- DB_PASSWORD: difyai123456
- DB_HOST: db
- DB_PORT: 5432
- DB_DATABASE: dify
- # The configurations of redis cache connection.
- REDIS_HOST: redis
- REDIS_PORT: 6379
- REDIS_USERNAME: ''
- REDIS_PASSWORD: difyai123456
- REDIS_DB: 0
- REDIS_USE_SSL: 'false'
- # The configurations of celery broker.
- CELERY_BROKER_URL: redis://:difyai123456@redis:6379/1
- # The type of storage to use for storing user files. Supported values are `local` and `s3` and `azure-blob` and `google-storage`, Default: `local`
- STORAGE_TYPE: local
- STORAGE_LOCAL_PATH: storage
- # The S3 storage configurations, only available when STORAGE_TYPE is `s3`.
- S3_USE_AWS_MANAGED_IAM: 'false'
- S3_ENDPOINT: 'https://xxx.r2.cloudflarestorage.com'
- S3_BUCKET_NAME: 'difyai'
- S3_ACCESS_KEY: 'ak-difyai'
- S3_SECRET_KEY: 'sk-difyai'
- S3_REGION: 'us-east-1'
- # The Azure Blob storage configurations, only available when STORAGE_TYPE is `azure-blob`.
- AZURE_BLOB_ACCOUNT_NAME: 'difyai'
- AZURE_BLOB_ACCOUNT_KEY: 'difyai'
- AZURE_BLOB_CONTAINER_NAME: 'difyai-container'
- AZURE_BLOB_ACCOUNT_URL: 'https://.blob.core.windows.net'
- # The Google storage configurations, only available when STORAGE_TYPE is `google-storage`.
- GOOGLE_STORAGE_BUCKET_NAME: 'yout-bucket-name'
- # if you want to use Application Default Credentials, you can leave GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64 empty.
- GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: 'your-google-service-account-json-base64-string'
- # The Alibaba Cloud OSS configurations, only available when STORAGE_TYPE is `aliyun-oss`
- ALIYUN_OSS_BUCKET_NAME: 'your-bucket-name'
- ALIYUN_OSS_ACCESS_KEY: 'your-access-key'
- ALIYUN_OSS_SECRET_KEY: 'your-secret-key'
- ALIYUN_OSS_ENDPOINT: 'https://oss-ap-southeast-1-internal.aliyuncs.com'
- ALIYUN_OSS_REGION: 'ap-southeast-1'
- ALIYUN_OSS_AUTH_VERSION: 'v4'
- # The Tencent COS storage configurations, only available when STORAGE_TYPE is `tencent-cos`.
- TENCENT_COS_BUCKET_NAME: 'your-bucket-name'
- TENCENT_COS_SECRET_KEY: 'your-secret-key'
- TENCENT_COS_SECRET_ID: 'your-secret-id'
- TENCENT_COS_REGION: 'your-region'
- TENCENT_COS_SCHEME: 'your-scheme'
- # The type of vector store to use. Supported values are `weaviate`, `qdrant`, `milvus`, `relyt`, `pgvector`, `chroma`, 'opensearch', 'tidb_vector'.
- VECTOR_STORE: weaviate
- # The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
- WEAVIATE_ENDPOINT: http://weaviate:8080
- # The Weaviate API key.
- WEAVIATE_API_KEY: WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
- # The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
- QDRANT_URL: http://qdrant:6333
- # The Qdrant API key.
- QDRANT_API_KEY: difyai123456
- # The Qdrant client timeout setting.
- QDRANT_CLIENT_TIMEOUT: 20
- # The Qdrant client enable gRPC mode.
- QDRANT_GRPC_ENABLED: 'false'
- # The Qdrant server gRPC mode PORT.
- QDRANT_GRPC_PORT: 6334
- # Milvus configuration Only available when VECTOR_STORE is `milvus`.
- # The milvus host.
- MILVUS_HOST: 127.0.0.1
- # The milvus host.
- MILVUS_PORT: 19530
- # The milvus username.
- MILVUS_USER: root
- # The milvus password.
- MILVUS_PASSWORD: Milvus
- # The milvus tls switch.
- MILVUS_SECURE: 'false'
- # Mail configuration, support: resend
- MAIL_TYPE: ''
- # default send from email address, if not specified
- MAIL_DEFAULT_SEND_FROM: 'YOUR EMAIL FROM (eg: no-reply )'
- SMTP_SERVER: ''
- SMTP_PORT: 465
- SMTP_USERNAME: ''
- SMTP_PASSWORD: ''
- SMTP_USE_TLS: 'true'
- SMTP_OPPORTUNISTIC_TLS: 'false'
- # the api-key for resend (https://resend.com)
- RESEND_API_KEY: ''
- RESEND_API_URL: https://api.resend.com
- # relyt configurations
- RELYT_HOST: db
- RELYT_PORT: 5432
- RELYT_USER: postgres
- RELYT_PASSWORD: difyai123456
- RELYT_DATABASE: postgres
- # tencent configurations
- TENCENT_VECTOR_DB_URL: http://127.0.0.1
- TENCENT_VECTOR_DB_API_KEY: dify
- TENCENT_VECTOR_DB_TIMEOUT: 30
- TENCENT_VECTOR_DB_USERNAME: dify
- TENCENT_VECTOR_DB_DATABASE: dify
- TENCENT_VECTOR_DB_SHARD: 1
- TENCENT_VECTOR_DB_REPLICAS: 2
- # OpenSearch configuration
- OPENSEARCH_HOST: 127.0.0.1
- OPENSEARCH_PORT: 9200
- OPENSEARCH_USER: admin
- OPENSEARCH_PASSWORD: admin
- OPENSEARCH_SECURE: 'true'
- # pgvector configurations
- PGVECTOR_HOST: pgvector
- PGVECTOR_PORT: 5432
- PGVECTOR_USER: postgres
- PGVECTOR_PASSWORD: difyai123456
- PGVECTOR_DATABASE: dify
- # tidb vector configurations
- TIDB_VECTOR_HOST: tidb
- TIDB_VECTOR_PORT: 4000
- TIDB_VECTOR_USER: xxx.root
- TIDB_VECTOR_PASSWORD: xxxxxx
- TIDB_VECTOR_DATABASE: dify
- # oracle configurations
- ORACLE_HOST: oracle
- ORACLE_PORT: 1521
- ORACLE_USER: dify
- ORACLE_PASSWORD: dify
- ORACLE_DATABASE: FREEPDB1
- # Chroma configuration
- CHROMA_HOST: 127.0.0.1
- CHROMA_PORT: 8000
- CHROMA_TENANT: default_tenant
- CHROMA_DATABASE: default_database
- CHROMA_AUTH_PROVIDER: chromadb.auth.token_authn.TokenAuthClientProvider
- CHROMA_AUTH_CREDENTIALS: xxxxxx
- # Notion import configuration, support public and internal
- NOTION_INTEGRATION_TYPE: public
- NOTION_CLIENT_SECRET: you-client-secret
- NOTION_CLIENT_ID: you-client-id
- NOTION_INTERNAL_SECRET: you-internal-secret
- # Indexing configuration
- INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: 1000
depends_on:
- db
- redis
@@ -387,43 +298,24 @@ services:
# Frontend web application.
web:
- image: langgenius/dify-web:0.6.11
+ image: langgenius/dify-web:0.6.12-fix1
restart: always
environment:
- # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
- # different from api or web app domain.
- # example: http://cloud.dify.ai
- CONSOLE_API_URL: ''
- # The URL for Web APP api server, refers to the Web App base URL of WEB service if web app domain is different from
- # console or api domain.
- # example: http://udify.app
- APP_API_URL: ''
- # The DSN for Sentry error reporting. If not set, Sentry error reporting will be disabled.
- SENTRY_DSN: ''
- # uncomment to expose dify-web port to host
- # ports:
- # - "3000:3000"
+ CONSOLE_API_URL: ${CONSOLE_API_URL:-}
+ APP_API_URL: ${APP_API_URL:-}
+ SENTRY_DSN: ${SENTRY_DSN:-}
# The postgres database.
db:
image: postgres:15-alpine
restart: always
environment:
- PGUSER: postgres
- # The password for the default postgres user.
- POSTGRES_PASSWORD: difyai123456
- # The name of the default postgres database.
- POSTGRES_DB: dify
- # postgres data directory
- PGDATA: /var/lib/postgresql/data/pgdata
+ PGUSER: ${PGUSER:-postgres}
+ POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
+ POSTGRES_DB: ${POSTGRES_DB:-dify}
+ PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
volumes:
- ./volumes/db/data:/var/lib/postgresql/data
- # notice!: if you use windows-wsl2, postgres may not work properly due to the ntfs issue.you can use volumes to mount the data directory to the host.
- # if you use the following config, you need to uncomment the volumes configuration below at the end of the file.
- # - postgres:/var/lib/postgresql/data
- # uncomment to expose db(postgresql) port to host
- # ports:
- # - "5432:5432"
healthcheck:
test: [ "CMD", "pg_isready" ]
interval: 1s
@@ -438,36 +330,9 @@ services:
# Mount the redis data directory to the container.
- ./volumes/redis/data:/data
# Set the redis password when startup redis server.
- command: redis-server --requirepass difyai123456
+ command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
healthcheck:
test: [ "CMD", "redis-cli", "ping" ]
- # uncomment to expose redis port to host
- # ports:
- # - "6379:6379"
-
- # The Weaviate vector store.
- weaviate:
- image: semitechnologies/weaviate:1.19.0
- restart: always
- volumes:
- # Mount the Weaviate data directory to the container.
- - ./volumes/weaviate:/var/lib/weaviate
- environment:
- # The Weaviate configurations
- # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
- QUERY_DEFAULTS_LIMIT: 25
- AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'false'
- PERSISTENCE_DATA_PATH: '/var/lib/weaviate'
- DEFAULT_VECTORIZER_MODULE: 'none'
- CLUSTER_HOSTNAME: 'node1'
- AUTHENTICATION_APIKEY_ENABLED: 'true'
- AUTHENTICATION_APIKEY_ALLOWED_KEYS: 'WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih'
- AUTHENTICATION_APIKEY_USERS: 'hello@dify.ai'
- AUTHORIZATION_ADMINLIST_ENABLED: 'true'
- AUTHORIZATION_ADMINLIST_USERS: 'hello@dify.ai'
- # uncomment to expose weaviate port to host
- # ports:
- # - "8080:8080"
# The DifySandbox
sandbox:
@@ -477,13 +342,13 @@ services:
# The DifySandbox configurations
# Make sure you are changing this key for your deployment with a strong key.
# You can generate a strong key using `openssl rand -base64 42`.
- API_KEY: dify-sandbox
- GIN_MODE: 'release'
- WORKER_TIMEOUT: 15
- ENABLE_NETWORK: 'true'
- HTTP_PROXY: 'http://ssrf_proxy:3128'
- HTTPS_PROXY: 'http://ssrf_proxy:3128'
- SANDBOX_PORT: 8194
+ API_KEY: ${API_KEY:-dify-sandbox}
+ GIN_MODE: ${GIN_MODE:-release}
+ WORKER_TIMEOUT: ${WORKER_TIMEOUT:-15}
+ ENABLE_NETWORK: ${ENABLE_NETWORK:-true}
+ HTTP_PROXY: ${HTTP_PROXY:-http://ssrf_proxy:3128}
+ HTTPS_PROXY: ${HTTPS_PROXY:-http://ssrf_proxy:3128}
+ SANDBOX_PORT: ${SANDBOX_PORT:-8194}
volumes:
- ./volumes/sandbox/dependencies:/dependencies
networks:
@@ -496,67 +361,19 @@ services:
image: ubuntu/squid:latest
restart: always
volumes:
- # pls clearly modify the squid.conf file to fit your network environment.
- - ./volumes/ssrf_proxy/squid.conf:/etc/squid/squid.conf
+ - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
+ - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint.sh
+ entrypoint: ["sh", "-c", "chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh"]
+ environment:
+      # please modify the squid env vars below to fit your network environment.
+ HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
+ COREDUMP_DIR: ${COREDUMP_DIR:-/var/spool/squid}
+ REVERSE_PROXY_PORT: ${REVERSE_PROXY_PORT:-8194}
+ SANDBOX_HOST: ${SANDBOX_HOST:-sandbox}
+ SANDBOX_PORT: ${SANDBOX_PORT:-8194}
networks:
- ssrf_proxy_network
- default
- # Qdrant vector store.
- # uncomment to use qdrant as vector store.
- # (if uncommented, you need to comment out the weaviate service above,
- # and set VECTOR_STORE to qdrant in the api & worker service.)
- # qdrant:
- # image: langgenius/qdrant:v1.7.3
- # restart: always
- # volumes:
- # - ./volumes/qdrant:/qdrant/storage
- # environment:
- # QDRANT_API_KEY: 'difyai123456'
- # # uncomment to expose qdrant port to host
- # # ports:
- # # - "6333:6333"
- # # - "6334:6334"
-
- # The pgvector vector database.
- # Uncomment to use qdrant as vector store.
- # pgvector:
- # image: pgvector/pgvector:pg16
- # restart: always
- # environment:
- # PGUSER: postgres
- # # The password for the default postgres user.
- # POSTGRES_PASSWORD: difyai123456
- # # The name of the default postgres database.
- # POSTGRES_DB: dify
- # # postgres data directory
- # PGDATA: /var/lib/postgresql/data/pgdata
- # volumes:
- # - ./volumes/pgvector/data:/var/lib/postgresql/data
- # # uncomment to expose db(postgresql) port to host
- # # ports:
- # # - "5433:5432"
- # healthcheck:
- # test: [ "CMD", "pg_isready" ]
- # interval: 1s
- # timeout: 3s
- # retries: 30
-
- # The oracle vector database.
- # Uncomment to use oracle23ai as vector store. Also need to Uncomment volumes block
- # oracle:
- # image: container-registry.oracle.com/database/free:latest
- # restart: always
- # ports:
- # - 1521:1521
- # volumes:
- # - type: volume
- # source: oradata
- # target: /opt/oracle/oradata
- # - ./startupscripts:/opt/oracle/scripts/startup
- # environment:
- # - ORACLE_PWD=Dify123456
- # - ORACLE_CHARACTERSET=AL32UTF8
-
# The nginx reverse proxy.
# used for reverse proxying the API service and Web service.
@@ -564,24 +381,250 @@ services:
image: nginx:latest
restart: always
volumes:
- - ./nginx/nginx.conf:/etc/nginx/nginx.conf
- - ./nginx/proxy.conf:/etc/nginx/proxy.conf
+ - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
+ - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
- ./nginx/conf.d:/etc/nginx/conf.d
- #- ./nginx/ssl:/etc/ssl
+ - ./nginx/docker-entrypoint.sh:/docker-entrypoint.sh
+ - ./nginx/ssl:/etc/ssl
+ entrypoint: ["sh", "-c", "chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh"]
+ environment:
+ NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
+ HTTPS_ENABLED: ${HTTPS_ENABLED:-false}
+ NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
+ # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
+ # and modify the env vars below in .env if HTTPS_ENABLED is true.
+ NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
+ NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
+ NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
+ NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
+ NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
+ NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
+ NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
+ NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
depends_on:
- api
- web
ports:
- - "80:80"
- #- "443:443"
-# notice: if you use windows-wsl2, postgres may not work properly due to the ntfs issue.you can use volumes to mount the data directory to the host.
-# volumes:
-# postgres:
+ - "${EXPOSE_NGINX_PORT:-80}:80"
+ - "${EXPOSE_NGINX_SSL_PORT:-443}:443"
+
+ # The Weaviate vector store.
+ weaviate:
+ image: semitechnologies/weaviate:1.19.0
+ profiles:
+ - weaviate
+ restart: always
+ volumes:
+      # Mount the Weaviate data directory to the container.
+ - ./volumes/weaviate:/var/lib/weaviate
+ environment:
+ # The Weaviate configurations
+ # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
+ PERSISTENCE_DATA_PATH: ${PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
+ QUERY_DEFAULTS_LIMIT: ${QUERY_DEFAULTS_LIMIT:-25}
+ AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
+ DEFAULT_VECTORIZER_MODULE: ${DEFAULT_VECTORIZER_MODULE:-none}
+ CLUSTER_HOSTNAME: ${CLUSTER_HOSTNAME:-node1}
+ AUTHENTICATION_APIKEY_ENABLED: ${AUTHENTICATION_APIKEY_ENABLED:-true}
+ AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
+ AUTHENTICATION_APIKEY_USERS: ${AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
+ AUTHORIZATION_ADMINLIST_ENABLED: ${AUTHORIZATION_ADMINLIST_ENABLED:-true}
+ AUTHORIZATION_ADMINLIST_USERS: ${AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
+
+ # Qdrant vector store.
+ # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
+ qdrant:
+ image: langgenius/qdrant:v1.7.3
+ profiles:
+ - qdrant
+ restart: always
+ volumes:
+ - ./volumes/qdrant:/qdrant/storage
+ environment:
+ QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
+
+ # The pgvector vector database.
+ pgvector:
+ image: pgvector/pgvector:pg16
+ profiles:
+ - pgvector
+ restart: always
+ environment:
+ PGUSER: ${PGVECTOR_PGUSER:-postgres}
+ # The password for the default postgres user.
+ POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
+ # The name of the default postgres database.
+ POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
+ # postgres data directory
+ PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
+ volumes:
+ - ./volumes/pgvector/data:/var/lib/postgresql/data
+ healthcheck:
+ test: [ "CMD", "pg_isready" ]
+ interval: 1s
+ timeout: 3s
+ retries: 30
+
+ # pgvecto-rs vector store
+ pgvecto-rs:
+ image: tensorchord/pgvecto-rs:pg16-v0.2.0
+ profiles:
+ - pgvecto-rs
+ restart: always
+ environment:
+ PGUSER: ${PGUSER:-postgres}
+ # The password for the default postgres user.
+ POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
+ # The name of the default postgres database.
+ POSTGRES_DB: ${POSTGRES_DB:-dify}
+ # postgres data directory
+ PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
+ volumes:
+ - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
+ healthcheck:
+ test: [ "CMD", "pg_isready" ]
+ interval: 1s
+ timeout: 3s
+ retries: 30
+
+ # Chroma vector database
+ chroma:
+ image: ghcr.io/chroma-core/chroma:0.5.1
+ profiles:
+ - chroma
+ restart: always
+ volumes:
+ - ./volumes/chroma:/chroma/chroma
+ environment:
+ CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
+ CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
+ IS_PERSISTENT: ${IS_PERSISTENT:-TRUE}
+
+ oracle:
+ image: container-registry.oracle.com/database/free:latest
+ profiles:
+ - oracle
+ restart: always
+ volumes:
+ - type: volume
+ source: oradata
+ target: /opt/oracle/oradata
+ - ./startupscripts:/opt/oracle/scripts/startup
+ environment:
+ - ORACLE_PWD=${ORACLE_PWD:-Dify123456}
+ - ORACLE_CHARACTERSET=${ORACLE_CHARACTERSET:-AL32UTF8}
+
+ # Milvus vector database services
+ etcd:
+ container_name: milvus-etcd
+ image: quay.io/coreos/etcd:v3.5.5
+ profiles:
+ - milvus
+ environment:
+ - ETCD_AUTO_COMPACTION_MODE=${ETCD_AUTO_COMPACTION_MODE:-revision}
+ - ETCD_AUTO_COMPACTION_RETENTION=${ETCD_AUTO_COMPACTION_RETENTION:-1000}
+ - ETCD_QUOTA_BACKEND_BYTES=${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
+ - ETCD_SNAPSHOT_COUNT=${ETCD_SNAPSHOT_COUNT:-50000}
+ volumes:
+ - ./volumes/milvus/etcd:/etcd
+ command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
+ healthcheck:
+ test: ["CMD", "etcdctl", "endpoint", "health"]
+ interval: 30s
+ timeout: 20s
+ retries: 3
+ networks:
+ - milvus
+
+ minio:
+ container_name: milvus-minio
+ image: minio/minio:RELEASE.2023-03-20T20-16-18Z
+ profiles:
+ - milvus
+ environment:
+ MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
+ MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
+ volumes:
+ - ./volumes/milvus/minio:/minio_data
+ command: minio server /minio_data --console-address ":9001"
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
+ interval: 30s
+ timeout: 20s
+ retries: 3
+ networks:
+ - milvus
+
+ milvus-standalone:
+ container_name: milvus-standalone
+ image: milvusdb/milvus:v2.3.1
+ profiles:
+ - milvus
+ command: ["milvus", "run", "standalone"]
+ environment:
+ ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
+ MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
+ common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
+ volumes:
+ - ./volumes/milvus/milvus:/var/lib/milvus
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
+ interval: 30s
+ start_period: 90s
+ timeout: 20s
+ retries: 3
+ depends_on:
+ - "etcd"
+ - "minio"
+ networks:
+ - milvus
+
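The three Milvus services share the dedicated `milvus` bridge network and come up together under one profile. A minimal health sketch (the standalone image bundles `curl`, as its own healthcheck shows):

```bash
docker compose --profile milvus up -d

docker compose ps                    # etcd, minio, milvus-standalone should be healthy
docker compose exec milvus-standalone curl -f http://localhost:9091/healthz
```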
+ opensearch:
+ container_name: opensearch
+ image: opensearchproject/opensearch:latest
+ profiles:
+ - opensearch
+ environment:
+ - discovery.type=${OPENSEARCH_DISCOVERY_TYPE:-single-node}
+ - bootstrap.memory_lock=${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
+ - OPENSEARCH_JAVA_OPTS=-Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
+ - OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
+ ulimits:
+ memlock:
+ soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
+ hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
+ nofile:
+ soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
+ hard: ${OPENSEARCH_NOFILE_HARD:-65536}
+ volumes:
+ - ./volumes/opensearch/data:/usr/share/opensearch/data
+ networks:
+ - opensearch-net
+
+ opensearch-dashboards:
+ container_name: opensearch-dashboards
+ image: opensearchproject/opensearch-dashboards:latest
+ profiles:
+ - opensearch
+ environment:
+ OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
+ volumes:
+ - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
+ networks:
+ - opensearch-net
+ depends_on:
+ - opensearch
+
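OpenSearch enables TLS and basic auth out of the box, so health checks go over https with the admin password defined above; `-k` skips verification of the bundled demo certificate (a sketch):

```bash
docker compose --profile opensearch up -d
docker compose exec opensearch \
  curl -sk -u admin:'Qazwsxedc!@#123' https://localhost:9200/_cluster/health
```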
networks:
 # create a network between sandbox, api and ssrf_proxy, with no access to the outside network.
ssrf_proxy_network:
driver: bridge
internal: true
+ milvus:
+ driver: bridge
+ opensearch-net:
+ driver: bridge
+ internal: true
-#volumes:
-# oradata:
+volumes:
+ oradata:
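With the named `oradata` volume now active rather than commented out, `docker compose config` offers a cheap check that the merged file and profile resolve cleanly (sketch):

```bash
docker compose --profile oracle config --volumes   # should print: oradata
```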
diff --git a/docker/middleware.env.example b/docker/middleware.env.example
new file mode 100644
index 00000000000000..051a79d54eae98
--- /dev/null
+++ b/docker/middleware.env.example
@@ -0,0 +1,51 @@
+# ------------------------------
+# Environment Variables for db Service
+# ------------------------------
+PGUSER=postgres
+# The password for the default postgres user.
+POSTGRES_PASSWORD=difyai123456
+# The name of the default postgres database.
+POSTGRES_DB=dify
+# postgres data directory
+PGDATA=/var/lib/postgresql/data/pgdata
+
+# ------------------------------
+# Environment Variables for qdrant Service
+# (only used when VECTOR_STORE is qdrant)
+# ------------------------------
+QDRANT_API_KEY=difyai123456
+
+# ------------------------------
+# Environment Variables for sandbox Service
+# ------------------------------
+API_KEY=dify-sandbox
+GIN_MODE=release
+WORKER_TIMEOUT=15
+ENABLE_NETWORK=true
+HTTP_PROXY=http://ssrf_proxy:3128
+HTTPS_PROXY=http://ssrf_proxy:3128
+SANDBOX_PORT=8194
+
+# ------------------------------
+# Environment Variables for weaviate Service
+# (only used when VECTOR_STORE is weaviate)
+# ------------------------------
+QUERY_DEFAULTS_LIMIT=25
+AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true
+DEFAULT_VECTORIZER_MODULE=none
+CLUSTER_HOSTNAME=node1
+AUTHENTICATION_APIKEY_ENABLED=true
+AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
+AUTHENTICATION_APIKEY_USERS=hello@dify.ai
+AUTHORIZATION_ADMINLIST_ENABLED=true
+AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai
+
+# ------------------------------
+# Docker Compose Service Expose Host Port Configurations
+# ------------------------------
+EXPOSE_POSTGRES_PORT=5432
+EXPOSE_REDIS_PORT=6379
+EXPOSE_SANDBOX_PORT=8194
+EXPOSE_SSRF_PROXY_PORT=3128
+EXPOSE_WEAVIATE_PORT=8080
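A sketch of how this template is consumed for local development (assuming `docker-compose.middleware.yaml` reads the copied `middleware.env`; all variable names come from the sections above):

```bash
cd docker
cp middleware.env.example middleware.env
# adjust passwords/ports as needed, then start only the middleware stack
docker compose -f docker-compose.middleware.yaml up -d
```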
diff --git a/docker/nginx/conf.d/default.conf.template b/docker/nginx/conf.d/default.conf.template
new file mode 100644
index 00000000000000..af2cfa74557309
--- /dev/null
+++ b/docker/nginx/conf.d/default.conf.template
@@ -0,0 +1,34 @@
+# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
+
+server {
+ listen 80;
+ server_name ${NGINX_SERVER_NAME};
+
+ location /console/api {
+ proxy_pass http://api:5001;
+ include proxy.conf;
+ }
+
+ location /api {
+ proxy_pass http://api:5001;
+ include proxy.conf;
+ }
+
+ location /v1 {
+ proxy_pass http://api:5001;
+ include proxy.conf;
+ }
+
+ location /files {
+ proxy_pass http://api:5001;
+ include proxy.conf;
+ }
+
+ location / {
+ proxy_pass http://web:3000;
+ include proxy.conf;
+ }
+
+ # placeholder for https config defined in https.conf.template
+ ${HTTPS_CONFIG}
+}
diff --git a/docker/nginx/docker-entrypoint.sh b/docker/nginx/docker-entrypoint.sh
new file mode 100755
index 00000000000000..0c24774d9df22c
--- /dev/null
+++ b/docker/nginx/docker-entrypoint.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+if [ "${HTTPS_ENABLED}" = "true" ]; then
+ # set the HTTPS_CONFIG environment variable to the content of the https.conf.template
+ HTTPS_CONFIG=$(envsubst < /etc/nginx/https.conf.template)
+ export HTTPS_CONFIG
+ # Substitute the HTTPS_CONFIG in the default.conf.template with content from https.conf.template
+ envsubst '${HTTPS_CONFIG}' < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
+fi
+
+# Build a list of the variables that are actually set, so the envsubst calls
+# below rewrite only those and leave nginx runtime vars (e.g. $host) untouched.
+env_vars=$(printenv | cut -d= -f1 | sed 's/^/$/g' | paste -sd, -)
+
+envsubst "$env_vars" < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf
+envsubst "$env_vars" < /etc/nginx/proxy.conf.template > /etc/nginx/proxy.conf
+
+# Render the server block; bare envsubst substitutes every exported variable,
+# including ${HTTPS_CONFIG} (left empty when HTTPS is disabled).
+envsubst < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
+
+# Start Nginx using the default entrypoint
+exec nginx -g 'daemon off;'
\ No newline at end of file
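Why the script builds an explicit `$env_vars` list for `nginx.conf` and `proxy.conf`: bare `envsubst` rewrites every `$NAME` token, including nginx runtime variables that are not in the environment, which would blank them. A sketch of the difference:

```bash
export NGINX_KEEPALIVE_TIMEOUT=65

# bare envsubst wipes $host because it is not an environment variable
echo 'keepalive_timeout ${NGINX_KEEPALIVE_TIMEOUT}; proxy_set_header Host $host;' | envsubst

# restricting substitution to named variables leaves $host intact
echo 'keepalive_timeout ${NGINX_KEEPALIVE_TIMEOUT}; proxy_set_header Host $host;' \
  | envsubst '${NGINX_KEEPALIVE_TIMEOUT}'
```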
diff --git a/docker/nginx/https.conf.template b/docker/nginx/https.conf.template
new file mode 100644
index 00000000000000..12a6f56e3b13f0
--- /dev/null
+++ b/docker/nginx/https.conf.template
@@ -0,0 +1,9 @@
+# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
+
+listen ${NGINX_SSL_PORT} ssl;
+ssl_certificate ./../ssl/${NGINX_SSL_CERT_FILENAME};
+ssl_certificate_key ./../ssl/${NGINX_SSL_CERT_KEY_FILENAME};
+ssl_protocols ${NGINX_SSL_PROTOCOLS};
+ssl_prefer_server_ciphers on;
+ssl_session_cache shared:SSL:10m;
+ssl_session_timeout 10m;
\ No newline at end of file
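To exercise the HTTPS path locally, a throwaway self-signed pair can be dropped into `docker/nginx/ssl`; the filenames below are placeholders and must match whatever `NGINX_SSL_CERT_FILENAME` / `NGINX_SSL_CERT_KEY_FILENAME` are set to:

```bash
openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
  -subj '/CN=localhost' \
  -keyout docker/nginx/ssl/dify.key \
  -out docker/nginx/ssl/dify.crt
```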
diff --git a/docker/nginx/nginx.conf.template b/docker/nginx/nginx.conf.template
new file mode 100644
index 00000000000000..32a571653ebe41
--- /dev/null
+++ b/docker/nginx/nginx.conf.template
@@ -0,0 +1,34 @@
+# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
+
+user nginx;
+worker_processes ${NGINX_WORKER_PROCESSES};
+
+error_log /var/log/nginx/error.log notice;
+pid /var/run/nginx.pid;
+
+
+events {
+ worker_connections 1024;
+}
+
+
+http {
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+
+ log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+ '$status $body_bytes_sent "$http_referer" '
+ '"$http_user_agent" "$http_x_forwarded_for"';
+
+ access_log /var/log/nginx/access.log main;
+
+ sendfile on;
+ #tcp_nopush on;
+
+ keepalive_timeout ${NGINX_KEEPALIVE_TIMEOUT};
+
+ #gzip on;
+ client_max_body_size ${NGINX_CLIENT_MAX_BODY_SIZE};
+
+ include /etc/nginx/conf.d/*.conf;
+}
\ No newline at end of file
diff --git a/docker/nginx/proxy.conf.template b/docker/nginx/proxy.conf.template
new file mode 100644
index 00000000000000..6b52d23512a60b
--- /dev/null
+++ b/docker/nginx/proxy.conf.template
@@ -0,0 +1,10 @@
+# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
+
+proxy_set_header Host $host;
+proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+proxy_set_header X-Forwarded-Proto $scheme;
+proxy_http_version 1.1;
+proxy_set_header Connection "";
+proxy_buffering off;
+proxy_read_timeout ${NGINX_PROXY_READ_TIMEOUT};
+proxy_send_timeout ${NGINX_PROXY_SEND_TIMEOUT};
diff --git a/docker/nginx/ssl/.gitkeep b/docker/nginx/ssl/.gitkeep
index 8b137891791fe9..e69de29bb2d1d6 100644
--- a/docker/nginx/ssl/.gitkeep
+++ b/docker/nginx/ssl/.gitkeep
@@ -1 +0,0 @@
-
diff --git a/docker/ssrf_proxy/docker-entrypoint.sh b/docker/ssrf_proxy/docker-entrypoint.sh
new file mode 100755
index 00000000000000..613897bb7db09c
--- /dev/null
+++ b/docker/ssrf_proxy/docker-entrypoint.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Modified based on Squid OCI image entrypoint
+
+# This entrypoint aims to forward the squid logs to stdout to assist users of
+# common container related tooling (e.g., kubernetes, docker-compose, etc) to
+# access the service logs.
+
+# Moreover, it invokes the squid binary, leaving all the desired parameters to
+# be provided by the "command" passed to the spawned container. If no command
+# is provided by the user, the default behavior (as per the CMD statement in
+# the Dockerfile) will be to use Ubuntu's default configuration [1] and run
+# squid with the "-NYC" options to mimic the behavior of the Ubuntu provided
+# systemd unit.
+
+# [1] The default configuration is changed in the Dockerfile to allow local
+# network connections. See the Dockerfile for further information.
+
+echo "[ENTRYPOINT] re-create snakeoil self-signed certificate removed in the build process"
+if [ ! -f /etc/ssl/private/ssl-cert-snakeoil.key ]; then
+ /usr/sbin/make-ssl-cert generate-default-snakeoil --force-overwrite > /dev/null 2>&1
+fi
+
+tail -F /var/log/squid/access.log 2>/dev/null &
+tail -F /var/log/squid/error.log 2>/dev/null &
+tail -F /var/log/squid/store.log 2>/dev/null &
+tail -F /var/log/squid/cache.log 2>/dev/null &
+
+# Replace environment variables in the template and output to the squid.conf
+echo "[ENTRYPOINT] replacing environment variables in the template"
+awk '{
+ while(match($0, /\${[A-Za-z_][A-Za-z_0-9]*}/)) {
+ var = substr($0, RSTART+2, RLENGTH-3)
+ val = ENVIRON[var]
+ $0 = substr($0, 1, RSTART-1) val substr($0, RSTART+RLENGTH)
+ }
+ print
+}' /etc/squid/squid.conf.template > /etc/squid/squid.conf
+
+# -z creates any missing cache directories, then exits; squid proper starts below
+/usr/sbin/squid -Nz
+echo "[ENTRYPOINT] starting squid"
+/usr/sbin/squid -f /etc/squid/squid.conf -NYC 1
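The `awk` loop above is a small stand-in for `envsubst` (presumably to avoid requiring gettext in the image; that rationale is an assumption, not stated in the script). Its behavior is easy to verify in isolation:

```bash
export SANDBOX_HOST=sandbox SANDBOX_PORT=8194
echo 'cache_peer ${SANDBOX_HOST} parent ${SANDBOX_PORT} 0 no-query originserver' | awk '{
  while (match($0, /\$\{[A-Za-z_][A-Za-z_0-9]*\}/)) {
    var = substr($0, RSTART + 2, RLENGTH - 3)   # name between "${" and "}"
    $0 = substr($0, 1, RSTART - 1) ENVIRON[var] substr($0, RSTART + RLENGTH)
  }
  print
}'
# -> cache_peer sandbox parent 8194 0 no-query originserver
```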
diff --git a/docker/ssrf_proxy/squid.conf.template b/docker/ssrf_proxy/squid.conf.template
new file mode 100644
index 00000000000000..a0875a8826e033
--- /dev/null
+++ b/docker/ssrf_proxy/squid.conf.template
@@ -0,0 +1,50 @@
+acl localnet src 0.0.0.1-0.255.255.255 # RFC 1122 "this" network (LAN)
+acl localnet src 10.0.0.0/8 # RFC 1918 local private network (LAN)
+acl localnet src 100.64.0.0/10 # RFC 6598 shared address space (CGN)
+acl localnet src 169.254.0.0/16 # RFC 3927 link-local (directly plugged) machines
+acl localnet src 172.16.0.0/12 # RFC 1918 local private network (LAN)
+acl localnet src 192.168.0.0/16 # RFC 1918 local private network (LAN)
+acl localnet src fc00::/7 # RFC 4193 local private network range
+acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines
+acl SSL_ports port 443
+acl Safe_ports port 80 # http
+acl Safe_ports port 21 # ftp
+acl Safe_ports port 443 # https
+acl Safe_ports port 70 # gopher
+acl Safe_ports port 210 # wais
+acl Safe_ports port 1025-65535 # unregistered ports
+acl Safe_ports port 280 # http-mgmt
+acl Safe_ports port 488 # gss-http
+acl Safe_ports port 591 # filemaker
+acl Safe_ports port 777 # multiling http
+acl CONNECT method CONNECT
+http_access deny !Safe_ports
+http_access deny CONNECT !SSL_ports
+http_access allow localhost manager
+http_access deny manager
+http_access allow localhost
+include /etc/squid/conf.d/*.conf
+http_access deny all
+
+################################## Proxy Server ################################
+http_port ${HTTP_PORT}
+coredump_dir ${COREDUMP_DIR}
+refresh_pattern ^ftp: 1440 20% 10080
+refresh_pattern ^gopher: 1440 0% 1440
+refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
+refresh_pattern \/(Packages|Sources)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
+refresh_pattern \/Release(|\.gpg)$ 0 0% 0 refresh-ims
+refresh_pattern \/InRelease$ 0 0% 0 refresh-ims
+refresh_pattern \/(Translation-.*)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
+refresh_pattern . 0 20% 4320
+
+
+# cache_dir ufs /var/spool/squid 100 16 256
+# upstream proxy, set to your own upstream proxy IP to avoid SSRF attacks
+# cache_peer 172.1.1.1 parent 3128 0 no-query no-digest no-netdb-exchange default
+
+################################## Reverse Proxy To Sandbox ################################
+http_port ${REVERSE_PROXY_PORT} accel vhost
+cache_peer ${SANDBOX_HOST} parent ${SANDBOX_PORT} 0 no-query originserver
+acl src_all src all
+http_access allow src_all
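Once the stack is up, the forward-proxy path can be exercised from any container attached to `ssrf_proxy_network`; a sketch assuming `curl` is available in the `api` image:

```bash
docker compose exec api curl -x http://ssrf_proxy:3128 -I http://example.com
```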
diff --git a/web/app/components/base/markdown.tsx b/web/app/components/base/markdown.tsx
index 37d9ad691d2817..14a630b2ed74a7 100644
--- a/web/app/components/base/markdown.tsx
+++ b/web/app/components/base/markdown.tsx
@@ -44,7 +44,8 @@ const preprocessLaTeX = (content: string) => {
if (typeof content !== 'string')
return content
return content.replace(/\\\[(.*?)\\\]/gs, (_, equation) => `$$${equation}$$`)
- .replace(/\\\((.*?)\\\)/gs, (_, equation) => `$${equation}$`)
+ .replace(/\\\((.*?)\\\)/gs, (_, equation) => `$$${equation}$$`)
+ .replace(/(^|[^\\])\$(.+?)\$/gs, (_, prefix, equation) => `${prefix}$$${equation}$$`)
}
export function PreCode(props: { children: any }) {
diff --git a/web/i18n/fr-FR/common.ts b/web/i18n/fr-FR/common.ts
index b0c4c9bbafae91..6d15638ff1ab77 100644
--- a/web/i18n/fr-FR/common.ts
+++ b/web/i18n/fr-FR/common.ts
@@ -10,7 +10,7 @@ const translation = {
create: 'Créer',
confirm: 'Confirmer',
cancel: 'Annuler',
- clear: 'Clair',
+ clear: 'Effacer',
save: 'Enregistrer',
edit: 'Modifier',
add: 'Ajouter',
diff --git a/web/package.json b/web/package.json
index 46ca4ba059d3d7..71819c176c1b11 100644
--- a/web/package.json
+++ b/web/package.json
@@ -1,6 +1,6 @@
{
"name": "dify-web",
- "version": "0.6.11",
+ "version": "0.6.12-fix1",
"private": true,
"scripts": {
"dev": "next dev",
diff --git a/web/yarn.lock b/web/yarn.lock
index f3d0c0e1b06cd9..393e81cf97b969 100644
--- a/web/yarn.lock
+++ b/web/yarn.lock
@@ -1803,11 +1803,11 @@ brace-expansion@^1.1.7:
concat-map "0.0.1"
braces@^3.0.2, braces@~3.0.2:
- version "3.0.2"
- resolved "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz"
- integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==
+ version "3.0.3"
+ resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789"
+ integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==
dependencies:
- fill-range "^7.0.1"
+ fill-range "^7.1.1"
browserslist@^4.21.5:
version "4.22.3"
@@ -3483,10 +3483,10 @@ file-entry-cache@^6.0.1:
dependencies:
flat-cache "^3.0.4"
-fill-range@^7.0.1:
- version "7.0.1"
- resolved "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz"
- integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==
+fill-range@^7.1.1:
+ version "7.1.1"
+ resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292"
+ integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==
dependencies:
to-regex-range "^5.0.1"