From 46236c714a6f719c963b426b507af5deb0777b16 Mon Sep 17 00:00:00 2001 From: "chen, suyue" Date: Fri, 22 Aug 2025 14:20:56 +0800 Subject: [PATCH 01/78] CI/CD enhance (#2216) Signed-off-by: chensuyue --- .github/workflows/scripts/docker_compose_clean_up.sh | 10 ++++------ .github/workflows/scripts/k8s-utils.sh | 2 +- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/.github/workflows/scripts/docker_compose_clean_up.sh b/.github/workflows/scripts/docker_compose_clean_up.sh index 1e88bb2c95..f2789d7f5b 100644 --- a/.github/workflows/scripts/docker_compose_clean_up.sh +++ b/.github/workflows/scripts/docker_compose_clean_up.sh @@ -19,7 +19,7 @@ case "$1" in containers=$(cat $yaml_file | grep container_name | cut -d':' -f2) for container_name in $containers; do cid=$(docker ps -aq --filter "name=$container_name") - if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid && sleep 1s; fi + if [[ -n "$cid" ]]; then docker stop "$cid" && docker rm "$cid" && sleep 1s; fi done ;; ports) @@ -32,11 +32,9 @@ case "$1" in if [[ $port =~ [a-zA-Z_-] ]]; then echo "Search port value $port from the test case..." port_fix=$(grep -E "export $port=" tests/$test_case | cut -d'=' -f2) - if [[ "$port_fix" == "" ]]; then - echo "Can't find the port value from the test case, use the default value in yaml..." - port_fix=$(yq '.services[].ports[]' $yaml_file | grep $port | cut -d':' -f2 | grep -o '[0-9a-zA-Z]\+') + if [[ "$port_fix" ]]; then + port=$port_fix fi - port=$port_fix fi if [[ $port =~ [0-9] ]]; then if [[ $port == 5000 ]]; then @@ -45,7 +43,7 @@ case "$1" in fi echo "Check port $port..." cid=$(docker ps --filter "publish=${port}" --format "{{.ID}}") - if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid && echo "release $port"; fi + if [[ -n "$cid" ]]; then docker stop "$cid" && docker rm "$cid" && echo "release $port"; fi fi done ;; diff --git a/.github/workflows/scripts/k8s-utils.sh b/.github/workflows/scripts/k8s-utils.sh index 0676a80d38..aa2a4f6d05 100755 --- a/.github/workflows/scripts/k8s-utils.sh +++ b/.github/workflows/scripts/k8s-utils.sh @@ -2,7 +2,7 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -set -e +# set -e function dump_pod_log() { pod_name=$1 From b5e844e4f72504c8a85a64fe1747aa11bc4c9fb8 Mon Sep 17 00:00:00 2001 From: "chen, suyue" Date: Fri, 22 Aug 2025 14:43:08 +0800 Subject: [PATCH 02/78] Sync helm charts values with GenAIInfra (#2219) Signed-off-by: chensuyue --- AgentQnA/kubernetes/helm/rocm-tgi-values.yaml | 57 +++++++++++++++++++ AgentQnA/kubernetes/helm/rocm-values.yaml | 52 +++++++++++++++++ .../helm/faqgen-gaudi-tgi-values.yaml | 4 +- .../kubernetes/helm/faqgen-gaudi-values.yaml | 4 +- .../helm/faqgen-rocm-tgi-values.yaml | 52 +++++++++++++++++ .../kubernetes/helm/faqgen-rocm-values.yaml | 45 +++++++++++++++ ChatQnA/kubernetes/helm/gaudi-tgi-values.yaml | 8 +-- ChatQnA/kubernetes/helm/gaudi-values.yaml | 4 +- .../helm/guardrails-gaudi-values.yaml | 8 +-- ChatQnA/kubernetes/helm/rocm-tgi-values.yaml | 47 +++++++++++++++ ChatQnA/kubernetes/helm/rocm-values.yaml | 39 +++++++++++++ SearchQnA/kubernetes/helm/gaudi-values.yaml | 4 +- 12 files changed, 308 insertions(+), 16 deletions(-) create mode 100644 AgentQnA/kubernetes/helm/rocm-tgi-values.yaml create mode 100644 AgentQnA/kubernetes/helm/rocm-values.yaml create mode 100644 ChatQnA/kubernetes/helm/faqgen-rocm-tgi-values.yaml create mode 100644 ChatQnA/kubernetes/helm/faqgen-rocm-values.yaml create mode 100644 ChatQnA/kubernetes/helm/rocm-tgi-values.yaml create mode 100644 
ChatQnA/kubernetes/helm/rocm-values.yaml diff --git a/AgentQnA/kubernetes/helm/rocm-tgi-values.yaml b/AgentQnA/kubernetes/helm/rocm-tgi-values.yaml new file mode 100644 index 0000000000..9c2bc98eb8 --- /dev/null +++ b/AgentQnA/kubernetes/helm/rocm-tgi-values.yaml @@ -0,0 +1,57 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# Accelerate inferencing in heaviest components to improve performance +# by overriding their subchart values +vllm: + enabled: false +tgi: + enabled: true + accelDevice: "rocm" + image: + repository: ghcr.io/huggingface/text-generation-inference + tag: "3.0.0-rocm" + LLM_MODEL_ID: meta-llama/Llama-3.3-70B-Instruct + MAX_INPUT_LENGTH: "2048" + MAX_TOTAL_TOKENS: "4096" + PYTORCH_TUNABLEOP_ENABLED: "0" + USE_FLASH_ATTENTION: "true" + FLASH_ATTENTION_RECOMPUTE: "false" + HIP_VISIBLE_DEVICES: "0,1" + MAX_BATCH_SIZE: "4" + extraCmdArgs: [ "--num-shard","2" ] + resources: + limits: + amd.com/gpu: "2" + requests: + cpu: 1 + memory: 16Gi + securityContext: + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 0 + capabilities: + add: + - SYS_PTRACE + readinessProbe: + initialDelaySeconds: 60 + periodSeconds: 5 + timeoutSeconds: 1 + failureThreshold: 120 + startupProbe: + initialDelaySeconds: 60 + periodSeconds: 5 + timeoutSeconds: 1 + failureThreshold: 120 +supervisor: + llm_endpoint_url: http://{{ .Release.Name }}-tgi + llm_engine: tgi + model: "meta-llama/Llama-3.3-70B-Instruct" +ragagent: + llm_endpoint_url: http://{{ .Release.Name }}-tgi + llm_engine: tgi + model: "meta-llama/Llama-3.3-70B-Instruct" +sqlagent: + llm_endpoint_url: http://{{ .Release.Name }}-tgi + llm_engine: tgi + model: "meta-llama/Llama-3.3-70B-Instruct" diff --git a/AgentQnA/kubernetes/helm/rocm-values.yaml b/AgentQnA/kubernetes/helm/rocm-values.yaml new file mode 100644 index 0000000000..0d5393b70f --- /dev/null +++ b/AgentQnA/kubernetes/helm/rocm-values.yaml @@ -0,0 +1,52 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# Accelerate inferencing in heaviest components to improve performance +# by overriding their subchart values + +tgi: + enabled: false +vllm: + enabled: true + accelDevice: "rocm" + image: + repository: opea/vllm-rocm + tag: latest + env: + LLM_MODEL_ID: meta-llama/Llama-3.3-70B-Instruct + HIP_VISIBLE_DEVICES: "0,1" + TENSOR_PARALLEL_SIZE: "2" + HF_HUB_DISABLE_PROGRESS_BARS: "1" + HF_HUB_ENABLE_HF_TRANSFER: "0" + VLLM_USE_TRITON_FLASH_ATTN: "0" + VLLM_WORKER_MULTIPROC_METHOD: "spawn" + PYTORCH_JIT: "0" + HF_HOME: "/data" + extraCmd: + command: [ "python3", "/workspace/api_server.py" ] + extraCmdArgs: [ "--swap-space", "16", + "--disable-log-requests", + "--dtype", "float16", + "--num-scheduler-steps", "1", + "--distributed-executor-backend", "mp" ] + resources: + limits: + amd.com/gpu: "2" + startupProbe: + failureThreshold: 180 + securityContext: + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 0 +supervisor: + llm_endpoint_url: http://{{ .Release.Name }}-vllm + llm_engine: vllm + model: "meta-llama/Llama-3.3-70B-Instruct" +ragagent: + llm_endpoint_url: http://{{ .Release.Name }}-vllm + llm_engine: vllm + model: "meta-llama/Llama-3.3-70B-Instruct" +sqlagent: + llm_endpoint_url: http://{{ .Release.Name }}-vllm + llm_engine: vllm + model: "meta-llama/Llama-3.3-70B-Instruct" diff --git a/ChatQnA/kubernetes/helm/faqgen-gaudi-tgi-values.yaml b/ChatQnA/kubernetes/helm/faqgen-gaudi-tgi-values.yaml index 88fca4ed55..99b7fb5c58 100644 --- a/ChatQnA/kubernetes/helm/faqgen-gaudi-tgi-values.yaml 
+++ b/ChatQnA/kubernetes/helm/faqgen-gaudi-tgi-values.yaml @@ -49,8 +49,8 @@ teirerank: OMPI_MCA_btl_vader_single_copy_mechanism: "none" MAX_WARMUP_SEQUENCE_LENGTH: "512" image: - repository: ghcr.io/huggingface/tei-gaudi - tag: 1.5.0 + repository: ghcr.io/huggingface/text-embeddings-inference + tag: hpu-1.7 resources: limits: habana.ai/gaudi: 1 diff --git a/ChatQnA/kubernetes/helm/faqgen-gaudi-values.yaml b/ChatQnA/kubernetes/helm/faqgen-gaudi-values.yaml index 7dd455e112..6e34ab4b09 100644 --- a/ChatQnA/kubernetes/helm/faqgen-gaudi-values.yaml +++ b/ChatQnA/kubernetes/helm/faqgen-gaudi-values.yaml @@ -42,8 +42,8 @@ teirerank: OMPI_MCA_btl_vader_single_copy_mechanism: "none" MAX_WARMUP_SEQUENCE_LENGTH: "512" image: - repository: ghcr.io/huggingface/tei-gaudi - tag: 1.5.0 + repository: ghcr.io/huggingface/text-embeddings-inference + tag: hpu-1.7 resources: limits: habana.ai/gaudi: 1 diff --git a/ChatQnA/kubernetes/helm/faqgen-rocm-tgi-values.yaml b/ChatQnA/kubernetes/helm/faqgen-rocm-tgi-values.yaml new file mode 100644 index 0000000000..19ca79f10a --- /dev/null +++ b/ChatQnA/kubernetes/helm/faqgen-rocm-tgi-values.yaml @@ -0,0 +1,52 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +CHATQNA_TYPE: "CHATQNA_FAQGEN" +llm-uservice: + enabled: true + image: + repository: opea/llm-faqgen + LLM_MODEL_ID: meta-llama/Meta-Llama-3-8B-Instruct + FAQGEN_BACKEND: "TGI" + service: + port: 80 +tgi: + enabled: true + accelDevice: "rocm" + image: + repository: ghcr.io/huggingface/text-generation-inference + tag: "3.0.0-rocm" + LLM_MODEL_ID: meta-llama/Meta-Llama-3-8B-Instruct + MAX_INPUT_LENGTH: "2048" + MAX_TOTAL_TOKENS: "4096" + USE_FLASH_ATTENTION: "true" + FLASH_ATTENTION_RECOMPUTE: "false" + PYTORCH_TUNABLEOP_ENABLED: "0" + HIP_VISIBLE_DEVICES: "0,1" + MAX_BATCH_SIZE: "4" + extraCmdArgs: [ "--num-shard","2" ] + resources: + limits: + amd.com/gpu: "2" + requests: + cpu: 1 + memory: 16Gi + securityContext: + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 0 + capabilities: + add: + - SYS_PTRACE + readinessProbe: + initialDelaySeconds: 60 + periodSeconds: 5 + timeoutSeconds: 1 + failureThreshold: 120 + startupProbe: + initialDelaySeconds: 60 + periodSeconds: 5 + timeoutSeconds: 1 + failureThreshold: 120 +vllm: + enabled: false diff --git a/ChatQnA/kubernetes/helm/faqgen-rocm-values.yaml b/ChatQnA/kubernetes/helm/faqgen-rocm-values.yaml new file mode 100644 index 0000000000..e8941d8153 --- /dev/null +++ b/ChatQnA/kubernetes/helm/faqgen-rocm-values.yaml @@ -0,0 +1,45 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +CHATQNA_TYPE: "CHATQNA_FAQGEN" +llm-uservice: + enabled: true + image: + repository: opea/llm-faqgen + LLM_MODEL_ID: meta-llama/Meta-Llama-3-8B-Instruct + FAQGEN_BACKEND: "vLLM" + service: + port: 80 +tgi: + enabled: false +vllm: + enabled: true + accelDevice: "rocm" + image: + repository: opea/vllm-rocm + tag: latest + env: + HIP_VISIBLE_DEVICES: "0" + TENSOR_PARALLEL_SIZE: "1" + HF_HUB_DISABLE_PROGRESS_BARS: "1" + HF_HUB_ENABLE_HF_TRANSFER: "0" + VLLM_USE_TRITON_FLASH_ATTN: "0" + VLLM_WORKER_MULTIPROC_METHOD: "spawn" + PYTORCH_JIT: "0" + HF_HOME: "/data" + extraCmd: + command: [ "python3", "/workspace/api_server.py" ] + extraCmdArgs: [ "--swap-space", "16", + "--disable-log-requests", + "--dtype", "float16", + "--num-scheduler-steps", "1", + "--distributed-executor-backend", "mp" ] + resources: + limits: + amd.com/gpu: "1" + startupProbe: + failureThreshold: 180 + securityContext: + readOnlyRootFilesystem: 
false + runAsNonRoot: false + runAsUser: 0 diff --git a/ChatQnA/kubernetes/helm/gaudi-tgi-values.yaml b/ChatQnA/kubernetes/helm/gaudi-tgi-values.yaml index 027fecb3fd..c5fc0fa8f8 100644 --- a/ChatQnA/kubernetes/helm/gaudi-tgi-values.yaml +++ b/ChatQnA/kubernetes/helm/gaudi-tgi-values.yaml @@ -43,8 +43,8 @@ teirerank: OMPI_MCA_btl_vader_single_copy_mechanism: "none" MAX_WARMUP_SEQUENCE_LENGTH: "512" image: - repository: ghcr.io/huggingface/tei-gaudi - tag: 1.5.0 + repository: ghcr.io/huggingface/text-embeddings-inference + tag: hpu-1.7 resources: limits: habana.ai/gaudi: 1 @@ -60,8 +60,8 @@ teirerank: # OMPI_MCA_btl_vader_single_copy_mechanism: "none" # MAX_WARMUP_SEQUENCE_LENGTH: "512" # image: -# repository: ghcr.io/huggingface/tei-gaudi -# tag: 1.5.0 +# repository: ghcr.io/huggingface/text-embeddings-inference +# tag: hpu-1.7 # resources: # limits: # habana.ai/gaudi: 1 diff --git a/ChatQnA/kubernetes/helm/gaudi-values.yaml b/ChatQnA/kubernetes/helm/gaudi-values.yaml index 19471c0e43..36a1ee29de 100644 --- a/ChatQnA/kubernetes/helm/gaudi-values.yaml +++ b/ChatQnA/kubernetes/helm/gaudi-values.yaml @@ -37,8 +37,8 @@ teirerank: OMPI_MCA_btl_vader_single_copy_mechanism: "none" MAX_WARMUP_SEQUENCE_LENGTH: "512" image: - repository: ghcr.io/huggingface/tei-gaudi - tag: 1.5.0 + repository: ghcr.io/huggingface/text-embeddings-inference + tag: hpu-1.7 resources: limits: habana.ai/gaudi: 1 diff --git a/ChatQnA/kubernetes/helm/guardrails-gaudi-values.yaml b/ChatQnA/kubernetes/helm/guardrails-gaudi-values.yaml index 5c88e73d0f..bc2faef9a8 100644 --- a/ChatQnA/kubernetes/helm/guardrails-gaudi-values.yaml +++ b/ChatQnA/kubernetes/helm/guardrails-gaudi-values.yaml @@ -19,8 +19,8 @@ guardrails-usvc: # tei: # accelDevice: "gaudi" # image: -# repository: ghcr.io/huggingface/tei-gaudi -# tag: 1.5.0 +# repository: ghcr.io/huggingface/text-embeddings-inference +# tag: hpu-1.7 # resources: # limits: # habana.ai/gaudi: 1 @@ -32,8 +32,8 @@ teirerank: OMPI_MCA_btl_vader_single_copy_mechanism: "none" MAX_WARMUP_SEQUENCE_LENGTH: "512" image: - repository: ghcr.io/huggingface/tei-gaudi - tag: "1.5.0" + repository: ghcr.io/huggingface/text-embeddings-inference + tag: hpu-1.7 resources: limits: habana.ai/gaudi: 1 diff --git a/ChatQnA/kubernetes/helm/rocm-tgi-values.yaml b/ChatQnA/kubernetes/helm/rocm-tgi-values.yaml new file mode 100644 index 0000000000..1a76b460d3 --- /dev/null +++ b/ChatQnA/kubernetes/helm/rocm-tgi-values.yaml @@ -0,0 +1,47 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# Accelerate inferencing in heaviest components to improve performance +# by overriding their subchart values + +tgi: + enabled: true + accelDevice: "rocm" + image: + repository: ghcr.io/huggingface/text-generation-inference + tag: "3.0.0-rocm" + LLM_MODEL_ID: meta-llama/Meta-Llama-3-8B-Instruct + MAX_INPUT_LENGTH: "2048" + MAX_TOTAL_TOKENS: "4096" + PYTORCH_TUNABLEOP_ENABLED: "0" + USE_FLASH_ATTENTION: "true" + FLASH_ATTENTION_RECOMPUTE: "true" + HIP_VISIBLE_DEVICES: "0,1" + MAX_BATCH_SIZE: "4" + extraCmdArgs: [ "--num-shard","2" ] + resources: + limits: + amd.com/gpu: "2" + requests: + cpu: 1 + memory: 16Gi + securityContext: + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 0 + capabilities: + add: + - SYS_PTRACE + readinessProbe: + initialDelaySeconds: 60 + periodSeconds: 5 + timeoutSeconds: 1 + failureThreshold: 120 + startupProbe: + initialDelaySeconds: 60 + periodSeconds: 5 + timeoutSeconds: 1 + failureThreshold: 120 + +vllm: + enabled: false diff --git 
a/ChatQnA/kubernetes/helm/rocm-values.yaml b/ChatQnA/kubernetes/helm/rocm-values.yaml new file mode 100644 index 0000000000..4d637bd8c6 --- /dev/null +++ b/ChatQnA/kubernetes/helm/rocm-values.yaml @@ -0,0 +1,39 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# Accelerate inferencing in heaviest components to improve performance +# by overriding their subchart values + +tgi: + enabled: false +vllm: + enabled: true + accelDevice: "rocm" + image: + repository: opea/vllm-rocm + tag: latest + env: + HIP_VISIBLE_DEVICES: "0" + TENSOR_PARALLEL_SIZE: "1" + HF_HUB_DISABLE_PROGRESS_BARS: "1" + HF_HUB_ENABLE_HF_TRANSFER: "0" + VLLM_USE_TRITON_FLASH_ATTN: "0" + VLLM_WORKER_MULTIPROC_METHOD: "spawn" + PYTORCH_JIT: "0" + HF_HOME: "/data" + extraCmd: + command: [ "python3", "/workspace/api_server.py" ] + extraCmdArgs: [ "--swap-space", "16", + "--disable-log-requests", + "--dtype", "float16", + "--num-scheduler-steps", "1", + "--distributed-executor-backend", "mp" ] + resources: + limits: + amd.com/gpu: "1" + startupProbe: + failureThreshold: 180 + securityContext: + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 0 diff --git a/SearchQnA/kubernetes/helm/gaudi-values.yaml b/SearchQnA/kubernetes/helm/gaudi-values.yaml index a1abc1a447..35a0dd9cd2 100644 --- a/SearchQnA/kubernetes/helm/gaudi-values.yaml +++ b/SearchQnA/kubernetes/helm/gaudi-values.yaml @@ -31,8 +31,8 @@ tgi: tei: accelDevice: "gaudi" image: - repository: ghcr.io/huggingface/tei-gaudi - tag: "1.5.0" + repository: ghcr.io/huggingface/text-embeddings-inference + tag: "hpu-1.7" OMPI_MCA_btl_vader_single_copy_mechanism: none MAX_WARMUP_SEQUENCE_LENGTH: 512 securityContext: From 284ef6444125c860519ad83346e29d543b4e10e3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 22 Aug 2025 16:30:41 +0800 Subject: [PATCH 03/78] Bump llama-index-core from 0.12.37 to 0.12.41 in /EdgeCraftRAG/edgecraftrag (#2142) Signed-off-by: dependabot[bot] --- EdgeCraftRAG/edgecraftrag/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/EdgeCraftRAG/edgecraftrag/requirements.txt b/EdgeCraftRAG/edgecraftrag/requirements.txt index 94cbef7b6c..40299de2ee 100644 --- a/EdgeCraftRAG/edgecraftrag/requirements.txt +++ b/EdgeCraftRAG/edgecraftrag/requirements.txt @@ -4,7 +4,7 @@ faiss-cpu>=1.8.0.post1 html2text>=2025.4.15 langchain-core==0.3.60 llama-index==0.12.41 -llama-index-core==0.12.37 +llama-index-core==0.12.41 llama-index-embeddings-openvino==0.5.2 llama-index-llms-openai==0.3.44 llama-index-llms-openai-like==0.3.4 From becdd61bf6414f0c212a88057d60cd5824700d15 Mon Sep 17 00:00:00 2001 From: "chen, suyue" Date: Fri, 22 Aug 2025 16:35:31 +0800 Subject: [PATCH 04/78] Update v1.4 Release Test Config (#2220) Signed-off-by: chensuyue --- ChatQnA/kubernetes/helm/README.md | 2 +- validated_configurations.md | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/ChatQnA/kubernetes/helm/README.md b/ChatQnA/kubernetes/helm/README.md index 8ada19b070..5186fe3c8d 100644 --- a/ChatQnA/kubernetes/helm/README.md +++ b/ChatQnA/kubernetes/helm/README.md @@ -14,7 +14,7 @@ helm install chatqna oci://ghcr.io/opea-project/charts/chatqna --set global.HUG ``` export HFTOKEN="insert-your-huggingface-token-here" -helm install chatqna oci://ghcr.io/opea-project/charts/chatqna --set global.HUGGINGFACEHUB_API_TOKEN=${HFTOKEN} -f gaudi-vllm-values.yaml +helm install chatqna oci://ghcr.io/opea-project/charts/chatqna --set 
global.HUGGINGFACEHUB_API_TOKEN=${HFTOKEN} -f gaudi-values.yaml ``` ## Deploy variants of ChatQnA diff --git a/validated_configurations.md b/validated_configurations.md index 0816c9ef89..fea6438b54 100644 --- a/validated_configurations.md +++ b/validated_configurations.md @@ -21,3 +21,21 @@ Example specific test matrix can be found in examples' README.md files, for exam | | vLLM v0.8.3 (Xeon, ROCm) | | | TGI v2.4.0 (Xeon), v2.3.1 (Gaudi), v2.4.1 (ROCm) | | | TEI v1.6 | + +## v1.4 Release Test Config Overview + +| **HW/SW Stake** | **Description** | +| ---------------------- | -------------------------------------------------------------------------------------- | +| **Validated Hardware** | Intel Gaudi AI Accelerators (2nd) | +| | Intel Xeon Scalable processor (3rd) | +| | Intel Arc Graphics GPU (A770) | +| | AMD EPYC processors (4th, 5th) | +| **Validated Software** | Ubuntu 22.04 | +| | Habana v1.21 ([link](https://docs.habana.ai/en/v1.21.2/Installation_Guide/index.html)) | +| | Docker version 28.3.3 | +| | Docker Compose version v2.39.1 | +| | Kubernetes v1.32.7 | +| | HabanaAI vLLM v0.6.6.post1+Gaudi-1.20.0 | +| | vLLM v1.10.0 | +| | TGI v2.4.0 (Xeon), v2.3.1 (Gaudi), v2.4.1 (ROCm) | +| | TEI v1.7 | From c483e398beabb1ee64447ade933faaae433166af Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 22 Aug 2025 16:48:59 +0800 Subject: [PATCH 05/78] Bump @sveltejs/kit from 2.0.0 to 2.20.6 in /Text2Image/ui/svelte (#2214) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Text2Image/ui/svelte/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Text2Image/ui/svelte/package.json b/Text2Image/ui/svelte/package.json index fd5c4ded03..401bf24fbd 100644 --- a/Text2Image/ui/svelte/package.json +++ b/Text2Image/ui/svelte/package.json @@ -18,7 +18,7 @@ "@fortawesome/free-solid-svg-icons": "6.2.0", "@playwright/test": "^1.45.2", "@sveltejs/adapter-auto": "^3.0.0", - "@sveltejs/kit": "2.0.0", + "@sveltejs/kit": "2.20.6", "@sveltejs/vite-plugin-svelte": "^3.0.0", "@tailwindcss/typography": "0.5.7", "@types/debug": "4.1.7", From c78904afa916917260bd0ab3b3b689791925c44f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 22 Aug 2025 16:50:12 +0800 Subject: [PATCH 06/78] Bump svelte from 4.2.7 to 4.2.19 in /Translation/ui/svelte (#2108) Signed-off-by: dependabot[bot] --- Translation/ui/svelte/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Translation/ui/svelte/package.json b/Translation/ui/svelte/package.json index 4dd56be28f..c36dcc1790 100644 --- a/Translation/ui/svelte/package.json +++ b/Translation/ui/svelte/package.json @@ -27,7 +27,7 @@ "postcss": "^8.4.32", "postcss-load-config": "^5.0.2", "publint": "^0.1.9", - "svelte": "4.2.7", + "svelte": "4.2.19", "svelte-check": "^3.6.0", "svelte-highlight": "^7.6.0", "tailwindcss": "^3.3.6", From 57468233a9a37efeb9d399d805d9b5a7af92e3a8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Aug 2025 10:14:47 +0800 Subject: [PATCH 07/78] Bump @sveltejs/kit from 2.0.0 to 2.20.6 in /HybridRAG/ui/svelte (#2212) Signed-off-by: dependabot[bot] --- HybridRAG/ui/svelte/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/HybridRAG/ui/svelte/package.json b/HybridRAG/ui/svelte/package.json index eaa981570a..d62d64712b 100644 --- 
a/HybridRAG/ui/svelte/package.json +++ b/HybridRAG/ui/svelte/package.json @@ -18,7 +18,7 @@ "@fortawesome/free-solid-svg-icons": "6.2.0", "@playwright/test": "^1.45.2", "@sveltejs/adapter-auto": "^3.0.0", - "@sveltejs/kit": "2.0.0", + "@sveltejs/kit": "2.20.6", "@sveltejs/vite-plugin-svelte": "^3.0.0", "@tailwindcss/typography": "0.5.7", "@types/debug": "4.1.7", From 6647c8586f4f1f6a420711907bc6cf6493a9b0d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Aug 2025 17:28:15 +0800 Subject: [PATCH 08/78] Bump @sveltejs/kit from 2.0.0 to 2.20.6 in /DocSum/ui/svelte (#2224) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- DocSum/ui/svelte/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DocSum/ui/svelte/package.json b/DocSum/ui/svelte/package.json index d5efc343c0..f6054bc229 100644 --- a/DocSum/ui/svelte/package.json +++ b/DocSum/ui/svelte/package.json @@ -27,7 +27,7 @@ "devDependencies": { "@playwright/test": "^1.44.1", "@sveltejs/adapter-auto": "^3.0.0", - "@sveltejs/kit": "2.0.0", + "@sveltejs/kit": "2.20.6", "@sveltejs/package": "^2.0.0", "@sveltejs/vite-plugin-svelte": "^3.0.0", "autoprefixer": "^10.4.16", From 827affc17388823478079b99a68e310b59f47feb Mon Sep 17 00:00:00 2001 From: WenjiaoYue Date: Mon, 25 Aug 2025 21:11:34 +0800 Subject: [PATCH 09/78] Fix copy feature in codeTrans (#2206) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- CodeTrans/ui/svelte/src/routes/+page.svelte | 33 ++++++++++++++++++--- 1 file changed, 29 insertions(+), 4 deletions(-) diff --git a/CodeTrans/ui/svelte/src/routes/+page.svelte b/CodeTrans/ui/svelte/src/routes/+page.svelte index fd6be39310..af19e26d31 100644 --- a/CodeTrans/ui/svelte/src/routes/+page.svelte +++ b/CodeTrans/ui/svelte/src/routes/+page.svelte @@ -83,12 +83,37 @@ let deleteFlag: boolean = false; let inputClick: boolean = true; - function handelCopy() { - navigator.clipboard.writeText(output); - copyText = "copied!"; + async function handelCopy() { + try { + if (navigator.clipboard && navigator.clipboard.writeText) { + await navigator.clipboard.writeText(output); + copyText = "copied!"; + } else { + const textArea = document.createElement('textarea'); + textArea.value = output; + textArea.style.position = 'fixed'; + textArea.style.left = '-999999px'; + textArea.style.top = '-999999px'; + document.body.appendChild(textArea); + textArea.focus(); + textArea.select(); + + if (document.execCommand('copy')) { + copyText = "copied!"; + } else { + copyText = "copy failed"; + } + + document.body.removeChild(textArea); + } + } catch (err) { + console.error('Copy failed:', err); + copyText = "copy failed"; + } + setTimeout(() => { copyText = "copy"; - }, 1000); + }, 2000); } function handelInputClick() { From 24239069e5da139b6ecfe0ab9f3bb42ef6525730 Mon Sep 17 00:00:00 2001 From: WenjiaoYue Date: Mon, 25 Aug 2025 21:14:51 +0800 Subject: [PATCH 10/78] Fix UI sending chat history to backend (#2225) Signed-off-by: WenjiaoYue --- .../ui/svelte/src/lib/network/chat/Network.ts | 2 +- ChatQnA/ui/svelte/src/routes/+page.svelte | 20 +++++++++++++++++-- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/ChatQnA/ui/svelte/src/lib/network/chat/Network.ts b/ChatQnA/ui/svelte/src/lib/network/chat/Network.ts index 060c5a5ffb..f7fb57c9b3 100644 --- a/ChatQnA/ui/svelte/src/lib/network/chat/Network.ts +++ 
b/ChatQnA/ui/svelte/src/lib/network/chat/Network.ts @@ -18,7 +18,7 @@ import { SSE } from "sse.js"; const CHAT_BASE_URL = env.CHAT_BASE_URL; const MODEL_ID = env.MODEL_ID; -export async function fetchTextStream(query: string) { +export async function fetchTextStream(query: object) { let payload = {}; let url = ""; let modelId = "meta-llama/Meta-Llama-3-8B-Instruct"; diff --git a/ChatQnA/ui/svelte/src/routes/+page.svelte b/ChatQnA/ui/svelte/src/routes/+page.svelte index bcd0b8b708..3d7f6afadf 100644 --- a/ChatQnA/ui/svelte/src/routes/+page.svelte +++ b/ChatQnA/ui/svelte/src/routes/+page.svelte @@ -102,7 +102,7 @@ return decoded; } - const callTextStream = async (query: string, startSendTime: number) => { + const callTextStream = async (query: object, startSendTime: number) => { try { const eventSource = await fetchTextStream(query); eventSource.addEventListener("error", (e: any) => { @@ -179,6 +179,22 @@ } }; + function mapRole(r: number): "user" | "assistant" | "system" { + if (r === 1) return "user"; + if (r === 0) return "assistant"; + return "system"; + } + + function multiMessages( + history: any[] + ): { role: "user" | "assistant" | "system"; content: string }[] { + return history.map((m) => ({ + role: mapRole(m.role), + content: + typeof m.content === "string" ? m.content : String(m.content ?? ""), + })); + } + const handleTextSubmit = async () => { loading = true; const newMessage = { @@ -192,7 +208,7 @@ storeMessages(); query = ""; - await callTextStream(newMessage.content, getCurrentTimeStamp()); + await callTextStream(multiMessages(chatMessages), getCurrentTimeStamp()); scrollToBottom(scrollToDiv); storeMessages(); From 0eabdbfe947c63bd6ece2a3fc34c5c8d97ea9833 Mon Sep 17 00:00:00 2001 From: ZePan110 Date: Wed, 27 Aug 2025 09:31:57 +0800 Subject: [PATCH 11/78] Fix build fail for EdgeCraftRAG. (#2218) Signed-off-by: ZePan110 Signed-off-by: chensuyue Co-authored-by: chensuyue --- .github/workflows/pr-image-size.yml | 15 +++++++++++---- EdgeCraftRAG/edgecraftrag/requirements.txt | 4 ++-- HybridRAG/docker_image_build/build.yaml | 2 +- HybridRAG/tests/test_compose_on_gaudi.sh | 2 +- 4 files changed, 15 insertions(+), 8 deletions(-) diff --git a/.github/workflows/pr-image-size.yml b/.github/workflows/pr-image-size.yml index bbed2b0d88..cc2788776d 100644 --- a/.github/workflows/pr-image-size.yml +++ b/.github/workflows/pr-image-size.yml @@ -46,8 +46,8 @@ jobs: matrix: dockerfile: ${{ fromJson(needs.get-check-list.outputs.files) }} fail-fast: false - # outputs: - # comments: ${{ steps.build-check.outputs.comment_message }} + outputs: + skip: ${{ steps.build-check.outputs.skip }} steps: - name: Checkout PR branch uses: actions/checkout@v4 @@ -92,7 +92,11 @@ jobs: echo "Building base image for $dockerfile" git checkout ${{ github.event.pull_request.base.sha }} echo "::group::Build image_base" - docker build -f $file -t "$image_base" --no-cache . || true + if ! 
docker build -f "$file" -t "$image_base" --no-cache .; then + echo "skip=true" >> $GITHUB_ENV + echo "skip=true" >> $GITHUB_OUTPUT + exit 0 + fi echo "::endgroup::" size_base=$(docker image inspect "$image_base" | jq '.[0].Size / (1024 * 1024) | round') @@ -123,6 +127,7 @@ jobs: echo "summary_path=${{github.workspace}}/build-$image_name.md" >> $GITHUB_ENV - name: Download origin artifact log + if: env.skip != 'true' uses: actions/download-artifact@v4 with: name: build-comments @@ -130,12 +135,14 @@ jobs: continue-on-error: true - name: Merge logs + if: env.skip != 'true' run: | mkdir -p merged-files ls merged-files/ cp ${{ env.summary_path }} merged-files/ - name: Save Summary as Artifact + if: env.skip != 'true' uses: actions/upload-artifact@v4 with: name: build-comments @@ -146,7 +153,7 @@ jobs: needs: build-and-check permissions: actions: read - if: always() + if: always() && needs.build-and-check.outputs.skip != 'true' runs-on: ubuntu-latest outputs: all_comments: ${{ steps.summary.outputs.all_comments }} diff --git a/EdgeCraftRAG/edgecraftrag/requirements.txt b/EdgeCraftRAG/edgecraftrag/requirements.txt index 40299de2ee..8dc53e6c83 100644 --- a/EdgeCraftRAG/edgecraftrag/requirements.txt +++ b/EdgeCraftRAG/edgecraftrag/requirements.txt @@ -6,8 +6,8 @@ langchain-core==0.3.60 llama-index==0.12.41 llama-index-core==0.12.41 llama-index-embeddings-openvino==0.5.2 -llama-index-llms-openai==0.3.44 -llama-index-llms-openai-like==0.3.4 +llama-index-llms-openai==0.4.0 +llama-index-llms-openai-like==0.4.0 llama-index-llms-openvino==0.4.0 llama-index-postprocessor-openvino-rerank==0.4.1 llama-index-readers-file==0.4.7 diff --git a/HybridRAG/docker_image_build/build.yaml b/HybridRAG/docker_image_build/build.yaml index 7c23807736..75dbef2c35 100644 --- a/HybridRAG/docker_image_build/build.yaml +++ b/HybridRAG/docker_image_build/build.yaml @@ -13,7 +13,7 @@ services: context: ../ dockerfile: ./Dockerfile image: ${REGISTRY:-opea}/hybridrag:${TAG:-latest} - text2cypher: + text2cypher-gaudi: build: context: GenAIComps dockerfile: comps/text2cypher/src/Dockerfile.intel_hpu diff --git a/HybridRAG/tests/test_compose_on_gaudi.sh b/HybridRAG/tests/test_compose_on_gaudi.sh index 39ced4f92c..c870dbecd4 100755 --- a/HybridRAG/tests/test_compose_on_gaudi.sh +++ b/HybridRAG/tests/test_compose_on_gaudi.sh @@ -36,7 +36,7 @@ function build_docker_images() { cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
- service_list="hybridrag hybridrag-ui dataprep retriever text2cypher vllm nginx" + service_list="hybridrag hybridrag-ui dataprep retriever text2cypher-gaudi vllm nginx" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log 2>&1 docker images && sleep 1s From ed1cf289283297fbf1135ad533e9651f0cf96dab Mon Sep 17 00:00:00 2001 From: Zhu Yongbo Date: Wed, 27 Aug 2025 16:44:18 +0800 Subject: [PATCH 12/78] fix Chinese doc retriever (#2226) Signed-off-by: Yongbozzz --- EdgeCraftRAG/edgecraftrag/api/v1/data.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/EdgeCraftRAG/edgecraftrag/api/v1/data.py b/EdgeCraftRAG/edgecraftrag/api/v1/data.py index 35ef7e25b7..a000e46e88 100755 --- a/EdgeCraftRAG/edgecraftrag/api/v1/data.py +++ b/EdgeCraftRAG/edgecraftrag/api/v1/data.py @@ -6,7 +6,6 @@ from edgecraftrag.api_schema import DataIn, FilesIn from edgecraftrag.context import ctx from fastapi import FastAPI, File, HTTPException, UploadFile, status -from werkzeug.utils import secure_filename data_app = FastAPI() @@ -110,9 +109,9 @@ async def upload_file(file_name: str, file: UploadFile = File(...)): status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid file_name: directory traversal detected" ) os.makedirs(UPLOAD_DIRECTORY, exist_ok=True) - safe_filename = secure_filename(file.filename) + safe_filename = file.filename # Sanitize the uploaded file's name - safe_filename = secure_filename(file.filename) + safe_filename = file.filename file_path = os.path.normpath(os.path.join(UPLOAD_DIRECTORY, safe_filename)) # Ensure file_path is within UPLOAD_DIRECTORY if not file_path.startswith(os.path.abspath(UPLOAD_DIRECTORY)): From e8e1564ddea90df6f27bb5d6cedd46695e7f04e8 Mon Sep 17 00:00:00 2001 From: ZePan110 Date: Mon, 8 Sep 2025 14:45:22 +0800 Subject: [PATCH 13/78] Use vllm release image for AudioQnA and ChatQnA. 
(#2239) Signed-off-by: ZePan110 --- AudioQnA/docker_compose/amd/cpu/epyc/compose.yaml | 2 +- .../docker_compose/intel/cpu/xeon/compose.yaml | 2 +- .../intel/cpu/xeon/compose_multilang.yaml | 2 +- AudioQnA/docker_image_build/build.yaml | 6 ------ AudioQnA/tests/test_compose_multilang_on_xeon.sh | 8 +------- AudioQnA/tests/test_compose_on_epyc.sh | 14 +------------- AudioQnA/tests/test_compose_on_xeon.sh | 8 +------- ChatQnA/docker_compose/amd/cpu/epyc/compose.yaml | 2 +- .../amd/cpu/epyc/compose_faqgen.yaml | 2 +- .../amd/cpu/epyc/compose_milvus.yaml | 2 +- .../amd/cpu/epyc/compose_pinecone.yaml | 2 +- .../amd/cpu/epyc/compose_qdrant.yaml | 2 +- .../amd/cpu/epyc/compose_without_rerank.yaml | 2 +- ChatQnA/docker_compose/intel/cpu/xeon/compose.yaml | 2 +- .../intel/cpu/xeon/compose_faqgen.yaml | 2 +- .../intel/cpu/xeon/compose_mariadb.yaml | 2 +- .../intel/cpu/xeon/compose_milvus.yaml | 2 +- .../intel/cpu/xeon/compose_pinecone.yaml | 2 +- .../intel/cpu/xeon/compose_qdrant.yaml | 2 +- .../intel/cpu/xeon/compose_without_rerank.yaml | 2 +- ChatQnA/docker_image_build/build.yaml | 6 ------ ChatQnA/tests/test_compose_faqgen_on_epyc.sh | 12 +----------- ChatQnA/tests/test_compose_faqgen_on_xeon.sh | 6 +----- ChatQnA/tests/test_compose_faqgen_tgi_on_epyc.sh | 10 ---------- ChatQnA/tests/test_compose_mariadb_on_xeon.sh | 8 +------- ChatQnA/tests/test_compose_milvus_on_epyc.sh | 12 +----------- ChatQnA/tests/test_compose_milvus_on_xeon.sh | 8 +------- ChatQnA/tests/test_compose_on_epyc.sh | 12 +----------- ChatQnA/tests/test_compose_on_xeon.sh | 8 +------- ChatQnA/tests/test_compose_pinecone_on_epyc.sh | 12 +----------- ChatQnA/tests/test_compose_pinecone_on_xeon.sh | 8 +------- ChatQnA/tests/test_compose_qdrant_on_epyc.sh | 12 +----------- ChatQnA/tests/test_compose_qdrant_on_xeon.sh | 8 +------- .../tests/test_compose_without_rerank_on_epyc.sh | 12 +----------- .../tests/test_compose_without_rerank_on_xeon.sh | 9 +-------- 35 files changed, 32 insertions(+), 179 deletions(-) diff --git a/AudioQnA/docker_compose/amd/cpu/epyc/compose.yaml b/AudioQnA/docker_compose/amd/cpu/epyc/compose.yaml index 9d614bd738..588113d72b 100644 --- a/AudioQnA/docker_compose/amd/cpu/epyc/compose.yaml +++ b/AudioQnA/docker_compose/amd/cpu/epyc/compose.yaml @@ -26,7 +26,7 @@ services: https_proxy: ${https_proxy} restart: unless-stopped vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-service ports: - ${LLM_SERVER_PORT:-3006}:80 diff --git a/AudioQnA/docker_compose/intel/cpu/xeon/compose.yaml b/AudioQnA/docker_compose/intel/cpu/xeon/compose.yaml index a9020a4b89..b48593a233 100644 --- a/AudioQnA/docker_compose/intel/cpu/xeon/compose.yaml +++ b/AudioQnA/docker_compose/intel/cpu/xeon/compose.yaml @@ -25,7 +25,7 @@ services: https_proxy: ${https_proxy} restart: unless-stopped vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-service ports: - ${LLM_SERVER_PORT:-3006}:80 diff --git a/AudioQnA/docker_compose/intel/cpu/xeon/compose_multilang.yaml b/AudioQnA/docker_compose/intel/cpu/xeon/compose_multilang.yaml index 16b72813e2..21b3ecfc4d 100644 --- a/AudioQnA/docker_compose/intel/cpu/xeon/compose_multilang.yaml +++ b/AudioQnA/docker_compose/intel/cpu/xeon/compose_multilang.yaml @@ -29,7 +29,7 @@ services: # - ./pretrained_models/:/home/user/GPT-SoVITS/GPT_SoVITS/pretrained_models/ restart: unless-stopped vllm-service: - image: 
${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-service ports: - ${LLM_SERVER_PORT:-3006}:80 diff --git a/AudioQnA/docker_image_build/build.yaml b/AudioQnA/docker_image_build/build.yaml index e7688555c1..8e055ed2f9 100644 --- a/AudioQnA/docker_image_build/build.yaml +++ b/AudioQnA/docker_image_build/build.yaml @@ -73,12 +73,6 @@ services: dockerfile: comps/third_parties/gpt-sovits/src/Dockerfile extends: audioqna image: ${REGISTRY:-opea}/gpt-sovits:${TAG:-latest} - vllm: - build: - context: vllm - dockerfile: docker/Dockerfile.cpu - extends: audioqna - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} vllm-gaudi: build: context: vllm-fork diff --git a/AudioQnA/tests/test_compose_multilang_on_xeon.sh b/AudioQnA/tests/test_compose_multilang_on_xeon.sh index 770838c1e2..d8af4c22d2 100644 --- a/AudioQnA/tests/test_compose_multilang_on_xeon.sh +++ b/AudioQnA/tests/test_compose_multilang_on_xeon.sh @@ -25,14 +25,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git - cd ./vllm/ - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &> /dev/null && cd ../ - echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="audioqna-multilang audioqna-ui whisper gpt-sovits vllm" + service_list="audioqna-multilang audioqna-ui whisper gpt-sovits" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/AudioQnA/tests/test_compose_on_epyc.sh b/AudioQnA/tests/test_compose_on_epyc.sh index 6fc56775a8..10a7f2d76a 100644 --- a/AudioQnA/tests/test_compose_on_epyc.sh +++ b/AudioQnA/tests/test_compose_on_epyc.sh @@ -27,20 +27,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git - cd ./vllm/ - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &>/dev/null - VLLM_REQ_FILE="requirements/cpu.txt" - if ! grep -q "^transformers" "$VLLM_REQ_FILE"; then - echo "Adding transformers<4.54.0 to $VLLM_REQ_FILE" - echo "transformers<4.54.0" >>"$VLLM_REQ_FILE" - fi - cd ../ - echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="audioqna audioqna-ui whisper speecht5 vllm" + service_list="audioqna audioqna-ui whisper speecht5" docker compose -f build.yaml build ${service_list} --no-cache >${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/AudioQnA/tests/test_compose_on_xeon.sh b/AudioQnA/tests/test_compose_on_xeon.sh index a83e4a598e..e25bc5a1d2 100644 --- a/AudioQnA/tests/test_compose_on_xeon.sh +++ b/AudioQnA/tests/test_compose_on_xeon.sh @@ -25,14 +25,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git - cd ./vllm/ - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &> /dev/null && cd ../ - echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
- service_list="audioqna audioqna-ui whisper speecht5 vllm" + service_list="audioqna audioqna-ui whisper speecht5" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/ChatQnA/docker_compose/amd/cpu/epyc/compose.yaml b/ChatQnA/docker_compose/amd/cpu/epyc/compose.yaml index ee5e810900..9b44783b5b 100644 --- a/ChatQnA/docker_compose/amd/cpu/epyc/compose.yaml +++ b/ChatQnA/docker_compose/amd/cpu/epyc/compose.yaml @@ -90,7 +90,7 @@ services: HF_HUB_ENABLE_HF_TRANSFER: 0 command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-service ports: - "9009:80" diff --git a/ChatQnA/docker_compose/amd/cpu/epyc/compose_faqgen.yaml b/ChatQnA/docker_compose/amd/cpu/epyc/compose_faqgen.yaml index 13888c3e81..61580e1865 100644 --- a/ChatQnA/docker_compose/amd/cpu/epyc/compose_faqgen.yaml +++ b/ChatQnA/docker_compose/amd/cpu/epyc/compose_faqgen.yaml @@ -83,7 +83,7 @@ services: HF_HUB_ENABLE_HF_TRANSFER: 0 command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-server ports: - ${LLM_ENDPOINT_PORT:-9009}:80 diff --git a/ChatQnA/docker_compose/amd/cpu/epyc/compose_milvus.yaml b/ChatQnA/docker_compose/amd/cpu/epyc/compose_milvus.yaml index ef1b271d76..7936c11135 100644 --- a/ChatQnA/docker_compose/amd/cpu/epyc/compose_milvus.yaml +++ b/ChatQnA/docker_compose/amd/cpu/epyc/compose_milvus.yaml @@ -147,7 +147,7 @@ services: command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-service ports: - "9009:80" diff --git a/ChatQnA/docker_compose/amd/cpu/epyc/compose_pinecone.yaml b/ChatQnA/docker_compose/amd/cpu/epyc/compose_pinecone.yaml index 2681a92642..514a8803f6 100644 --- a/ChatQnA/docker_compose/amd/cpu/epyc/compose_pinecone.yaml +++ b/ChatQnA/docker_compose/amd/cpu/epyc/compose_pinecone.yaml @@ -77,7 +77,7 @@ services: HF_HUB_ENABLE_HF_TRANSFER: 0 command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-service ports: - "9009:80" diff --git a/ChatQnA/docker_compose/amd/cpu/epyc/compose_qdrant.yaml b/ChatQnA/docker_compose/amd/cpu/epyc/compose_qdrant.yaml index 821bc02450..2850318d50 100644 --- a/ChatQnA/docker_compose/amd/cpu/epyc/compose_qdrant.yaml +++ b/ChatQnA/docker_compose/amd/cpu/epyc/compose_qdrant.yaml @@ -83,7 +83,7 @@ services: HF_HUB_ENABLE_HF_TRANSFER: 0 command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-service ports: - "6042:80" diff --git a/ChatQnA/docker_compose/amd/cpu/epyc/compose_without_rerank.yaml b/ChatQnA/docker_compose/amd/cpu/epyc/compose_without_rerank.yaml index 1b701c0279..296546e986 100644 --- a/ChatQnA/docker_compose/amd/cpu/epyc/compose_without_rerank.yaml +++ b/ChatQnA/docker_compose/amd/cpu/epyc/compose_without_rerank.yaml @@ -67,7 +67,7 @@ services: RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_REDIS" restart: unless-stopped vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + 
image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-service ports: - "9009:80" diff --git a/ChatQnA/docker_compose/intel/cpu/xeon/compose.yaml b/ChatQnA/docker_compose/intel/cpu/xeon/compose.yaml index fb9d4ce9c8..e276089e36 100644 --- a/ChatQnA/docker_compose/intel/cpu/xeon/compose.yaml +++ b/ChatQnA/docker_compose/intel/cpu/xeon/compose.yaml @@ -89,7 +89,7 @@ services: HF_HUB_ENABLE_HF_TRANSFER: 0 command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-service ports: - "9009:80" diff --git a/ChatQnA/docker_compose/intel/cpu/xeon/compose_faqgen.yaml b/ChatQnA/docker_compose/intel/cpu/xeon/compose_faqgen.yaml index e34f6f0062..a1a7d05fba 100644 --- a/ChatQnA/docker_compose/intel/cpu/xeon/compose_faqgen.yaml +++ b/ChatQnA/docker_compose/intel/cpu/xeon/compose_faqgen.yaml @@ -81,7 +81,7 @@ services: HF_HUB_ENABLE_HF_TRANSFER: 0 command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-server ports: - ${LLM_ENDPOINT_PORT:-9009}:80 diff --git a/ChatQnA/docker_compose/intel/cpu/xeon/compose_mariadb.yaml b/ChatQnA/docker_compose/intel/cpu/xeon/compose_mariadb.yaml index ccd55bbce3..ab5217f359 100644 --- a/ChatQnA/docker_compose/intel/cpu/xeon/compose_mariadb.yaml +++ b/ChatQnA/docker_compose/intel/cpu/xeon/compose_mariadb.yaml @@ -90,7 +90,7 @@ services: HF_HUB_ENABLE_HF_TRANSFER: 0 command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-service ports: - "9009:80" diff --git a/ChatQnA/docker_compose/intel/cpu/xeon/compose_milvus.yaml b/ChatQnA/docker_compose/intel/cpu/xeon/compose_milvus.yaml index 67226bd404..2f6fe6d439 100644 --- a/ChatQnA/docker_compose/intel/cpu/xeon/compose_milvus.yaml +++ b/ChatQnA/docker_compose/intel/cpu/xeon/compose_milvus.yaml @@ -144,7 +144,7 @@ services: command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-service ports: - "9009:80" diff --git a/ChatQnA/docker_compose/intel/cpu/xeon/compose_pinecone.yaml b/ChatQnA/docker_compose/intel/cpu/xeon/compose_pinecone.yaml index cfa6c5aebe..ffa4dd67f8 100644 --- a/ChatQnA/docker_compose/intel/cpu/xeon/compose_pinecone.yaml +++ b/ChatQnA/docker_compose/intel/cpu/xeon/compose_pinecone.yaml @@ -76,7 +76,7 @@ services: HF_HUB_ENABLE_HF_TRANSFER: 0 command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-service ports: - "9009:80" diff --git a/ChatQnA/docker_compose/intel/cpu/xeon/compose_qdrant.yaml b/ChatQnA/docker_compose/intel/cpu/xeon/compose_qdrant.yaml index fb12b77e1a..3fdd295374 100644 --- a/ChatQnA/docker_compose/intel/cpu/xeon/compose_qdrant.yaml +++ b/ChatQnA/docker_compose/intel/cpu/xeon/compose_qdrant.yaml @@ -81,7 +81,7 @@ services: HF_HUB_ENABLE_HF_TRANSFER: 0 command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-service ports: - 
"6042:80" diff --git a/ChatQnA/docker_compose/intel/cpu/xeon/compose_without_rerank.yaml b/ChatQnA/docker_compose/intel/cpu/xeon/compose_without_rerank.yaml index 841a0ad531..00e6474b86 100644 --- a/ChatQnA/docker_compose/intel/cpu/xeon/compose_without_rerank.yaml +++ b/ChatQnA/docker_compose/intel/cpu/xeon/compose_without_rerank.yaml @@ -65,7 +65,7 @@ services: RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_REDIS" restart: unless-stopped vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-service ports: - "9009:80" diff --git a/ChatQnA/docker_image_build/build.yaml b/ChatQnA/docker_image_build/build.yaml index 3a5f6d4be3..f7f831e67f 100644 --- a/ChatQnA/docker_image_build/build.yaml +++ b/ChatQnA/docker_image_build/build.yaml @@ -114,12 +114,6 @@ services: context: GenAIComps dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest} - vllm: - build: - context: vllm - dockerfile: docker/Dockerfile.cpu - extends: chatqna - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} vllm-gaudi: build: context: vllm-fork diff --git a/ChatQnA/tests/test_compose_faqgen_on_epyc.sh b/ChatQnA/tests/test_compose_faqgen_on_epyc.sh index 1571349095..04a8929651 100644 --- a/ChatQnA/tests/test_compose_faqgen_on_epyc.sh +++ b/ChatQnA/tests/test_compose_faqgen_on_epyc.sh @@ -25,19 +25,9 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &>/dev/null - VLLM_REQ_FILE="requirements/cpu.txt" - if ! grep -q "^transformers" "$VLLM_REQ_FILE"; then - echo "Adding transformers<4.54.0 to $VLLM_REQ_FILE" - echo "transformers<4.54.0" >>"$VLLM_REQ_FILE" - fi - cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="chatqna chatqna-ui dataprep retriever llm-faqgen vllm nginx" + service_list="chatqna chatqna-ui dataprep retriever llm-faqgen nginx" docker compose -f build.yaml build ${service_list} --no-cache >${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/ChatQnA/tests/test_compose_faqgen_on_xeon.sh b/ChatQnA/tests/test_compose_faqgen_on_xeon.sh index bb18e3db55..4a9e51fb6a 100644 --- a/ChatQnA/tests/test_compose_faqgen_on_xeon.sh +++ b/ChatQnA/tests/test_compose_faqgen_on_xeon.sh @@ -23,13 +23,9 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &> /dev/null && cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
- service_list="chatqna chatqna-ui dataprep retriever llm-faqgen vllm nginx" + service_list="chatqna chatqna-ui dataprep retriever llm-faqgen nginx" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/ChatQnA/tests/test_compose_faqgen_tgi_on_epyc.sh b/ChatQnA/tests/test_compose_faqgen_tgi_on_epyc.sh index 75d86a4a6e..6c3c27d366 100644 --- a/ChatQnA/tests/test_compose_faqgen_tgi_on_epyc.sh +++ b/ChatQnA/tests/test_compose_faqgen_tgi_on_epyc.sh @@ -25,16 +25,6 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &>/dev/null - VLLM_REQ_FILE="requirements/cpu.txt" - if ! grep -q "^transformers" "$VLLM_REQ_FILE"; then - echo "Adding transformers<4.54.0 to $VLLM_REQ_FILE" - echo "transformers<4.54.0" >>"$VLLM_REQ_FILE" - fi - cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." service_list="chatqna chatqna-ui dataprep retriever llm-faqgen nginx" diff --git a/ChatQnA/tests/test_compose_mariadb_on_xeon.sh b/ChatQnA/tests/test_compose_mariadb_on_xeon.sh index 4d834e2abc..d1e0edc773 100644 --- a/ChatQnA/tests/test_compose_mariadb_on_xeon.sh +++ b/ChatQnA/tests/test_compose_mariadb_on_xeon.sh @@ -23,15 +23,9 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &> /dev/null - # make sure NOT change the pwd - cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="chatqna chatqna-ui dataprep retriever vllm nginx" + service_list="chatqna chatqna-ui dataprep retriever nginx" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/ChatQnA/tests/test_compose_milvus_on_epyc.sh b/ChatQnA/tests/test_compose_milvus_on_epyc.sh index 851f6cb33d..212bed7a64 100644 --- a/ChatQnA/tests/test_compose_milvus_on_epyc.sh +++ b/ChatQnA/tests/test_compose_milvus_on_epyc.sh @@ -26,19 +26,9 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &>/dev/null - VLLM_REQ_FILE="requirements/cpu.txt" - if ! grep -q "^transformers" "$VLLM_REQ_FILE"; then - echo "Adding transformers<4.54.0 to $VLLM_REQ_FILE" - echo "transformers<4.54.0" >>"$VLLM_REQ_FILE" - fi - cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
- service_list="chatqna chatqna-ui dataprep retriever vllm nginx" + service_list="chatqna chatqna-ui dataprep retriever nginx" docker compose -f build.yaml build ${service_list} --no-cache >${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/ChatQnA/tests/test_compose_milvus_on_xeon.sh b/ChatQnA/tests/test_compose_milvus_on_xeon.sh index 2bd97cb05a..254e7ed11b 100644 --- a/ChatQnA/tests/test_compose_milvus_on_xeon.sh +++ b/ChatQnA/tests/test_compose_milvus_on_xeon.sh @@ -24,15 +24,9 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &> /dev/null - # make sure NOT change the pwd - cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="chatqna chatqna-ui dataprep retriever vllm nginx" + service_list="chatqna chatqna-ui dataprep retriever nginx" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/ChatQnA/tests/test_compose_on_epyc.sh b/ChatQnA/tests/test_compose_on_epyc.sh index 851f6cb33d..212bed7a64 100644 --- a/ChatQnA/tests/test_compose_on_epyc.sh +++ b/ChatQnA/tests/test_compose_on_epyc.sh @@ -26,19 +26,9 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &>/dev/null - VLLM_REQ_FILE="requirements/cpu.txt" - if ! grep -q "^transformers" "$VLLM_REQ_FILE"; then - echo "Adding transformers<4.54.0 to $VLLM_REQ_FILE" - echo "transformers<4.54.0" >>"$VLLM_REQ_FILE" - fi - cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="chatqna chatqna-ui dataprep retriever vllm nginx" + service_list="chatqna chatqna-ui dataprep retriever nginx" docker compose -f build.yaml build ${service_list} --no-cache >${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/ChatQnA/tests/test_compose_on_xeon.sh b/ChatQnA/tests/test_compose_on_xeon.sh index 965ddb34dd..579dcde2db 100644 --- a/ChatQnA/tests/test_compose_on_xeon.sh +++ b/ChatQnA/tests/test_compose_on_xeon.sh @@ -23,15 +23,9 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &> /dev/null - # make sure NOT change the pwd - cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
- service_list="chatqna chatqna-ui dataprep retriever vllm nginx" + service_list="chatqna chatqna-ui dataprep retriever nginx" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/ChatQnA/tests/test_compose_pinecone_on_epyc.sh b/ChatQnA/tests/test_compose_pinecone_on_epyc.sh index 02bddd1450..cb1341d921 100755 --- a/ChatQnA/tests/test_compose_pinecone_on_epyc.sh +++ b/ChatQnA/tests/test_compose_pinecone_on_epyc.sh @@ -26,19 +26,9 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &>/dev/null - VLLM_REQ_FILE="requirements/cpu.txt" - if ! grep -q "^transformers" "$VLLM_REQ_FILE"; then - echo "Adding transformers<4.54.0 to $VLLM_REQ_FILE" - echo "transformers<4.54.0" >>"$VLLM_REQ_FILE" - fi - cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="chatqna chatqna-ui dataprep retriever vllm nginx" + service_list="chatqna chatqna-ui dataprep retriever nginx" docker compose -f build.yaml build ${service_list} --no-cache >${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/ChatQnA/tests/test_compose_pinecone_on_xeon.sh b/ChatQnA/tests/test_compose_pinecone_on_xeon.sh index 64c8efc7df..c3961600ed 100755 --- a/ChatQnA/tests/test_compose_pinecone_on_xeon.sh +++ b/ChatQnA/tests/test_compose_pinecone_on_xeon.sh @@ -24,15 +24,9 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &> /dev/null - # Not change the pwd - cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="chatqna chatqna-ui dataprep retriever vllm nginx" + service_list="chatqna chatqna-ui dataprep retriever nginx" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/ChatQnA/tests/test_compose_qdrant_on_epyc.sh b/ChatQnA/tests/test_compose_qdrant_on_epyc.sh index 1489a346c7..092bfb0bb1 100644 --- a/ChatQnA/tests/test_compose_qdrant_on_epyc.sh +++ b/ChatQnA/tests/test_compose_qdrant_on_epyc.sh @@ -24,19 +24,9 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &>/dev/null - VLLM_REQ_FILE="requirements/cpu.txt" - if ! grep -q "^transformers" "$VLLM_REQ_FILE"; then - echo "Adding transformers<4.54.0 to $VLLM_REQ_FILE" - echo "transformers<4.54.0" >>"$VLLM_REQ_FILE" - fi - cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
- service_list="chatqna chatqna-ui dataprep retriever vllm nginx" + service_list="chatqna chatqna-ui dataprep retriever nginx" docker compose -f build.yaml build ${service_list} --no-cache >${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/ChatQnA/tests/test_compose_qdrant_on_xeon.sh b/ChatQnA/tests/test_compose_qdrant_on_xeon.sh index c7a17aac49..0889c4ad4d 100644 --- a/ChatQnA/tests/test_compose_qdrant_on_xeon.sh +++ b/ChatQnA/tests/test_compose_qdrant_on_xeon.sh @@ -23,15 +23,9 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &> /dev/null - # Not change the pwd - cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="chatqna chatqna-ui dataprep retriever vllm nginx" + service_list="chatqna chatqna-ui dataprep retriever nginx" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/ChatQnA/tests/test_compose_without_rerank_on_epyc.sh b/ChatQnA/tests/test_compose_without_rerank_on_epyc.sh index 06f96f9290..9a1777bd15 100644 --- a/ChatQnA/tests/test_compose_without_rerank_on_epyc.sh +++ b/ChatQnA/tests/test_compose_without_rerank_on_epyc.sh @@ -24,19 +24,9 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &>/dev/null - VLLM_REQ_FILE="requirements/cpu.txt" - if ! grep -q "^transformers" "$VLLM_REQ_FILE"; then - echo "Adding transformers<4.54.0 to $VLLM_REQ_FILE" - echo "transformers<4.54.0" >>"$VLLM_REQ_FILE" - fi - cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="chatqna chatqna-ui dataprep retriever vllm nginx" + service_list="chatqna chatqna-ui dataprep retriever nginx" docker compose -f build.yaml build ${service_list} --no-cache >${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/ChatQnA/tests/test_compose_without_rerank_on_xeon.sh b/ChatQnA/tests/test_compose_without_rerank_on_xeon.sh index f236a8ab76..91a6e5b656 100644 --- a/ChatQnA/tests/test_compose_without_rerank_on_xeon.sh +++ b/ChatQnA/tests/test_compose_without_rerank_on_xeon.sh @@ -23,16 +23,9 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &> /dev/null - # Not change the pwd - cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
- service_list="chatqna chatqna-ui dataprep retriever vllm nginx" + service_list="chatqna chatqna-ui dataprep retriever nginx" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s From eb7b9b348bb594ad0aaa4b635d2ea6de67a3a477 Mon Sep 17 00:00:00 2001 From: zhihang Date: Tue, 9 Sep 2025 11:42:27 +0800 Subject: [PATCH 14/78] Add openEuler support for AudioQnA (#2191) Signed-off-by: zhihang --- AudioQnA/Dockerfile.openEuler | 10 ++ .../intel/cpu/xeon/compose_openeuler.yaml | 92 ++++++++++++++++ AudioQnA/docker_image_build/build.yaml | 29 +++++ .../tests/test_compose_openeuler_on_xeon.sh | 103 ++++++++++++++++++ AudioQnA/ui/docker/Dockerfile.openEuler | 30 +++++ 5 files changed, 264 insertions(+) create mode 100644 AudioQnA/Dockerfile.openEuler create mode 100644 AudioQnA/docker_compose/intel/cpu/xeon/compose_openeuler.yaml create mode 100644 AudioQnA/tests/test_compose_openeuler_on_xeon.sh create mode 100644 AudioQnA/ui/docker/Dockerfile.openEuler diff --git a/AudioQnA/Dockerfile.openEuler b/AudioQnA/Dockerfile.openEuler new file mode 100644 index 0000000000..3a6c5928a5 --- /dev/null +++ b/AudioQnA/Dockerfile.openEuler @@ -0,0 +1,10 @@ +# Copyright (C) 2025 Huawei Technologies Co., Ltd. +# SPDX-License-Identifier: Apache-2.0 + +ARG IMAGE_REPO=opea +ARG BASE_TAG=latest +FROM $IMAGE_REPO/comps-base:$BASE_TAG-openeuler + +COPY ./audioqna.py $HOME/audioqna.py + +ENTRYPOINT ["python", "audioqna.py"] \ No newline at end of file diff --git a/AudioQnA/docker_compose/intel/cpu/xeon/compose_openeuler.yaml b/AudioQnA/docker_compose/intel/cpu/xeon/compose_openeuler.yaml new file mode 100644 index 0000000000..f977cf8ac8 --- /dev/null +++ b/AudioQnA/docker_compose/intel/cpu/xeon/compose_openeuler.yaml @@ -0,0 +1,92 @@ +# Copyright (C) 2025 Huawei Technologies Co., Ltd. 
+# SPDX-License-Identifier: Apache-2.0 + +services: + whisper-service: + image: ${REGISTRY:-opea}/whisper:${TAG:-latest}-openeuler + container_name: whisper-service + ports: + - ${WHISPER_SERVER_PORT:-7066}:7066 + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + restart: unless-stopped + speecht5-service: + image: ${REGISTRY:-opea}/speecht5:${TAG:-latest}-openeuler + container_name: speecht5-service + ports: + - ${SPEECHT5_SERVER_PORT:-7055}:7055 + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + restart: unless-stopped + vllm-service: + image: openeuler/vllm-cpu:0.9.1-oe2403lts + container_name: vllm-service + ports: + - ${LLM_SERVER_PORT:-3006}:80 + volumes: + - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub" + shm_size: 128g + privileged: true + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + HF_TOKEN: ${HF_TOKEN} + LLM_MODEL_ID: ${LLM_MODEL_ID} + VLLM_TORCH_PROFILER_DIR: "/mnt" + LLM_SERVER_PORT: ${LLM_SERVER_PORT} + VLLM_CPU_OMP_THREADS_BIND: all + VLLM_CPU_KVCACHE_SPACE: 30 + healthcheck: + test: ["CMD-SHELL", "curl -f http://$host_ip:${LLM_SERVER_PORT}/health || exit 1"] + interval: 10s + timeout: 10s + retries: 100 + command: --model ${LLM_MODEL_ID} --host 0.0.0.0 --port 80 + audioqna-xeon-backend-server: + image: ${REGISTRY:-opea}/audioqna:${TAG:-latest}-openeuler + container_name: audioqna-xeon-backend-server + depends_on: + - whisper-service + - vllm-service + - speecht5-service + ports: + - "3008:8888" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP} + - WHISPER_SERVER_HOST_IP=${WHISPER_SERVER_HOST_IP} + - WHISPER_SERVER_PORT=${WHISPER_SERVER_PORT} + - LLM_SERVER_HOST_IP=${LLM_SERVER_HOST_IP} + - LLM_SERVER_PORT=${LLM_SERVER_PORT} + - LLM_MODEL_ID=${LLM_MODEL_ID} + - SPEECHT5_SERVER_HOST_IP=${SPEECHT5_SERVER_HOST_IP} + - SPEECHT5_SERVER_PORT=${SPEECHT5_SERVER_PORT} + ipc: host + restart: always + audioqna-xeon-ui-server: + image: ${REGISTRY:-opea}/audioqna-ui:${TAG:-latest}-openeuler + container_name: audioqna-xeon-ui-server + depends_on: + - audioqna-xeon-backend-server + ports: + - "5173:5173" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - CHAT_URL=${BACKEND_SERVICE_ENDPOINT} + ipc: host + restart: always + +networks: + default: + driver: bridge diff --git a/AudioQnA/docker_image_build/build.yaml b/AudioQnA/docker_image_build/build.yaml index 8e055ed2f9..95dd19a322 100644 --- a/AudioQnA/docker_image_build/build.yaml +++ b/AudioQnA/docker_image_build/build.yaml @@ -13,12 +13,29 @@ services: context: ../ dockerfile: ./Dockerfile image: ${REGISTRY:-opea}/audioqna:${TAG:-latest} + audioqna-openeuler: + build: + args: + IMAGE_REPO: ${REGISTRY} + BASE_TAG: ${TAG} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + no_proxy: ${no_proxy} + context: ../ + dockerfile: ./Dockerfile.openEuler + image: ${REGISTRY:-opea}/audioqna:${TAG:-latest}-openeuler audioqna-ui: build: context: ../ui dockerfile: ./docker/Dockerfile extends: audioqna image: ${REGISTRY:-opea}/audioqna-ui:${TAG:-latest} + audioqna-ui-openeuler: + build: + context: ../ui + dockerfile: ./docker/Dockerfile.openEuler + extends: audioqna + image: ${REGISTRY:-opea}/audioqna-ui:${TAG:-latest}-openeuler audioqna-multilang: build: context: ../ @@ -37,6 +54,12 @@ services: dockerfile: 
comps/third_parties/whisper/src/Dockerfile extends: audioqna image: ${REGISTRY:-opea}/whisper:${TAG:-latest} + whisper-openeuler: + build: + context: GenAIComps + dockerfile: comps/third_parties/whisper/src/Dockerfile.openEuler + extends: audioqna + image: ${REGISTRY:-opea}/whisper:${TAG:-latest}-openeuler asr: build: context: GenAIComps @@ -61,6 +84,12 @@ services: dockerfile: comps/third_parties/speecht5/src/Dockerfile extends: audioqna image: ${REGISTRY:-opea}/speecht5:${TAG:-latest} + speecht5-openeuler: + build: + context: GenAIComps + dockerfile: comps/third_parties/speecht5/src/Dockerfile.openEuler + extends: audioqna + image: ${REGISTRY:-opea}/speecht5:${TAG:-latest}-openeuler tts: build: context: GenAIComps diff --git a/AudioQnA/tests/test_compose_openeuler_on_xeon.sh b/AudioQnA/tests/test_compose_openeuler_on_xeon.sh new file mode 100644 index 0000000000..6a1728a573 --- /dev/null +++ b/AudioQnA/tests/test_compose_openeuler_on_xeon.sh @@ -0,0 +1,103 @@ +#!/bin/bash +# Copyright (C) 2025 Huawei Technologies Co., Ltd. +# SPDX-License-Identifier: Apache-2.0 + +set -e +IMAGE_REPO=${IMAGE_REPO:-"opea"} +IMAGE_TAG=${IMAGE_TAG:-"latest"} +echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" +echo "TAG=IMAGE_TAG=${IMAGE_TAG}" +export REGISTRY=${IMAGE_REPO} +export TAG=${IMAGE_TAG} +export MODEL_CACHE=${model_cache:-"./data"} + +WORKPATH=$(dirname "$PWD") +LOG_PATH="$WORKPATH/tests" +ip_address=$(hostname -I | awk '{print $1}') + +function build_docker_images() { + opea_branch=${opea_branch:-"main"} + + cd $WORKPATH/docker_image_build + git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git + pushd GenAIComps + echo "GenAIComps test commit is $(git rev-parse HEAD)" + docker build --no-cache -t ${REGISTRY}/comps-base:${TAG}-openeuler --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile.openEuler . + popd && sleep 1s + + echo "Build all the images with --no-cache, check docker_image_build.log for details..." + service_list="audioqna-openeuler audioqna-ui-openeuler whisper-openeuler speecht5-openeuler" + docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log + + docker images && sleep 1s +} + +function start_services() { + cd $WORKPATH/docker_compose/intel/cpu/xeon/ + export host_ip=${ip_address} + source set_env.sh + # sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env + + # Start Docker Containers + docker compose -f compose_openeuler.yaml up -d > ${LOG_PATH}/start_services_with_compose.log + n=0 + until [[ "$n" -ge 200 ]]; do + docker logs vllm-service > $LOG_PATH/vllm_service_start.log 2>&1 + if grep -q complete $LOG_PATH/vllm_service_start.log; then + break + fi + sleep 5s + n=$((n+1)) + done +} + + +function validate_megaservice() { + response=$(http_proxy="" curl http://${ip_address}:3008/v1/audioqna -XPOST -d '{"audio": "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA", "max_tokens":64}' -H 'Content-Type: application/json') + # always print the log + docker logs whisper-service > $LOG_PATH/whisper-service.log + docker logs speecht5-service > $LOG_PATH/tts-service.log + docker logs vllm-service > $LOG_PATH/vllm-service.log + docker logs audioqna-xeon-backend-server > $LOG_PATH/audioqna-xeon-backend-server.log + echo "$response" | sed 's/^"//;s/"$//' | base64 -d > speech.mp3 + + if [[ $(file speech.mp3) == *"RIFF"* ]]; then + echo "Result correct." + else + echo "Result wrong." 
+ exit 1 + fi + +} + +function stop_docker() { + cd $WORKPATH/docker_compose/intel/cpu/xeon/ + docker compose -f compose_openeuler.yaml stop && docker compose rm -f +} + +function main() { + + echo "::group::stop_docker" + stop_docker + echo "::endgroup::" + + echo "::group::build_docker_images" + if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi + echo "::endgroup::" + + echo "::group::start_services" + start_services + echo "::endgroup::" + + echo "::group::validate_megaservice" + validate_megaservice + echo "::endgroup::" + + echo "::group::stop_docker" + stop_docker + docker system prune -f + echo "::endgroup::" + +} + +main diff --git a/AudioQnA/ui/docker/Dockerfile.openEuler b/AudioQnA/ui/docker/Dockerfile.openEuler new file mode 100644 index 0000000000..0a4d701592 --- /dev/null +++ b/AudioQnA/ui/docker/Dockerfile.openEuler @@ -0,0 +1,30 @@ +# Copyright (C) 2025 Huawei Technologies Co., Ltd. +# SPDX-License-Identifier: Apache-2.0 + +# Use node 20.11.1 as the base image +FROM openeuler/node:20.11.1-oe2403lts + +# Update package manager and install Git +RUN yum update -y && \ + yum install -y \ + git && \ + yum clean all && \ + rm -rf /var/cache/yum + +# Copy the front-end code repository +COPY svelte /home/user/svelte + +# Set the working directory +WORKDIR /home/user/svelte + +# Install front-end dependencies +RUN npm install + +# Build the front-end application +RUN npm run build + +# Expose the port of the front-end application +EXPOSE 5173 + +# Run the front-end application in preview mode +CMD ["npm", "run", "preview", "--", "--port", "5173", "--host", "0.0.0.0"] From 3c88c00efd52bbae097e5dbed7d3d422a5b3e1ac Mon Sep 17 00:00:00 2001 From: Yi Yao Date: Wed, 10 Sep 2025 09:03:55 +0800 Subject: [PATCH 15/78] Update for the Docker image name changes (#2240) Signed-off-by: Yi Yao Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../docker_compose/amd/cpu/epyc/README.md | 32 ++++++++++-------- .../docker_compose/amd/cpu/epyc/compose.yaml | 6 ++-- .../docker_compose/intel/cpu/xeon/README.md | 33 +++++++++++-------- .../intel/cpu/xeon/compose.yaml | 6 ++-- .../intel/cpu/xeon/compose_remote.yaml | 6 ++-- .../docker_image_build/build.yaml | 8 ++--- docker_images_list.md | 10 +++--- 7 files changed, 58 insertions(+), 43 deletions(-) diff --git a/ProductivitySuite/docker_compose/amd/cpu/epyc/README.md b/ProductivitySuite/docker_compose/amd/cpu/epyc/README.md index c4cdd2673a..7263cea913 100644 --- a/ProductivitySuite/docker_compose/amd/cpu/epyc/README.md +++ b/ProductivitySuite/docker_compose/amd/cpu/epyc/README.md @@ -10,14 +10,18 @@ This document details the deployment process for the OPEA Productivity Suite usi This section describes how to quickly deploy and test the Productivity Suite service manually on AMD EPYC™ platform. The basic steps are: -1. [Access the Code](#access-the-code) -2. [Generate a HuggingFace Access Token](#generate-a-huggingface-access-token) -3. [Configure the Deployment Environment](#configure-the-deployment-environment) -4. [Deploy the Service Using Docker Compose](#deploy-the-service-using-docker-compose) -5. [Check the Deployment Status](#check-the-deployment-status) -6. [Setup Keycloak](#setup-keycloak) -7. [Test the Pipeline](#test-the-pipeline) -8. 
[Cleanup the Deployment](#cleanup-the-deployment) +- [Build Mega Service of Productivity Suite on AMD EPYC™ Processors](#build-mega-service-of-productivity-suite-on-amd-epyc-processors) + - [Productivity Suite Quick Start Deployment](#productivity-suite-quick-start-deployment) + - [Access the Code](#access-the-code) + - [Generate a HuggingFace Access Token](#generate-a-huggingface-access-token) + - [Configure the Deployment Environment](#configure-the-deployment-environment) + - [Deploy the Service Using Docker Compose](#deploy-the-service-using-docker-compose) + - [Check the Deployment Status](#check-the-deployment-status) + - [Setup Keycloak](#setup-keycloak) + - [Test the Pipeline](#test-the-pipeline) + - [Cleanup the Deployment](#cleanup-the-deployment) + - [Productivity Suite Docker Compose Files](#productivity-suite-docker-compose-files) + - [Productivity Suite Service Configuration](#productivity-suite-service-configuration) ### Access the Code @@ -126,8 +130,8 @@ ea7444faa8b2 ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu 9fb471c452ec quay.io/keycloak/keycloak:25.0.2 "/opt/keycloak/bin/k…" 8 minutes ago Up 8 minutes 8443/tcp, 0.0.0.0:8080->8080/tcp, :::8080->8080/tcp, 9000/tcp keycloak-server a00ac544abb7 ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 "/bin/sh -c 'apt-get…" 8 minutes ago Up 8 minutes (healthy) 0.0.0.0:6006->80/tcp, :::6006->80/tcp tei-embedding-server 87c2996111d5 redis/redis-stack:7.2.0-v9 "/entrypoint.sh" 8 minutes ago Up 8 minutes 0.0.0.0:6379->6379/tcp, :::6379->6379/tcp, 0.0.0.0:8001->8001/tcp, :::8001->8001/tcp redis-vector-db -536b71e4ec67 opea/chathistory-mongo:latest "python opea_chathis…" 8 minutes ago Up 8 minutes 0.0.0.0:6012->6012/tcp, :::6012->6012/tcp chathistory-mongo-server -8d56c2b03431 opea/promptregistry-mongo:latest "python opea_prompt_…" 8 minutes ago Up 8 minutes 0.0.0.0:6018->6018/tcp, :::6018->6018/tcp promptregistry-mongo-server +536b71e4ec67 opea/chathistory:latest "python opea_chathis…" 8 minutes ago Up 8 minutes 0.0.0.0:6012->6012/tcp, :::6012->6012/tcp chathistory-mongo-server +8d56c2b03431 opea/promptregistry:latest "python opea_prompt_…" 8 minutes ago Up 8 minutes 0.0.0.0:6018->6018/tcp, :::6018->6018/tcp promptregistry-mongo-server c48921438848 ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 "/bin/sh -c 'apt-get…" 8 minutes ago Up 8 minutes (healthy) 0.0.0.0:8808->80/tcp, :::8808->80/tcp tei-reranking-server ``` @@ -203,7 +207,7 @@ The compose.yaml is default compose file using tgi as serving framework | Service Name | Image Name | | --------------------------------------- | ------------------------------------------------------------- | -| chathistory-mongo-server | opea/chathistory-mongo:latest | +| chathistory-mongo-server | opea/chathistory:latest | | chatqna-epyc-backend-server | opea/chatqna:latest | | codegen-epyc-backend-server | opea/codegen:latest | | dataprep-redis-server | opea/dataprep:latest | @@ -213,7 +217,7 @@ The compose.yaml is default compose file using tgi as serving framework | llm-textgen-server-codegen | opea/llm-textgen:latest | | mongodb | mongo:7.0.11 | | productivity-suite-epyc-react-ui-server | opea/productivity-suite-react-ui-server:latest | -| promptregistry-mongo-server | opea/promptregistry-mongo:latest | +| promptregistry-mongo-server | opea/promptregistry:latest | | redis-vector-db | redis/redis-stack:7.2.0-v9 | | retriever-redis-server | opea/retriever:latest | | tei-embedding-server | ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 | @@ -228,7 +232,7 @@ The table 
provides a comprehensive overview of the Productivity Suite service ut | Service Name | Possible Image Names | Optional | Description | | --------------------------------------- | ------------------------------------------------------------- | -------- | ---------------------------------------------------------------------------------------------------------------- | -| chathistory-mongo-server | opea/chathistory-mongo:latest | No | Handles chat history storage and retrieval using MongoDB. | +| chathistory-mongo-server | opea/chathistory:latest | No | Handles chat history storage and retrieval using MongoDB. | | chatqna-epyc-backend-server | opea/chatqna:latest | No | Handles question answering and chat interactions. | | codegen-epyc-backend-server | opea/codegen:latest | No | Handles code generation tasks. | | dataprep-redis-server | opea/dataprep:latest | No | Handles data preparation and preprocessing tasks for downstream services. | @@ -238,7 +242,7 @@ The table provides a comprehensive overview of the Productivity Suite service ut | llm-textgen-server-codegen | opea/llm-textgen:latest | No | Handles large language model (LLM) text generation tasks, providing inference APIs for code and text completion. | | mongodb | mongo:7.0.11 | No | Provides persistent storage for application data using MongoDB. | | productivity-suite-epyc-react-ui-server | opea/productivity-suite-react-ui-server:latest | No | Hosts the web-based user interface for interacting with the Productivity Suite services. | -| promptregistry-mongo-server | opea/promptregistry-mongo:latest | No | Manages storage and retrieval of prompt templates and related metadata. | +| promptregistry-mongo-server | opea/promptregistry:latest | No | Manages storage and retrieval of prompt templates and related metadata. | | redis-vector-db | redis/redis-stack:7.2.0-v9 | No | Offers in-memory data storage and vector database capabilities for fast retrieval and caching. | | retriever-redis-server | opea/retriever:latest | No | Handles retrieval-augmented generation tasks, enabling efficient document and context retrieval. | | tei-embedding-server | ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 | No | Provides text embedding and sequence classification services for downstream NLP tasks. 
| diff --git a/ProductivitySuite/docker_compose/amd/cpu/epyc/compose.yaml b/ProductivitySuite/docker_compose/amd/cpu/epyc/compose.yaml index 83ea9c8a03..fe43844af7 100644 --- a/ProductivitySuite/docker_compose/amd/cpu/epyc/compose.yaml +++ b/ProductivitySuite/docker_compose/amd/cpu/epyc/compose.yaml @@ -222,7 +222,7 @@ services: command: mongod --quiet --logpath /dev/null chathistory-mongo: - image: ${REGISTRY:-opea}/chathistory-mongo:${TAG:-latest} + image: ${REGISTRY:-opea}/chathistory:${TAG:-latest} container_name: chathistory-mongo-server ports: - "6012:6012" @@ -231,6 +231,7 @@ services: http_proxy: ${http_proxy} no_proxy: ${no_proxy} https_proxy: ${https_proxy} + OPEA_STORE_NAME: "mongodb" MONGO_HOST: ${MONGO_HOST:-mongo} MONGO_PORT: ${MONGO_PORT:-27017} COLLECTION_NAME: ${COLLECTION_NAME:-Conversations} @@ -238,7 +239,7 @@ services: restart: unless-stopped promptregistry-mongo: - image: ${REGISTRY:-opea}/promptregistry-mongo:${TAG:-latest} + image: ${REGISTRY:-opea}/promptregistry:${TAG:-latest} container_name: promptregistry-mongo-server ports: - "6018:6018" @@ -247,6 +248,7 @@ services: http_proxy: ${http_proxy} https_proxy: ${https_proxy} no_proxy: ${no_proxy} + OPEA_STORE_NAME: "mongodb" MONGO_HOST: ${MONGO_HOST:-mongo} MONGO_PORT: ${MONGO_PORT:-27017} COLLECTION_NAME: ${PROMPT_COLLECTION_NAME:-prompt} diff --git a/ProductivitySuite/docker_compose/intel/cpu/xeon/README.md b/ProductivitySuite/docker_compose/intel/cpu/xeon/README.md index 8ac4dc65b5..0849b80ad7 100644 --- a/ProductivitySuite/docker_compose/intel/cpu/xeon/README.md +++ b/ProductivitySuite/docker_compose/intel/cpu/xeon/README.md @@ -10,14 +10,19 @@ This document outlines the deployment process for OPEA Productivity Suite utiliz This section describes how to quickly deploy and test the Productivity Suite service manually on Intel® Xeon® platform. The basic steps are: -1. [Access the Code](#access-the-code) -2. [Generate a HuggingFace Access Token](#generate-a-huggingface-access-token) -3. [Configure the Deployment Environment](#configure-the-deployment-environment) -4. [Deploy the Service Using Docker Compose](#deploy-the-service-using-docker-compose) -5. [Check the Deployment Status](#check-the-deployment-status) -6. [Setup Keycloak](#setup-keycloak) -7. [Test the Pipeline](#test-the-pipeline) -8. 
[Cleanup the Deployment](#cleanup-the-deployment) +- [Example Productivity Suite Deployment on Intel® Xeon® Platform](#example-productivity-suite-deployment-on-intel-xeon-platform) + - [Productivity Suite Quick Start Deployment](#productivity-suite-quick-start-deployment) + - [Access the Code](#access-the-code) + - [Generate a HuggingFace Access Token](#generate-a-huggingface-access-token) + - [Configure the Deployment Environment](#configure-the-deployment-environment) + - [Deploy the Service Using Docker Compose](#deploy-the-service-using-docker-compose) + - [Check the Deployment Status](#check-the-deployment-status) + - [Setup Keycloak](#setup-keycloak) + - [Test the Pipeline](#test-the-pipeline) + - [Cleanup the Deployment](#cleanup-the-deployment) + - [Productivity Suite Docker Compose Files](#productivity-suite-docker-compose-files) + - [Productivity Suite Service Configuration](#productivity-suite-service-configuration) + - [Running LLM models with remote endpoints](#running-llm-models-with-remote-endpoints) ### Access the Code @@ -109,8 +114,8 @@ ea7444faa8b2 ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu 9fb471c452ec quay.io/keycloak/keycloak:25.0.2 "/opt/keycloak/bin/k…" 8 minutes ago Up 8 minutes 8443/tcp, 0.0.0.0:8080->8080/tcp, :::8080->8080/tcp, 9000/tcp keycloak-server a00ac544abb7 ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 "/bin/sh -c 'apt-get…" 8 minutes ago Up 8 minutes (healthy) 0.0.0.0:6006->80/tcp, :::6006->80/tcp tei-embedding-server 87c2996111d5 redis/redis-stack:7.2.0-v9 "/entrypoint.sh" 8 minutes ago Up 8 minutes 0.0.0.0:6379->6379/tcp, :::6379->6379/tcp, 0.0.0.0:8001->8001/tcp, :::8001->8001/tcp redis-vector-db -536b71e4ec67 opea/chathistory-mongo:latest "python opea_chathis…" 8 minutes ago Up 8 minutes 0.0.0.0:6012->6012/tcp, :::6012->6012/tcp chathistory-mongo-server -8d56c2b03431 opea/promptregistry-mongo:latest "python opea_prompt_…" 8 minutes ago Up 8 minutes 0.0.0.0:6018->6018/tcp, :::6018->6018/tcp promptregistry-mongo-server +536b71e4ec67 opea/chathistory:latest "python opea_chathis…" 8 minutes ago Up 8 minutes 0.0.0.0:6012->6012/tcp, :::6012->6012/tcp chathistory-mongo-server +8d56c2b03431 opea/promptregistry:latest "python opea_prompt_…" 8 minutes ago Up 8 minutes 0.0.0.0:6018->6018/tcp, :::6018->6018/tcp promptregistry-mongo-server c48921438848 ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 "/bin/sh -c 'apt-get…" 8 minutes ago Up 8 minutes (healthy) 0.0.0.0:8808->80/tcp, :::8808->80/tcp tei-reranking-server ``` @@ -186,7 +191,7 @@ The compose.yaml is default compose file using tgi as serving framework | Service Name | Image Name | | --------------------------------------- | ------------------------------------------------------------- | -| chathistory-mongo-server | opea/chathistory-mongo:latest | +| chathistory-mongo-server | opea/chathistory:latest | | chatqna-xeon-backend-server | opea/chatqna:latest | | codegen-xeon-backend-server | opea/codegen:latest | | dataprep-redis-server | opea/dataprep:latest | @@ -196,7 +201,7 @@ The compose.yaml is default compose file using tgi as serving framework | llm-textgen-server-codegen | opea/llm-textgen:latest | | mongodb | mongo:7.0.11 | | productivity-suite-xeon-react-ui-server | opea/productivity-suite-react-ui-server:latest | -| promptregistry-mongo-server | opea/promptregistry-mongo:latest | +| promptregistry-mongo-server | opea/promptregistry:latest | | redis-vector-db | redis/redis-stack:7.2.0-v9 | | retriever-redis-server | opea/retriever:latest | | tei-embedding-server | 
ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 | @@ -211,7 +216,7 @@ The table provides a comprehensive overview of the Productivity Suite service ut | Service Name | Possible Image Names | Optional | Description | | --------------------------------------- | ------------------------------------------------------------- | -------- | ---------------------------------------------------------------------------------------------------------------- | -| chathistory-mongo-server | opea/chathistory-mongo:latest | No | Handles chat history storage and retrieval using MongoDB. | +| chathistory-mongo-server | opea/chathistory:latest | No | Handles chat history storage and retrieval using MongoDB. | | chatqna-xeon-backend-server | opea/chatqna:latest | No | Handles question answering and chat interactions. | | codegen-xeon-backend-server | opea/codegen:latest | No | Handles code generation tasks. | | dataprep-redis-server | opea/dataprep:latest | No | Handles data preparation and preprocessing tasks for downstream services. | @@ -221,7 +226,7 @@ The table provides a comprehensive overview of the Productivity Suite service ut | llm-textgen-server-codegen | opea/llm-textgen:latest | No | Handles large language model (LLM) text generation tasks, providing inference APIs for code and text completion. | | mongodb | mongo:7.0.11 | No | Provides persistent storage for application data using MongoDB. | | productivity-suite-xeon-react-ui-server | opea/productivity-suite-react-ui-server:latest | No | Hosts the web-based user interface for interacting with the Productivity Suite services. | -| promptregistry-mongo-server | opea/promptregistry-mongo:latest | No | Manages storage and retrieval of prompt templates and related metadata. | +| promptregistry-mongo-server | opea/promptregistry:latest | No | Manages storage and retrieval of prompt templates and related metadata. | | redis-vector-db | redis/redis-stack:7.2.0-v9 | No | Offers in-memory data storage and vector database capabilities for fast retrieval and caching. | | retriever-redis-server | opea/retriever:latest | No | Handles retrieval-augmented generation tasks, enabling efficient document and context retrieval. | | tei-embedding-server | ghcr.io/huggingface/text-embeddings-inference:cpu-1.6 | No | Provides text embedding and sequence classification services for downstream NLP tasks. 
| diff --git a/ProductivitySuite/docker_compose/intel/cpu/xeon/compose.yaml b/ProductivitySuite/docker_compose/intel/cpu/xeon/compose.yaml index 4c243eb61a..4ab9b7f11f 100644 --- a/ProductivitySuite/docker_compose/intel/cpu/xeon/compose.yaml +++ b/ProductivitySuite/docker_compose/intel/cpu/xeon/compose.yaml @@ -222,7 +222,7 @@ services: command: mongod --quiet --logpath /dev/null chathistory-mongo: - image: ${REGISTRY:-opea}/chathistory-mongo:${TAG:-latest} + image: ${REGISTRY:-opea}/chathistory:${TAG:-latest} container_name: chathistory-mongo-server ports: - "6012:6012" @@ -231,6 +231,7 @@ services: http_proxy: ${http_proxy} no_proxy: ${no_proxy} https_proxy: ${https_proxy} + OPEA_STORE_NAME: "mongodb" MONGO_HOST: ${MONGO_HOST:-mongo} MONGO_PORT: ${MONGO_PORT:-27017} COLLECTION_NAME: ${COLLECTION_NAME:-Conversations} @@ -238,7 +239,7 @@ services: restart: unless-stopped promptregistry-mongo: - image: ${REGISTRY:-opea}/promptregistry-mongo:${TAG:-latest} + image: ${REGISTRY:-opea}/promptregistry:${TAG:-latest} container_name: promptregistry-mongo-server ports: - "6018:6018" @@ -247,6 +248,7 @@ services: http_proxy: ${http_proxy} https_proxy: ${https_proxy} no_proxy: ${no_proxy} + OPEA_STORE_NAME: "mongodb" MONGO_HOST: ${MONGO_HOST:-mongo} MONGO_PORT: ${MONGO_PORT:-27017} COLLECTION_NAME: ${PROMPT_COLLECTION_NAME:-prompt} diff --git a/ProductivitySuite/docker_compose/intel/cpu/xeon/compose_remote.yaml b/ProductivitySuite/docker_compose/intel/cpu/xeon/compose_remote.yaml index a117b7b70e..c78f1dd576 100644 --- a/ProductivitySuite/docker_compose/intel/cpu/xeon/compose_remote.yaml +++ b/ProductivitySuite/docker_compose/intel/cpu/xeon/compose_remote.yaml @@ -163,7 +163,7 @@ services: command: mongod --quiet --logpath /dev/null chathistory-mongo: - image: ${REGISTRY:-opea}/chathistory-mongo:${TAG:-latest} + image: ${REGISTRY:-opea}/chathistory:${TAG:-latest} container_name: chathistory-mongo-server ports: - "6012:6012" @@ -172,6 +172,7 @@ services: http_proxy: ${http_proxy} no_proxy: ${no_proxy} https_proxy: ${https_proxy} + OPEA_STORE_NAME: "mongodb" MONGO_HOST: ${MONGO_HOST:-mongo} MONGO_PORT: ${MONGO_PORT:-27017} COLLECTION_NAME: ${COLLECTION_NAME:-Conversations} @@ -179,7 +180,7 @@ services: restart: unless-stopped promptregistry-mongo: - image: ${REGISTRY:-opea}/promptregistry-mongo:${TAG:-latest} + image: ${REGISTRY:-opea}/promptregistry:${TAG:-latest} container_name: promptregistry-mongo-server ports: - "6018:6018" @@ -188,6 +189,7 @@ services: http_proxy: ${http_proxy} https_proxy: ${https_proxy} no_proxy: ${no_proxy} + OPEA_STORE_NAME: "mongodb" MONGO_HOST: ${MONGO_HOST:-mongo} MONGO_PORT: ${MONGO_PORT:-27017} COLLECTION_NAME: ${PROMPT_COLLECTION_NAME:-prompt} diff --git a/ProductivitySuite/docker_image_build/build.yaml b/ProductivitySuite/docker_image_build/build.yaml index 3ad40916ba..ee15e9ba06 100644 --- a/ProductivitySuite/docker_image_build/build.yaml +++ b/ProductivitySuite/docker_image_build/build.yaml @@ -43,18 +43,18 @@ services: dockerfile: comps/dataprep/src/Dockerfile extends: chatqna image: ${REGISTRY:-opea}/dataprep:${TAG:-latest} - promptregistry-mongo: + promptregistry: build: context: GenAIComps dockerfile: comps/prompt_registry/src/Dockerfile extends: chatqna - image: ${REGISTRY:-opea}/promptregistry-mongo:${TAG:-latest} - chathistory-mongo: + image: ${REGISTRY:-opea}/promptregistry:${TAG:-latest} + chathistory: build: context: GenAIComps dockerfile: comps/chathistory/src/Dockerfile extends: chatqna - image: ${REGISTRY:-opea}/chathistory-mongo:${TAG:-latest} + image: 
${REGISTRY:-opea}/chathistory:${TAG:-latest} productivity-suite-react-ui-server: build: context: ../ui diff --git a/docker_images_list.md b/docker_images_list.md index 17c4d6faf0..6620531cca 100644 --- a/docker_images_list.md +++ b/docker_images_list.md @@ -53,14 +53,14 @@ Take ChatQnA for example. ChatQnA is a chatbot application service based on the | [opea/agent-ui](https://hub.docker.com/r/opea/agent-ui) | [Link](https://github.com/opea-project/GenAIExamples/blob/main/AgentQnA/ui/docker/Dockerfile) | OPEA agent microservice UI entry for GenAI applications use | [Link](https://github.com/opea-project/GenAIExamples/blob/main/AgentQnA/README.md) | | [opea/animation](https://hub.docker.com/r/opea/animation) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/animation/src/Dockerfile) | OPEA Avatar Animation microservice for GenAI applications | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/animation/src/README.md) | | [opea/asr](https://hub.docker.com/r/opea/asr) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/src/Dockerfile) | OPEA Audio-Speech-Recognition microservice for GenAI applications | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/asr/src/README.md) | -| [opea/chathistory-mongo](https://hub.docker.com/r/opea/chathistory-mongo) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/chathistory/src/Dockerfile) | OPEA Chat History microservice is based on a MongoDB database and is designed to allow users to store, retrieve and manage chat conversations. | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/chathistory/src/README.md) | +| [opea/chathistory](https://hub.docker.com/r/opea/chathistory) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/chathistory/src/Dockerfile) | OPEA Chat History microservice is based on a NoSQL database and is designed to allow users to store, retrieve and manage chat conversations. | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/chathistory/README.md) | | [opea/comps-base](https://hub.docker.com/r/opea/comps-base) | [Link](https://github.com/opea-project/GenAIComps/blob/main/Dockerfile) | OPEA Microservice base image. 
| [Link](https://github.com/opea-project/GenAIComps/blob/main/README.md) | | [opea/dataprep](https://hub.docker.com/r/opea/dataprep) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/src/Dockerfile) | OPEA data preparation microservices for GenAI applications | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/dataprep/README.md) | -| [opea/embedding](https://hub.docker.com/r/opea/embedding) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/src/Dockerfile) | OPEA mosec embedding microservice for GenAI application | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/src/README.md) | +| [opea/embedding](https://hub.docker.com/r/opea/embedding) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/src/Dockerfile) | OPEA mosec embedding microservice for GenAI application | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/embeddings/README.md) | | [opea/embedding-multimodal-bridgetower](https://hub.docker.com/r/opea/embedding-multimodal-bridgetower) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/bridgetower/src/Dockerfile) | OPEA multimodal embedded microservices based on bridgetower for use by GenAI applications | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/bridgetower/src/README.md) | | [opea/embedding-multimodal-bridgetower-gaudi](https://hub.docker.com/r/opea/embedding-multimodal-bridgetower-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/bridgetower/src/Dockerfile.intel_hpu) | OPEA multimodal embedded microservices based on bridgetower for use by GenAI applications on the Gaudi | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/bridgetower/src/README.md) | | [opea/embedding-multimodal-clip](https://hub.docker.com/r/opea/embedding-multimodal-clip) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/clip/src/Dockerfile) | OPEA mosec embedding microservice base on Langchain framework for GenAI application use | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/clip/src/README.md) | -| [opea/feedbackmanagement-mongo](https://hub.docker.com/r/opea/feedbackmanagement-mongo) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/feedback_management/src/Dockerfile) | OPEA feedback management microservice uses MongoDB database for GenAI applications. | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/feedback_management/src/README.md) | +| [opea/feedbackmanagement](https://hub.docker.com/r/opea/feedbackmanagement) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/feedback_management/src/Dockerfile) | OPEA feedback management microservice uses NoSQL database for GenAI applications. 
| [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/feedback_management/README.md) | | [opea/finetuning](https://hub.docker.com/r/opea/finetuning) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/src/Dockerfile) | OPEA Fine-tuning microservice for GenAI application | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/src/README.md) | | [opea/finetuning-gaudi](https://hub.docker.com/r/opea/finetuning-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/src/Dockerfile.intel_hpu) | OPEA Fine-tuning microservice for GenAI application use on the Gaudi | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/src/README.md) | | [opea/finetuning-xtune](https://hub.docker.com/r/opea/finetuning-xtune) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/src/Dockerfile.xtune) | OPEA Fine-tuning microservice base on Xtune for GenAI application use on the Arc A770 | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/src/README.md) | @@ -83,7 +83,7 @@ Take ChatQnA for example. ChatQnA is a chatbot application service based on the | [opea/llm-textgen](https://hub.docker.com/r/opea/llm-textgen) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/Dockerfile) | OPEA LLM microservice upon textgen docker image for GenAI application | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/README.md) | | [opea/llm-textgen-gaudi](https://hub.docker.com/r/opea/llm-textgen-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/Dockerfile.intel_hpu) | OPEA LLM microservice upon textgen docker image for GenAI application use on the Gaudi2 | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/README.md) | | [opea/llm-textgen-phi4-gaudi](https://hub.docker.com/r/opea/llm-textgen-phi4-gaudi) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/Dockerfile.intel_hpu_phi4) | OPEA LLM microservice upon textgen docker image for GenAI application use on the Gaudi2 with Phi4 optimization. 
| [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/llms/src/text-generation/README_native.md) | -| [opea/lvm](https://hub.docker.com/r/opea/lvm) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/Dockerfile) | OPEA large visual model (LVM) microservice for GenAI application | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/README.md) | +| [opea/lvm](https://hub.docker.com/r/opea/lvm) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/src/Dockerfile) | OPEA large visual model (LVM) microservice for GenAI application | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/lvms/README.md) | | [opea/lvm-llama-vision](https://hub.docker.com/r/opea/lvm-llama-vision) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/llama-vision/src/Dockerfile) | OPEA microservice running Llama Vision as a large visualization model (LVM) server for GenAI applications | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/llama-vision/src/README.md) | | [opea/lvm-llama-vision-guard](https://hub.docker.com/r/opea/lvm-llama-vision-guard) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/llama-vision/src/Dockerfile.guard) | OPEA microservice running Llama Vision Guard as a large visualization model (LVM) server for GenAI applications | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/llama-vision/src/README.md) | | [opea/lvm-llama-vision-tp](https://hub.docker.com/r/opea/lvm-llama-vision-tp) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/llama-vision/src/Dockerfile.tp) | OPEA microservice running Llama Vision with DeepSpeed as a large visualization model (LVM) server for GenAI applications | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/llama-vision/src/README.md) | @@ -93,7 +93,7 @@ Take ChatQnA for example. 
ChatQnA is a chatbot application service based on the | [opea/lvm-video-llama](https://hub.docker.com/r/opea/lvm-video-llama) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/video-llama/src/Dockerfile) | OPEA microservice running Video-Llama as a large visualization model (LVM) server for GenAI applications | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/video-llama/src/README.md) | | [opea/nginx](https://hub.docker.com/r/opea/nginx) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/nginx/src/Dockerfile) | OPEA nginx microservice for GenAI application | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/nginx/deployment/kubernetes/README.md) | | [opea/pathway](https://hub.docker.com/r/opea/pathway) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/pathway/src/Dockerfile) | OPEA Pathway microservice for GenAI application | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/pathway/src/README.md) | -| [opea/promptregistry-mongo](https://hub.docker.com/r/opea/promptregistry-mongo) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/prompt_registry/src/Dockerfile) | OPEA Prompt Registry microservice based on MongoDB database, designed to store and retrieve user preference prompts | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/prompt_registry/src/README.md) | +| [opea/promptregistry](https://hub.docker.com/r/opea/promptregistry) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/prompt_registry/src/Dockerfile) | OPEA Prompt Registry microservice based on NoSQL database, designed to store and retrieve user preference prompts | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/prompt_registry/README.md) | | [opea/reranking](https://hub.docker.com/r/opea/reranking) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/rerankings/src/Dockerfile) | OPEA reranking microservice for GenAI application | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/rerankings/src/README.md) | | [opea/retriever](https://hub.docker.com/r/opea/retriever) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/src/Dockerfile) | OPEA retrieval microservice for GenAI application | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/README.md) | | [opea/speecht5](https://hub.docker.com/r/opea/speecht5) | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/third_parties/speecht5/src/Dockerfile) | OPEA SpeechT5 service for GenAI application | [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/tts/src/README.md) | From 24b7e988bd25ad0f30a0c370f4fbf7c7bb05623f Mon Sep 17 00:00:00 2001 From: zhihang Date: Wed, 10 Sep 2025 13:41:57 +0800 Subject: [PATCH 16/78] Add openEuler support for CodeGen (#2227) Signed-off-by: zhihang --- CodeGen/Dockerfile.openEuler | 10 + .../intel/cpu/xeon/compose_openeuler.yaml | 175 ++++++++++++ .../intel/cpu/xeon/compose_tgi_openeuler.yaml | 174 ++++++++++++ CodeGen/docker_image_build/build.yaml | 53 ++++ .../tests/test_compose_openeuler_on_xeon.sh | 259 ++++++++++++++++++ CodeGen/ui/docker/Dockerfile.gradio.openEuler | 30 ++ CodeGen/ui/docker/Dockerfile.openEuler | 26 ++ CodeGen/ui/docker/Dockerfile.react.openEuler | 21 ++ 8 files changed, 748 insertions(+) create mode 100644 CodeGen/Dockerfile.openEuler create mode 100644 
CodeGen/docker_compose/intel/cpu/xeon/compose_openeuler.yaml create mode 100644 CodeGen/docker_compose/intel/cpu/xeon/compose_tgi_openeuler.yaml create mode 100644 CodeGen/tests/test_compose_openeuler_on_xeon.sh create mode 100644 CodeGen/ui/docker/Dockerfile.gradio.openEuler create mode 100644 CodeGen/ui/docker/Dockerfile.openEuler create mode 100644 CodeGen/ui/docker/Dockerfile.react.openEuler diff --git a/CodeGen/Dockerfile.openEuler b/CodeGen/Dockerfile.openEuler new file mode 100644 index 0000000000..ef31614245 --- /dev/null +++ b/CodeGen/Dockerfile.openEuler @@ -0,0 +1,10 @@ +# Copyright (C) 2025 Huawei Technologies Co., Ltd. +# SPDX-License-Identifier: Apache-2.0 + +ARG IMAGE_REPO=opea +ARG BASE_TAG=latest +FROM $IMAGE_REPO/comps-base:$BASE_TAG-openeuler + +COPY ./codegen.py $HOME/codegen.py + +ENTRYPOINT ["python", "codegen.py"] diff --git a/CodeGen/docker_compose/intel/cpu/xeon/compose_openeuler.yaml b/CodeGen/docker_compose/intel/cpu/xeon/compose_openeuler.yaml new file mode 100644 index 0000000000..c4f6d6791d --- /dev/null +++ b/CodeGen/docker_compose/intel/cpu/xeon/compose_openeuler.yaml @@ -0,0 +1,175 @@ +# Copyright (C) 2025 Huawei Technologies Co., Ltd. +# SPDX-License-Identifier: Apache-2.0 + +services: + + vllm-service: + image: openeuler/vllm-cpu:0.9.1-oe2403lts + container_name: vllm-server + ports: + - "8028:80" + volumes: + - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub" + shm_size: 1g + privileged: true + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + HF_TOKEN: ${HF_TOKEN} + host_ip: ${host_ip} + healthcheck: + test: ["CMD-SHELL", "curl -f http://localhost:80/health || exit 1"] + interval: 10s + timeout: 10s + retries: 100 + command: --model ${LLM_MODEL_ID} --host 0.0.0.0 --port 80 + llm-base: + image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest}-openeuler + container_name: llm-textgen-server + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + LLM_ENDPOINT: ${LLM_ENDPOINT} + LLM_MODEL_ID: ${LLM_MODEL_ID} + HF_TOKEN: ${HF_TOKEN} + restart: unless-stopped + llm-vllm-service: + extends: llm-base + container_name: llm-codegen-vllm-server + ports: + - "9000:9000" + ipc: host + depends_on: + vllm-service: + condition: service_healthy + codegen-xeon-backend-server: + image: ${REGISTRY:-opea}/codegen:${TAG:-latest}-openeuler + container_name: codegen-xeon-backend-server + depends_on: + llm-base: + condition: service_started + dataprep-redis-server: + condition: service_healthy + ports: + - "7778:7778" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP} + - LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP} + - RETRIEVAL_SERVICE_HOST_IP=${RETRIEVAL_SERVICE_HOST_IP} + - REDIS_RETRIEVER_PORT=${REDIS_RETRIEVER_PORT} + - TEI_EMBEDDING_HOST_IP=${TEI_EMBEDDING_HOST_IP} + - EMBEDDER_PORT=${EMBEDDER_PORT} + ipc: host + restart: always + codegen-xeon-ui-server: + image: ${REGISTRY:-opea}/codegen-gradio-ui:${TAG:-latest}-openeuler + container_name: codegen-xeon-ui-server + depends_on: + - codegen-xeon-backend-server + ports: + - "5173:5173" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - BASIC_URL=${BACKEND_SERVICE_ENDPOINT} + - MEGA_SERVICE_PORT=${MEGA_SERVICE_PORT} + - host_ip=${host_ip} + - DATAPREP_ENDPOINT=${DATAPREP_ENDPOINT} + - DATAPREP_REDIS_PORT=${DATAPREP_REDIS_PORT} + ipc: host + restart: always + redis-vector-db: + image: 
redis/redis-stack:7.2.0-v9 + container_name: redis-vector-db + ports: + - "${REDIS_DB_PORT:-6379}:${REDIS_DB_PORT:-6379}" + - "${REDIS_INSIGHTS_PORT:-8001}:${REDIS_INSIGHTS_PORT:-8001}" + dataprep-redis-server: + image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}-openeuler + container_name: dataprep-redis-server + depends_on: + - redis-vector-db + ports: + - "${DATAPREP_REDIS_PORT}:5000" + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + REDIS_URL: ${REDIS_URL} + REDIS_HOST: ${host_ip} + INDEX_NAME: ${INDEX_NAME} + HF_TOKEN: ${HF_TOKEN} + LOGFLAG: true + healthcheck: + test: ["CMD-SHELL", "curl -f http://localhost:5000/v1/health_check || exit 1"] + interval: 10s + timeout: 5s + retries: 10 + restart: unless-stopped + tei-embedding-serving: + image: openeuler/text-embeddings-inference-cpu:1.7.0-oe2403lts + container_name: tei-embedding-serving + entrypoint: /bin/sh -c "yum update -y && yum install -y curl && text-embeddings-router --json-output --model-id ${EMBEDDING_MODEL_ID} --auto-truncate" + ports: + - "${TEI_EMBEDDER_PORT:-12000}:80" + volumes: + - "./data:/data" + shm_size: 1g + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + host_ip: ${host_ip} + HF_TOKEN: ${HF_TOKEN} + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:80/health"] + interval: 10s + timeout: 6s + retries: 48 + tei-embedding-server: + image: ${REGISTRY:-opea}/embedding:${TAG:-latest}-openeuler + container_name: tei-embedding-server + ports: + - "${EMBEDDER_PORT:-10201}:6000" + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} + EMBEDDING_COMPONENT_NAME: "OPEA_TEI_EMBEDDING" + depends_on: + tei-embedding-serving: + condition: service_healthy + restart: unless-stopped + retriever-redis: + image: ${REGISTRY:-opea}/retriever:${TAG:-latest}-openeuler + container_name: retriever-redis + depends_on: + - redis-vector-db + ports: + - "${REDIS_RETRIEVER_PORT:-7000}:${REDIS_RETRIEVER_PORT:-7000}" + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + REDIS_URL: ${REDIS_URL} + REDIS_DB_PORT: ${REDIS_DB_PORT} + REDIS_INSIGHTS_PORT: ${REDIS_INSIGHTS_PORT} + REDIS_RETRIEVER_PORT: ${REDIS_RETRIEVER_PORT} + INDEX_NAME: ${INDEX_NAME} + TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} + HF_TOKEN: ${HF_TOKEN} + LOGFLAG: ${LOGFLAG} + RETRIEVER_COMPONENT_NAME: ${RETRIEVER_COMPONENT_NAME:-OPEA_RETRIEVER_REDIS} + restart: unless-stopped +networks: + default: + driver: bridge diff --git a/CodeGen/docker_compose/intel/cpu/xeon/compose_tgi_openeuler.yaml b/CodeGen/docker_compose/intel/cpu/xeon/compose_tgi_openeuler.yaml new file mode 100644 index 0000000000..93e997371f --- /dev/null +++ b/CodeGen/docker_compose/intel/cpu/xeon/compose_tgi_openeuler.yaml @@ -0,0 +1,174 @@ +# Copyright (C) 2025 Huawei Technologies Co., Ltd. 
+# SPDX-License-Identifier: Apache-2.0 + +services: + + tgi-service: + image: openeuler/text-generation-inference-cpu:2.4.0-oe2403lts + container_name: tgi-server + ports: + - "8028:80" + volumes: + - "${MODEL_CACHE:-./data}:/data" + shm_size: 1g + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + HF_TOKEN: ${HF_TOKEN} + host_ip: ${host_ip} + healthcheck: + test: ["CMD-SHELL", "curl -f http://localhost:80/health || exit 1"] + interval: 10s + timeout: 10s + retries: 100 + command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0 + llm-base: + image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest}-openeuler + container_name: llm-textgen-server + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + LLM_ENDPOINT: ${LLM_ENDPOINT} + LLM_MODEL_ID: ${LLM_MODEL_ID} + HF_TOKEN: ${HF_TOKEN} + restart: unless-stopped + llm-tgi-service: + extends: llm-base + container_name: llm-codegen-tgi-server + ports: + - "9000:9000" + ipc: host + depends_on: + tgi-service: + condition: service_healthy + codegen-xeon-backend-server: + image: ${REGISTRY:-opea}/codegen:${TAG:-latest}-openeuler + container_name: codegen-xeon-backend-server + depends_on: + llm-base: + condition: service_started + dataprep-redis-server: + condition: service_healthy + ports: + - "7778:7778" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP} + - LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP} + - RETRIEVAL_SERVICE_HOST_IP=${RETRIEVAL_SERVICE_HOST_IP} + - REDIS_RETRIEVER_PORT=${REDIS_RETRIEVER_PORT} + - TEI_EMBEDDING_HOST_IP=${TEI_EMBEDDING_HOST_IP} + - EMBEDDER_PORT=${EMBEDDER_PORT} + ipc: host + restart: always + codegen-xeon-ui-server: + image: ${REGISTRY:-opea}/codegen-gradio-ui:${TAG:-latest}-openeuler + container_name: codegen-xeon-ui-server + depends_on: + - codegen-xeon-backend-server + ports: + - "5173:5173" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - BASIC_URL=${BACKEND_SERVICE_ENDPOINT} + - MEGA_SERVICE_PORT=${MEGA_SERVICE_PORT} + - host_ip=${host_ip} + - DATAPREP_ENDPOINT=${DATAPREP_ENDPOINT} + - DATAPREP_REDIS_PORT=${DATAPREP_REDIS_PORT} + ipc: host + restart: always + redis-vector-db: + image: redis/redis-stack:7.2.0-v9 + container_name: redis-vector-db + ports: + - "${REDIS_DB_PORT}:${REDIS_DB_PORT}" + - "${REDIS_INSIGHTS_PORT}:${REDIS_INSIGHTS_PORT}" + dataprep-redis-server: + image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}-openeuler + container_name: dataprep-redis-server + depends_on: + - redis-vector-db + ports: + - "${DATAPREP_REDIS_PORT}:5000" + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + REDIS_URL: ${REDIS_URL} + REDIS_HOST: ${host_ip} + INDEX_NAME: ${INDEX_NAME} + HF_TOKEN: ${HF_TOKEN} + LOGFLAG: true + healthcheck: + test: ["CMD-SHELL", "curl -f http://localhost:5000/v1/health_check || exit 1"] + interval: 10s + timeout: 5s + retries: 10 + restart: unless-stopped + tei-embedding-serving: + image: openeuler/text-embeddings-inference-cpu:1.7.0-oe2403lts + container_name: tei-embedding-serving + entrypoint: /bin/sh -c "yum update -y && yum install -y curl && text-embeddings-router --json-output --model-id ${EMBEDDING_MODEL_ID} --auto-truncate" + ports: + - "${TEI_EMBEDDER_PORT:-12000}:80" + volumes: + - "./data:/data" + shm_size: 1g + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + 
host_ip: ${host_ip} + HF_TOKEN: ${HF_TOKEN} + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:80/health"] + interval: 10s + timeout: 6s + retries: 48 + tei-embedding-server: + image: ${REGISTRY:-opea}/embedding:${TAG:-latest}-openeuler + container_name: tei-embedding-server + ports: + - "${EMBEDDER_PORT:-10201}:6000" + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} + EMBEDDING_COMPONENT_NAME: "OPEA_TEI_EMBEDDING" + depends_on: + tei-embedding-serving: + condition: service_healthy + restart: unless-stopped + retriever-redis: + image: ${REGISTRY:-opea}/retriever:${TAG:-latest}-openeuler + container_name: retriever-redis + depends_on: + - redis-vector-db + ports: + - "${REDIS_RETRIEVER_PORT}:${REDIS_RETRIEVER_PORT}" + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + REDIS_URL: ${REDIS_URL} + REDIS_DB_PORT: ${REDIS_DB_PORT} + REDIS_INSIGHTS_PORT: ${REDIS_INSIGHTS_PORT} + REDIS_RETRIEVER_PORT: ${REDIS_RETRIEVER_PORT} + INDEX_NAME: ${INDEX_NAME} + TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} + HF_TOKEN: ${HF_TOKEN} + LOGFLAG: ${LOGFLAG} + RETRIEVER_COMPONENT_NAME: ${RETRIEVER_COMPONENT_NAME:-OPEA_RETRIEVER_REDIS} + restart: unless-stopped +networks: + default: + driver: bridge diff --git a/CodeGen/docker_image_build/build.yaml b/CodeGen/docker_image_build/build.yaml index 282c29766c..8edc33f8df 100644 --- a/CodeGen/docker_image_build/build.yaml +++ b/CodeGen/docker_image_build/build.yaml @@ -31,12 +31,47 @@ services: dockerfile: ./docker/Dockerfile.gradio extends: codegen image: ${REGISTRY:-opea}/codegen-gradio-ui:${TAG:-latest} + codegen-openeuler: + build: + args: + IMAGE_REPO: ${REGISTRY} + BASE_TAG: ${TAG} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + no_proxy: ${no_proxy} + context: ../ + dockerfile: ./Dockerfile.openEuler + image: ${REGISTRY:-opea}/codegen:${TAG:-latest}-openeuler + codegen-ui-openeuler: + build: + context: ../ui + dockerfile: ./docker/Dockerfile.openEuler + extends: codegen + image: ${REGISTRY:-opea}/codegen-ui:${TAG:-latest}-openeuler + codegen-react-ui-openeuler: + build: + context: ../ui + dockerfile: ./docker/Dockerfile.react.openEuler + extends: codegen + image: ${REGISTRY:-opea}/codegen-react-ui:${TAG:-latest}-openeuler + codegen-gradio-ui-openeuler: + build: + context: ../ui + dockerfile: ./docker/Dockerfile.gradio.openEuler + extends: codegen + image: ${REGISTRY:-opea}/codegen-gradio-ui:${TAG:-latest}-openeuler llm-textgen: build: context: GenAIComps dockerfile: comps/llms/src/text-generation/Dockerfile extends: codegen image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest} + llm-textgen-openeuler: + build: + context: GenAIComps + dockerfile: comps/llms/src/text-generation/Dockerfile.openEuler + extends: codegen + image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest}-openeuler vllm-rocm: build: context: GenAIComps @@ -61,15 +96,33 @@ services: dockerfile: comps/dataprep/src/Dockerfile extends: codegen image: ${REGISTRY:-opea}/dataprep:${TAG:-latest} + dataprep-openeuler: + build: + context: GenAIComps + dockerfile: comps/dataprep/src/Dockerfile.openEuler + extends: codegen + image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}-openeuler retriever: build: context: GenAIComps dockerfile: comps/retrievers/src/Dockerfile extends: codegen image: ${REGISTRY:-opea}/retriever:${TAG:-latest} + retriever-openeuler: + build: + context: GenAIComps + dockerfile: 
comps/retrievers/src/Dockerfile.openEuler + extends: codegen + image: ${REGISTRY:-opea}/retriever:${TAG:-latest}-openeuler embedding: build: context: GenAIComps dockerfile: comps/embeddings/src/Dockerfile extends: codegen image: ${REGISTRY:-opea}/embedding:${TAG:-latest} + embedding-openeuler: + build: + context: GenAIComps + dockerfile: comps/embeddings/src/Dockerfile.openEuler + extends: codegen + image: ${REGISTRY:-opea}/embedding:${TAG:-latest}-openeuler diff --git a/CodeGen/tests/test_compose_openeuler_on_xeon.sh b/CodeGen/tests/test_compose_openeuler_on_xeon.sh new file mode 100644 index 0000000000..81d4bf4b0b --- /dev/null +++ b/CodeGen/tests/test_compose_openeuler_on_xeon.sh @@ -0,0 +1,259 @@ +#!/bin/bash +# Copyright (C) 2025 Huawei Technologies Co., Ltd. +# SPDX-License-Identifier: Apache-2.0 + +set -xe +IMAGE_REPO=${IMAGE_REPO:-"opea"} +IMAGE_TAG=${IMAGE_TAG:-"latest"} +echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" +echo "TAG=IMAGE_TAG=${IMAGE_TAG}" +export REGISTRY=${IMAGE_REPO} +export TAG=${IMAGE_TAG} +export MODEL_CACHE=${model_cache:-"./data"} + +WORKPATH=$(dirname "$PWD") +LOG_PATH="$WORKPATH/tests" +ip_address=$(hostname -I | awk '{print $1}') +source $WORKPATH/docker_compose/intel/set_env.sh + +function build_docker_images() { + opea_branch=${opea_branch:-"main"} + + cd $WORKPATH/docker_image_build + git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git + pushd GenAIComps + echo "GenAIComps test commit is $(git rev-parse HEAD)" + docker build --no-cache -t ${REGISTRY}/comps-base:${TAG}-openeuler --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile.openEuler . + popd && sleep 1s + + echo "Build all the images with --no-cache, check docker_image_build.log for details..." + service_list="codegen-openeuler codegen-gradio-ui-openeuler llm-textgen-openeuler dataprep-openeuler retriever-openeuler embedding-openeuler" + + docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log + + docker pull openeuler/text-generation-inference-cpu:2.4.0-oe2403lts + docker images && sleep 1s +} + +function start_services() { + local compose_file="$1" + local llm_container_name="$2" + + cd $WORKPATH/docker_compose/intel/cpu/xeon/ + + # Start Docker Containers + docker compose -f ${compose_file} up -d > ${LOG_PATH}/start_services_with_compose.log + + n=0 + until [[ "$n" -ge 100 ]]; do + docker logs ${llm_container_name} > ${LOG_PATH}/llm_service_start.log 2>&1 + if grep -E "Connected|complete" ${LOG_PATH}/llm_service_start.log; then + break + fi + sleep 5s + n=$((n+1)) + done +} + +function validate_services() { + local URL="$1" + local EXPECTED_RESULT="$2" + local SERVICE_NAME="$3" + local DOCKER_NAME="$4" + local INPUT_DATA="$5" + + if [[ "$SERVICE_NAME" == "ingest" ]]; then + local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -F "$INPUT_DATA" -F index_name=test_redis -H 'Content-Type: multipart/form-data' "$URL") + + if [ "$HTTP_STATUS" -eq 200 ]; then + echo "[ $SERVICE_NAME ] HTTP status is 200. Data preparation succeeded..." + else + echo "[ $SERVICE_NAME ] Data preparation failed..." + fi + + else + local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") + if [ "$HTTP_STATUS" -eq 200 ]; then + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." 
+
+            local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log)
+
+            if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then
+                echo "[ $SERVICE_NAME ] Content is as expected."
+            else
+                echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT"
+                docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
+                exit 1
+            fi
+        else
+            echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS"
+            docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
+            exit 1
+        fi
+    fi
+    sleep 5s
+}
+
+function validate_microservices() {
+    local llm_container_name="$1"
+
+    # tgi for llm service
+    validate_services \
+        "${ip_address}:8028/v1/chat/completions" \
+        "completion_tokens" \
+        "llm-service" \
+        "${llm_container_name}" \
+        '{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "What is Deep Learning?"}], "max_tokens": 256}'
+
+    # llm microservice
+    validate_services \
+        "${ip_address}:9000/v1/chat/completions" \
+        "data: " \
+        "llm" \
+        "llm-textgen-server" \
+        '{"query":"def print_hello_world():", "max_tokens": 256}'
+
+    # Data ingest microservice
+    validate_services \
+        "${ip_address}:6007/v1/dataprep/ingest" \
+        "Data preparation succeeded" \
+        "ingest" \
+        "dataprep-redis-server" \
+        'link_list=["https://modin.readthedocs.io/en/latest/index.html"]'
+
+}
+
+function validate_megaservice() {
+    # Curl the Mega Service
+    validate_services \
+        "${ip_address}:7778/v1/codegen" \
+        "print" \
+        "mega-codegen" \
+        "codegen-xeon-backend-server" \
+        '{"messages": "def print_hello_world():", "max_tokens": 256}'
+
+    # Curl the Mega Service with stream as false
+    validate_services \
+        "${ip_address}:7778/v1/codegen" \
+        "" \
+        "mega-codegen" \
+        "codegen-xeon-backend-server" \
+        '{ "messages": "def print_hello_world():", "max_tokens": 256, "stream": false}'
+
+    # Curl the Mega Service with index_name and agents_flag
+    validate_services \
+        "${ip_address}:7778/v1/codegen" \
+        "" \
+        "mega-codegen" \
+        "codegen-xeon-backend-server" \
+        '{ "index_name": "test_redis", "agents_flag": "True", "messages": "def print_hello_world():", "max_tokens": 256}'
+
+    validate_services \
+        "${ip_address}:7778/v1/codegen" \
+        "class" \
+        "mega-codegen" \
+        "codegen-xeon-backend-server" \
+        '{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "Implement a basic Python class"}], "max_tokens":32}'
+
+}
+
+function validate_frontend() {
+    cd $WORKPATH/ui/svelte
+    echo "[TEST INFO]: Preparing frontend test using Docker..."
+
+    sed -i "s/localhost/$ip_address/g" playwright.config.ts
+
+    echo "[TEST INFO]: Running frontend tests in Docker..."
+    exit_status=0
+
+    docker run --rm \
+        --network="host" \
+        -v $PWD:/work \
+        -w /work \
+        mcr.microsoft.com/playwright:v1.40.0-focal \
+        /bin/bash -c "
+            npm install &&
+            npm ci &&
+            npx playwright install &&
+            npx playwright test
+        " || exit_status=$?
+
+    if [ $exit_status -ne 0 ]; then
+        echo "[TEST INFO]: ---------frontend test failed---------"
+        exit $exit_status
+    else
+        echo "[TEST INFO]: ---------frontend test passed---------"
+    fi
+}
+
+function validate_gradio() {
+    local URL="http://${ip_address}:5173/health"
+    local HTTP_STATUS=$(curl "$URL")
+    local SERVICE_NAME="Gradio"
+
+    if [ "$HTTP_STATUS" = '{"status":"ok"}' ]; then
+        echo "[ $SERVICE_NAME ] HTTP status is 200. UI server is running successfully..."
+    else
+        echo "[ $SERVICE_NAME ] UI server has failed..."
+    fi
+}
+
+function stop_service() {
+    local compose_file="$1"
+
+    cd $WORKPATH/docker_compose/intel/cpu/xeon/
+    docker compose -f ${compose_file} down
+}
+
+function main() {
+    # all docker compose files for Xeon Platform
+    docker_compose_files=("compose_openeuler.yaml")
+    docker_llm_container_names=( "vllm-server")
+
+    # get number of compose files and LLM docker container names
+    len_compose_files=${#docker_compose_files[@]}
+    len_containers=${#docker_llm_container_names[@]}
+
+    # number of compose files and docker container names must be matched
+    if [ ${len_compose_files} -ne ${len_containers} ]; then
+        echo "Error: number of docker compose files ${len_compose_files} and container names ${len_containers} mismatched"
+        exit 1
+    fi
+
+    # stop_service, stop all compose files
+    for ((i = 0; i < len_compose_files; i++)); do
+        stop_service "${docker_compose_files[${i}]}"
+    done
+
+    # build docker images
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
+
+    # loop all compose files
+    for ((i = 0; i < len_compose_files; i++)); do
+        echo "Process [${i}]: ${docker_compose_files[$i]}, ${docker_llm_container_names[${i}]}"
+        docker ps -a
+
+        echo "::group::start_services"
+        start_services "${docker_compose_files[${i}]}" "${docker_llm_container_names[${i}]}"
+        echo "::endgroup::"
+
+        echo "::group::validate_microservices"
+        validate_microservices "${docker_llm_container_names[${i}]}"
+        echo "::endgroup::"
+
+        echo "::group::validate_megaservice"
+        validate_megaservice
+        echo "::endgroup::"
+
+        echo "::group::validate_gradio"
+        validate_gradio
+        echo "::endgroup::"
+
+        stop_service "${docker_compose_files[${i}]}"
+        sleep 5s
+    done
+
+    docker system prune -f
+}
+
+main
diff --git a/CodeGen/ui/docker/Dockerfile.gradio.openEuler b/CodeGen/ui/docker/Dockerfile.gradio.openEuler
new file mode 100644
index 0000000000..ce524ccbd7
--- /dev/null
+++ b/CodeGen/ui/docker/Dockerfile.gradio.openEuler
@@ -0,0 +1,30 @@
+# Copyright (C) 2025 Huawei Technologies Co., Ltd.
+# SPDX-License-Identifier: Apache-2.0
+
+FROM openeuler/python:3.11.13-oe2403lts
+
+ENV LANG=C.UTF-8
+
+ARG ARCH="cpu"
+
+RUN yum update -y && \
+    yum install -y \
+    gcc \
+    g++ \
+    make \
+    java-21-openjdk \
+    jemalloc-devel \
+    ffmpeg \
+    wget && \
+    yum clean all && \
+    rm -rf /var/cache/yum
+
+RUN mkdir -p /home/user
+
+COPY gradio /home/user/gradio
+
+RUN pip install --no-cache-dir --upgrade pip setuptools && \
+    pip install --no-cache-dir -r /home/user/gradio/requirements.txt
+
+WORKDIR /home/user/gradio
+ENTRYPOINT ["python", "codegen_ui_gradio.py"]
diff --git a/CodeGen/ui/docker/Dockerfile.openEuler b/CodeGen/ui/docker/Dockerfile.openEuler
new file mode 100644
index 0000000000..1d5115f4b5
--- /dev/null
+++ b/CodeGen/ui/docker/Dockerfile.openEuler
@@ -0,0 +1,26 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+# Use node 20.11.1 as the base image
+FROM node:20.11.1
+
+# Update package manager and install Git
+RUN apt-get update -y && apt-get install -y git
+
+# Copy the front-end code repository
+COPY svelte /home/user/svelte
+
+# Set the working directory
+WORKDIR /home/user/svelte
+
+# Install front-end dependencies
+RUN npm install
+
+# Build the front-end application
+RUN npm run build
+
+# Expose the port of the front-end application
+EXPOSE 5173
+
+# Run the front-end application in preview mode
+CMD ["npm", "run", "preview", "--", "--port", "5173", "--host", "0.0.0.0"]
diff --git a/CodeGen/ui/docker/Dockerfile.react.openEuler b/CodeGen/ui/docker/Dockerfile.react.openEuler
new file mode 100644
index 0000000000..844b735cdc
--- /dev/null
+++ b/CodeGen/ui/docker/Dockerfile.react.openEuler
@@ -0,0 +1,21 @@
+# Copyright (C) 2025 Huawei Technologies Co., Ltd.
+# SPDX-License-Identifier: Apache-2.0
+
+# Use node 20.11.1 as the base image
+FROM openeuler/node:20.11.1-oe2403lts as vite-app
+
+COPY react /usr/app/react
+WORKDIR /usr/app/react
+
+
+RUN ["npm", "install"]
+RUN ["npm", "run", "build"]
+
+
+FROM openeuler/nginx:1.29.0-oe2403lts
+
+COPY --from=vite-app /usr/app/react/dist /usr/share/nginx/html
+COPY ./react/env.sh /docker-entrypoint.d/env.sh
+
+COPY ./react/nginx.conf /etc/nginx/conf.d/default.conf
+RUN chmod +x /docker-entrypoint.d/env.sh
From 751d8f7bf43c8efc17cc2d9726599377c57cbe20 Mon Sep 17 00:00:00 2001
From: ZePan110
Date: Thu, 11 Sep 2025 16:01:00 +0800
Subject: [PATCH 17/78] Use vllm release image for CodeGen, CodeTrans, DocSum and VisualQnA (#2241)

Signed-off-by: ZePan110
---
 CodeGen/docker_compose/amd/cpu/epyc/compose.yaml | 2 +-
 CodeGen/docker_compose/intel/cpu/xeon/compose.yaml | 2 +-
 CodeGen/docker_image_build/build.yaml | 6 ------
 CodeGen/tests/test_compose_on_epyc.sh | 13 +------------
 CodeGen/tests/test_compose_on_xeon.sh | 8 +-------
 CodeGen/tests/test_compose_tgi_on_epyc.sh | 13 +------------
 CodeTrans/docker_compose/amd/cpu/epyc/compose.yaml | 2 +-
 .../docker_compose/intel/cpu/xeon/compose.yaml | 2 +-
 CodeTrans/docker_image_build/build.yaml | 6 ------
 CodeTrans/tests/test_compose_on_epyc.sh | 13 +------------
 CodeTrans/tests/test_compose_on_xeon.sh | 8 +-------
 DocSum/docker_compose/amd/cpu/epyc/compose.yaml | 2 +-
 DocSum/docker_compose/intel/cpu/xeon/compose.yaml | 2 +-
 DocSum/docker_image_build/build.yaml | 6 ------
 DocSum/tests/test_compose_on_epyc.sh | 13 +------------
 DocSum/tests/test_compose_on_xeon.sh | 8 +-------
 VisualQnA/docker_compose/amd/cpu/epyc/compose.yaml | 2 +-
 .../docker_compose/intel/cpu/xeon/compose.yaml | 2 +-
 VisualQnA/docker_image_build/build.yaml | 6 ------
 VisualQnA/tests/test_compose_on_epyc.sh
| 12 +----------- VisualQnA/tests/test_compose_on_xeon.sh | 7 +------ 21 files changed, 17 insertions(+), 118 deletions(-) diff --git a/CodeGen/docker_compose/amd/cpu/epyc/compose.yaml b/CodeGen/docker_compose/amd/cpu/epyc/compose.yaml index 3622248e0b..3c19a7e459 100644 --- a/CodeGen/docker_compose/amd/cpu/epyc/compose.yaml +++ b/CodeGen/docker_compose/amd/cpu/epyc/compose.yaml @@ -4,7 +4,7 @@ services: vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-server ports: - "8028:80" diff --git a/CodeGen/docker_compose/intel/cpu/xeon/compose.yaml b/CodeGen/docker_compose/intel/cpu/xeon/compose.yaml index abb1933789..e4ae6c255b 100644 --- a/CodeGen/docker_compose/intel/cpu/xeon/compose.yaml +++ b/CodeGen/docker_compose/intel/cpu/xeon/compose.yaml @@ -4,7 +4,7 @@ services: vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-server ports: - "8028:80" diff --git a/CodeGen/docker_image_build/build.yaml b/CodeGen/docker_image_build/build.yaml index 8edc33f8df..8308291eb2 100644 --- a/CodeGen/docker_image_build/build.yaml +++ b/CodeGen/docker_image_build/build.yaml @@ -78,12 +78,6 @@ services: dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu extends: codegen image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest} - vllm: - build: - context: vllm - dockerfile: docker/Dockerfile.cpu - extends: codegen - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} vllm-gaudi: build: context: vllm-fork diff --git a/CodeGen/tests/test_compose_on_epyc.sh b/CodeGen/tests/test_compose_on_epyc.sh index efe3c25598..db8498b369 100644 --- a/CodeGen/tests/test_compose_on_epyc.sh +++ b/CodeGen/tests/test_compose_on_epyc.sh @@ -28,19 +28,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &>/dev/null - VLLM_REQ_FILE="requirements/cpu.txt" - if ! grep -q "^transformers" "$VLLM_REQ_FILE"; then - echo "Adding transformers<4.54.0 to $VLLM_REQ_FILE" - echo "transformers<4.54.0" >>"$VLLM_REQ_FILE" - fi - cd ../ - echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="codegen codegen-gradio-ui llm-textgen vllm dataprep retriever embedding" + service_list="codegen codegen-gradio-ui llm-textgen dataprep retriever embedding" docker compose -f build.yaml build ${service_list} --no-cache >${LOG_PATH}/docker_image_build.log diff --git a/CodeGen/tests/test_compose_on_xeon.sh b/CodeGen/tests/test_compose_on_xeon.sh index 0b138b2235..adc21215b5 100644 --- a/CodeGen/tests/test_compose_on_xeon.sh +++ b/CodeGen/tests/test_compose_on_xeon.sh @@ -26,14 +26,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &> /dev/null - cd ../ - echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
- service_list="codegen codegen-gradio-ui llm-textgen vllm dataprep retriever embedding" + service_list="codegen codegen-gradio-ui llm-textgen dataprep retriever embedding" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log diff --git a/CodeGen/tests/test_compose_tgi_on_epyc.sh b/CodeGen/tests/test_compose_tgi_on_epyc.sh index 9476925d3f..2652e5b477 100644 --- a/CodeGen/tests/test_compose_tgi_on_epyc.sh +++ b/CodeGen/tests/test_compose_tgi_on_epyc.sh @@ -28,19 +28,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &>/dev/null - VLLM_REQ_FILE="requirements/cpu.txt" - if ! grep -q "^transformers" "$VLLM_REQ_FILE"; then - echo "Adding transformers<4.54.0 to $VLLM_REQ_FILE" - echo "transformers<4.54.0" >>"$VLLM_REQ_FILE" - fi - cd ../ - echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="codegen codegen-gradio-ui llm-textgen vllm dataprep retriever embedding" + service_list="codegen codegen-gradio-ui llm-textgen dataprep retriever embedding" docker compose -f build.yaml build ${service_list} --no-cache >${LOG_PATH}/docker_image_build.log diff --git a/CodeTrans/docker_compose/amd/cpu/epyc/compose.yaml b/CodeTrans/docker_compose/amd/cpu/epyc/compose.yaml index 3b499de95b..d1b539e5f4 100644 --- a/CodeTrans/docker_compose/amd/cpu/epyc/compose.yaml +++ b/CodeTrans/docker_compose/amd/cpu/epyc/compose.yaml @@ -4,7 +4,7 @@ services: vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: codetrans-epyc-vllm-service ports: - "8008:80" diff --git a/CodeTrans/docker_compose/intel/cpu/xeon/compose.yaml b/CodeTrans/docker_compose/intel/cpu/xeon/compose.yaml index f950c770ec..4e4464f220 100644 --- a/CodeTrans/docker_compose/intel/cpu/xeon/compose.yaml +++ b/CodeTrans/docker_compose/intel/cpu/xeon/compose.yaml @@ -3,7 +3,7 @@ services: vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: codetrans-xeon-vllm-service ports: - "8008:80" diff --git a/CodeTrans/docker_image_build/build.yaml b/CodeTrans/docker_image_build/build.yaml index b230d1d4ec..ad63a520b6 100644 --- a/CodeTrans/docker_image_build/build.yaml +++ b/CodeTrans/docker_image_build/build.yaml @@ -25,12 +25,6 @@ services: dockerfile: comps/llms/src/text-generation/Dockerfile extends: codetrans image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest} - vllm: - build: - context: vllm - dockerfile: docker/Dockerfile.cpu - extends: codetrans - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} vllm-gaudi: build: context: vllm-fork diff --git a/CodeTrans/tests/test_compose_on_epyc.sh b/CodeTrans/tests/test_compose_on_epyc.sh index 50a9fb68b0..3a77d62919 100644 --- a/CodeTrans/tests/test_compose_on_epyc.sh +++ b/CodeTrans/tests/test_compose_on_epyc.sh @@ -28,19 +28,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . 
popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &>/dev/null - VLLM_REQ_FILE="requirements/cpu.txt" - if ! grep -q "^transformers" "$VLLM_REQ_FILE"; then - echo "Adding transformers<4.54.0 to $VLLM_REQ_FILE" - echo "transformers<4.54.0" >>"$VLLM_REQ_FILE" - fi - cd ../ - echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="codetrans codetrans-ui llm-textgen vllm nginx" + service_list="codetrans codetrans-ui llm-textgen nginx" docker compose -f build.yaml build ${service_list} --no-cache >${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/CodeTrans/tests/test_compose_on_xeon.sh b/CodeTrans/tests/test_compose_on_xeon.sh index 8418ba05d1..86d7d5b8e8 100644 --- a/CodeTrans/tests/test_compose_on_xeon.sh +++ b/CodeTrans/tests/test_compose_on_xeon.sh @@ -25,14 +25,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &> /dev/null - cd ../ - echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="codetrans codetrans-ui llm-textgen vllm nginx" + service_list="codetrans codetrans-ui llm-textgen nginx" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/DocSum/docker_compose/amd/cpu/epyc/compose.yaml b/DocSum/docker_compose/amd/cpu/epyc/compose.yaml index ba0c4c0178..3a687b9b9c 100644 --- a/DocSum/docker_compose/amd/cpu/epyc/compose.yaml +++ b/DocSum/docker_compose/amd/cpu/epyc/compose.yaml @@ -4,7 +4,7 @@ services: vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: docsum-epyc-vllm-service ports: - "8008:80" diff --git a/DocSum/docker_compose/intel/cpu/xeon/compose.yaml b/DocSum/docker_compose/intel/cpu/xeon/compose.yaml index 917bfc1140..163129ce95 100644 --- a/DocSum/docker_compose/intel/cpu/xeon/compose.yaml +++ b/DocSum/docker_compose/intel/cpu/xeon/compose.yaml @@ -3,7 +3,7 @@ services: vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: docsum-xeon-vllm-service ports: - ${LLM_ENDPOINT_PORT:-8008}:80 diff --git a/DocSum/docker_image_build/build.yaml b/DocSum/docker_image_build/build.yaml index b4a2eb9c54..d26c32fe5a 100644 --- a/DocSum/docker_image_build/build.yaml +++ b/DocSum/docker_image_build/build.yaml @@ -54,12 +54,6 @@ services: context: GenAIComps dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest} - vllm: - build: - context: vllm - dockerfile: docker/Dockerfile.cpu - extends: docsum - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} vllm-gaudi: build: context: vllm-fork diff --git a/DocSum/tests/test_compose_on_epyc.sh b/DocSum/tests/test_compose_on_epyc.sh index 1a4959366a..79989d0ef7 100644 --- a/DocSum/tests/test_compose_on_epyc.sh +++ b/DocSum/tests/test_compose_on_epyc.sh @@ -38,19 +38,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg 
http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &>/dev/null - VLLM_REQ_FILE="requirements/cpu.txt" - if ! grep -q "^transformers" "$VLLM_REQ_FILE"; then - echo "Adding transformers<4.54.0 to $VLLM_REQ_FILE" - echo "transformers<4.54.0" >>"$VLLM_REQ_FILE" - fi - cd ../ - echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="docsum docsum-gradio-ui whisper llm-docsum vllm" + service_list="docsum docsum-gradio-ui whisper llm-docsum" docker compose -f build.yaml build ${service_list} --no-cache >${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/DocSum/tests/test_compose_on_xeon.sh b/DocSum/tests/test_compose_on_xeon.sh index 5ceed1ec03..2b897853f9 100644 --- a/DocSum/tests/test_compose_on_xeon.sh +++ b/DocSum/tests/test_compose_on_xeon.sh @@ -36,14 +36,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &> /dev/null - cd ../ - echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="docsum docsum-gradio-ui whisper llm-docsum vllm" + service_list="docsum docsum-gradio-ui whisper llm-docsum" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/VisualQnA/docker_compose/amd/cpu/epyc/compose.yaml b/VisualQnA/docker_compose/amd/cpu/epyc/compose.yaml index 9352f411f7..6e8e05e3be 100644 --- a/VisualQnA/docker_compose/amd/cpu/epyc/compose.yaml +++ b/VisualQnA/docker_compose/amd/cpu/epyc/compose.yaml @@ -5,7 +5,7 @@ services: vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-service ports: - ${VLLM_PORT:-8399}:80 diff --git a/VisualQnA/docker_compose/intel/cpu/xeon/compose.yaml b/VisualQnA/docker_compose/intel/cpu/xeon/compose.yaml index 47a99a6b0b..c523908264 100644 --- a/VisualQnA/docker_compose/intel/cpu/xeon/compose.yaml +++ b/VisualQnA/docker_compose/intel/cpu/xeon/compose.yaml @@ -3,7 +3,7 @@ services: vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-service ports: - ${VLLM_PORT:-8399}:80 diff --git a/VisualQnA/docker_image_build/build.yaml b/VisualQnA/docker_image_build/build.yaml index e8b1240040..5693935fb6 100644 --- a/VisualQnA/docker_image_build/build.yaml +++ b/VisualQnA/docker_image_build/build.yaml @@ -37,12 +37,6 @@ services: dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu extends: visualqna image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest} - vllm: - build: - context: vllm - dockerfile: docker/Dockerfile.cpu - extends: visualqna - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} vllm-gaudi: build: context: vllm-fork diff --git a/VisualQnA/tests/test_compose_on_epyc.sh b/VisualQnA/tests/test_compose_on_epyc.sh index 7eb33312b1..e702b40066 100644 --- a/VisualQnA/tests/test_compose_on_epyc.sh +++ b/VisualQnA/tests/test_compose_on_epyc.sh @@ -26,18 +26,8 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" 
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &>/dev/null - VLLM_REQ_FILE="requirements/cpu.txt" - if ! grep -q "^transformers" "$VLLM_REQ_FILE"; then - echo "Adding transformers<4.54.0 to $VLLM_REQ_FILE" - echo "transformers<4.54.0" >>"$VLLM_REQ_FILE" - fi - cd ../ - service_list="visualqna visualqna-ui lvm nginx vllm" + service_list="visualqna visualqna-ui lvm nginx" docker compose -f build.yaml build ${service_list} --no-cache >${LOG_PATH}/docker_image_build.log docker images && sleep 1s } diff --git a/VisualQnA/tests/test_compose_on_xeon.sh b/VisualQnA/tests/test_compose_on_xeon.sh index 1775e8ade7..7ff1ccf0aa 100644 --- a/VisualQnA/tests/test_compose_on_xeon.sh +++ b/VisualQnA/tests/test_compose_on_xeon.sh @@ -24,13 +24,8 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &> /dev/null - cd ../ - service_list="visualqna visualqna-ui lvm nginx vllm" + service_list="visualqna visualqna-ui lvm nginx" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s } From 319b8ee3e753adc5937f542118985780814b778c Mon Sep 17 00:00:00 2001 From: ZePan110 Date: Thu, 11 Sep 2025 16:35:03 +0800 Subject: [PATCH 18/78] Fix invalid source for oneclick workflow (#2265) Signed-off-by: ZePan110 --- .github/workflows/_run-one-click.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/_run-one-click.yml b/.github/workflows/_run-one-click.yml index a44822e2d2..149790a5e8 100644 --- a/.github/workflows/_run-one-click.yml +++ b/.github/workflows/_run-one-click.yml @@ -115,6 +115,7 @@ jobs: - name: Install dependencies run: | # Use flock to prevent apt conflicts when multiple instances run + sudo rm -f /etc/apt/sources.list.d/helm-stable-debian.list || true sudo flock /var/lib/dpkg/lock-frontend apt update sudo flock /var/lib/dpkg/lock-frontend apt install -y python3-pip From 5d8cffd40eb27e8e4138490677cb801c9f97c60e Mon Sep 17 00:00:00 2001 From: ZePan110 Date: Tue, 16 Sep 2025 14:39:01 +0800 Subject: [PATCH 19/78] Use vllm-gaudi:1.22.0 image for AudioQnA and ChatQnA (#2266) Signed-off-by: ZePan110 --- AudioQnA/docker_compose/intel/hpu/gaudi/compose.yaml | 2 +- AudioQnA/docker_image_build/build.yaml | 6 ------ AudioQnA/tests/test_compose_multilang_on_xeon.sh | 1 + AudioQnA/tests/test_compose_on_epyc.sh | 1 + AudioQnA/tests/test_compose_on_gaudi.sh | 9 ++------- AudioQnA/tests/test_compose_on_rocm.sh | 1 + AudioQnA/tests/test_compose_on_xeon.sh | 1 + AudioQnA/tests/test_compose_openeuler_on_xeon.sh | 1 + AudioQnA/tests/test_compose_tgi_on_epyc.sh | 1 + AudioQnA/tests/test_compose_tgi_on_gaudi.sh | 1 + AudioQnA/tests/test_compose_tgi_on_xeon.sh | 1 + AudioQnA/tests/test_compose_vllm_on_rocm.sh | 1 + ChatQnA/docker_compose/intel/hpu/gaudi/compose.yaml | 2 +- .../docker_compose/intel/hpu/gaudi/compose_faqgen.yaml | 2 +- .../intel/hpu/gaudi/compose_guardrails.yaml | 4 ++-- .../intel/hpu/gaudi/compose_without_rerank.yaml | 2 +- 
ChatQnA/docker_image_build/build.yaml | 6 ------ ChatQnA/tests/test_compose_faqgen_on_gaudi.sh | 6 ++---- ChatQnA/tests/test_compose_faqgen_tgi_on_gaudi.sh | 1 + ChatQnA/tests/test_compose_guardrails_on_gaudi.sh | 5 +---- ChatQnA/tests/test_compose_mariadb_on_xeon.sh | 1 + ChatQnA/tests/test_compose_milvus_on_xeon.sh | 1 + ChatQnA/tests/test_compose_on_gaudi.sh | 5 +---- ChatQnA/tests/test_compose_on_xeon.sh | 2 +- ChatQnA/tests/test_compose_openeuler_on_xeon.sh | 2 +- ChatQnA/tests/test_compose_qdrant_on_xeon.sh | 2 +- ChatQnA/tests/test_compose_tgi_on_gaudi.sh | 1 + ChatQnA/tests/test_compose_tgi_on_xeon.sh | 2 +- ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh | 5 +---- ChatQnA/tests/test_compose_without_rerank_on_xeon.sh | 2 +- 30 files changed, 31 insertions(+), 46 deletions(-) diff --git a/AudioQnA/docker_compose/intel/hpu/gaudi/compose.yaml b/AudioQnA/docker_compose/intel/hpu/gaudi/compose.yaml index 3cfd68c9b1..b99050f722 100644 --- a/AudioQnA/docker_compose/intel/hpu/gaudi/compose.yaml +++ b/AudioQnA/docker_compose/intel/hpu/gaudi/compose.yaml @@ -35,7 +35,7 @@ services: - SYS_NICE restart: unless-stopped vllm-service: - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} + image: opea/vllm-gaudi:1.22.0 container_name: vllm-gaudi-service ports: - ${LLM_SERVER_PORT:-3006}:80 diff --git a/AudioQnA/docker_image_build/build.yaml b/AudioQnA/docker_image_build/build.yaml index 95dd19a322..696a2bfdb6 100644 --- a/AudioQnA/docker_image_build/build.yaml +++ b/AudioQnA/docker_image_build/build.yaml @@ -102,12 +102,6 @@ services: dockerfile: comps/third_parties/gpt-sovits/src/Dockerfile extends: audioqna image: ${REGISTRY:-opea}/gpt-sovits:${TAG:-latest} - vllm-gaudi: - build: - context: vllm-fork - dockerfile: Dockerfile.hpu - extends: audioqna - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} vllm-rocm: build: context: GenAIComps diff --git a/AudioQnA/tests/test_compose_multilang_on_xeon.sh b/AudioQnA/tests/test_compose_multilang_on_xeon.sh index d8af4c22d2..22e569dc26 100644 --- a/AudioQnA/tests/test_compose_multilang_on_xeon.sh +++ b/AudioQnA/tests/test_compose_multilang_on_xeon.sh @@ -35,6 +35,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon/ export host_ip=${ip_address} + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh # sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env diff --git a/AudioQnA/tests/test_compose_on_epyc.sh b/AudioQnA/tests/test_compose_on_epyc.sh index 10a7f2d76a..ac154e3632 100644 --- a/AudioQnA/tests/test_compose_on_epyc.sh +++ b/AudioQnA/tests/test_compose_on_epyc.sh @@ -37,6 +37,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/amd/cpu/epyc/ export host_ip=${ip_address} + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh # sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env diff --git a/AudioQnA/tests/test_compose_on_gaudi.sh b/AudioQnA/tests/test_compose_on_gaudi.sh index c24f5ff82e..921bc1794f 100644 --- a/AudioQnA/tests/test_compose_on_gaudi.sh +++ b/AudioQnA/tests/test_compose_on_gaudi.sh @@ -25,14 +25,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . 
popd && sleep 1s - git clone https://github.com/HabanaAI/vllm-fork.git - cd vllm-fork/ - VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 - echo "Check out vLLM tag ${VLLM_FORK_VER}" - git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ - echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="audioqna audioqna-ui whisper-gaudi speecht5-gaudi vllm-gaudi" + service_list="audioqna audioqna-ui whisper-gaudi speecht5-gaudi" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s @@ -41,6 +35,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi export host_ip=${ip_address} + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh # sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env diff --git a/AudioQnA/tests/test_compose_on_rocm.sh b/AudioQnA/tests/test_compose_on_rocm.sh index 9456bf6bd1..8a235a6728 100644 --- a/AudioQnA/tests/test_compose_on_rocm.sh +++ b/AudioQnA/tests/test_compose_on_rocm.sh @@ -35,6 +35,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/amd/gpu/rocm/ + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh # Start Docker Containers docker compose up -d > ${LOG_PATH}/start_services_with_compose.log diff --git a/AudioQnA/tests/test_compose_on_xeon.sh b/AudioQnA/tests/test_compose_on_xeon.sh index e25bc5a1d2..51ebb963f5 100644 --- a/AudioQnA/tests/test_compose_on_xeon.sh +++ b/AudioQnA/tests/test_compose_on_xeon.sh @@ -35,6 +35,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon/ export host_ip=${ip_address} + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh # sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env diff --git a/AudioQnA/tests/test_compose_openeuler_on_xeon.sh b/AudioQnA/tests/test_compose_openeuler_on_xeon.sh index 6a1728a573..3b30adfe2d 100644 --- a/AudioQnA/tests/test_compose_openeuler_on_xeon.sh +++ b/AudioQnA/tests/test_compose_openeuler_on_xeon.sh @@ -35,6 +35,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon/ export host_ip=${ip_address} + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh # sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env diff --git a/AudioQnA/tests/test_compose_tgi_on_epyc.sh b/AudioQnA/tests/test_compose_tgi_on_epyc.sh index b442a155a8..e9c671839e 100644 --- a/AudioQnA/tests/test_compose_tgi_on_epyc.sh +++ b/AudioQnA/tests/test_compose_tgi_on_epyc.sh @@ -38,6 +38,7 @@ function build_docker_images() { function start_services() { echo $WORKPATH cd $WORKPATH/docker_compose/amd/cpu/epyc/ + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh # Start Docker Containers docker compose -f compose_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log diff --git a/AudioQnA/tests/test_compose_tgi_on_gaudi.sh b/AudioQnA/tests/test_compose_tgi_on_gaudi.sh index dd68dfe770..0adc3a48a8 100644 --- a/AudioQnA/tests/test_compose_tgi_on_gaudi.sh +++ b/AudioQnA/tests/test_compose_tgi_on_gaudi.sh @@ -35,6 +35,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi export host_ip=${ip_address} + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh # Start Docker Containers docker compose -f compose_tgi.yaml up -d > 
${LOG_PATH}/start_services_with_compose.log diff --git a/AudioQnA/tests/test_compose_tgi_on_xeon.sh b/AudioQnA/tests/test_compose_tgi_on_xeon.sh index bc1f945062..31f62b5211 100644 --- a/AudioQnA/tests/test_compose_tgi_on_xeon.sh +++ b/AudioQnA/tests/test_compose_tgi_on_xeon.sh @@ -35,6 +35,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon/ export host_ip=${ip_address} + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh # Start Docker Containers docker compose -f compose_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log diff --git a/AudioQnA/tests/test_compose_vllm_on_rocm.sh b/AudioQnA/tests/test_compose_vllm_on_rocm.sh index 925b0ba9d8..0d15500107 100644 --- a/AudioQnA/tests/test_compose_vllm_on_rocm.sh +++ b/AudioQnA/tests/test_compose_vllm_on_rocm.sh @@ -33,6 +33,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/amd/gpu/rocm/ + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env_vllm.sh sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env diff --git a/ChatQnA/docker_compose/intel/hpu/gaudi/compose.yaml b/ChatQnA/docker_compose/intel/hpu/gaudi/compose.yaml index 23e7cee19b..cc4048c8d2 100644 --- a/ChatQnA/docker_compose/intel/hpu/gaudi/compose.yaml +++ b/ChatQnA/docker_compose/intel/hpu/gaudi/compose.yaml @@ -92,7 +92,7 @@ services: MAX_WARMUP_SEQUENCE_LENGTH: 512 command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} + image: ${REGISTRY:-opea}/vllm-gaudi:1.22.0 container_name: vllm-gaudi-server ports: - "8007:80" diff --git a/ChatQnA/docker_compose/intel/hpu/gaudi/compose_faqgen.yaml b/ChatQnA/docker_compose/intel/hpu/gaudi/compose_faqgen.yaml index 3fa1b5c4af..182c2b5bc2 100644 --- a/ChatQnA/docker_compose/intel/hpu/gaudi/compose_faqgen.yaml +++ b/ChatQnA/docker_compose/intel/hpu/gaudi/compose_faqgen.yaml @@ -85,7 +85,7 @@ services: MAX_WARMUP_SEQUENCE_LENGTH: 512 command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} + image: opea/vllm-gaudi:1.22.0 container_name: vllm-gaudi-server ports: - ${LLM_ENDPOINT_PORT:-8007}:80 diff --git a/ChatQnA/docker_compose/intel/hpu/gaudi/compose_guardrails.yaml b/ChatQnA/docker_compose/intel/hpu/gaudi/compose_guardrails.yaml index e2ec071f1e..69b1339e0c 100644 --- a/ChatQnA/docker_compose/intel/hpu/gaudi/compose_guardrails.yaml +++ b/ChatQnA/docker_compose/intel/hpu/gaudi/compose_guardrails.yaml @@ -32,7 +32,7 @@ services: retries: 50 restart: unless-stopped vllm-guardrails-service: - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} + image: opea/vllm-gaudi:1.22.0 container_name: vllm-guardrails-server ports: - "8088:80" @@ -130,7 +130,7 @@ services: MAX_WARMUP_SEQUENCE_LENGTH: 512 command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} + image: ${REGISTRY:-opea}/vllm-gaudi:1.22.0 container_name: vllm-gaudi-server ports: - "8008:80" diff --git a/ChatQnA/docker_compose/intel/hpu/gaudi/compose_without_rerank.yaml b/ChatQnA/docker_compose/intel/hpu/gaudi/compose_without_rerank.yaml index 9c38b38772..017bf8d204 100644 --- a/ChatQnA/docker_compose/intel/hpu/gaudi/compose_without_rerank.yaml +++ b/ChatQnA/docker_compose/intel/hpu/gaudi/compose_without_rerank.yaml @@ -65,7 +65,7 @@ services: RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_REDIS" restart: unless-stopped vllm-service: - image: 
${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} + image: opea/vllm-gaudi:1.22.0 container_name: vllm-gaudi-server ports: - "8007:80" diff --git a/ChatQnA/docker_image_build/build.yaml b/ChatQnA/docker_image_build/build.yaml index f7f831e67f..2a5f8ccd62 100644 --- a/ChatQnA/docker_image_build/build.yaml +++ b/ChatQnA/docker_image_build/build.yaml @@ -114,12 +114,6 @@ services: context: GenAIComps dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest} - vllm-gaudi: - build: - context: vllm-fork - dockerfile: Dockerfile.hpu - extends: chatqna - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} nginx: build: context: GenAIComps diff --git a/ChatQnA/tests/test_compose_faqgen_on_gaudi.sh b/ChatQnA/tests/test_compose_faqgen_on_gaudi.sh index 58ab7526c2..fe3e46809b 100644 --- a/ChatQnA/tests/test_compose_faqgen_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_faqgen_on_gaudi.sh @@ -23,12 +23,9 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork - VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 - git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="chatqna chatqna-ui dataprep retriever llm-faqgen vllm-gaudi nginx" + service_list="chatqna chatqna-ui dataprep retriever llm-faqgen nginx" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s @@ -36,6 +33,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env_faqgen.sh # Start Docker Containers diff --git a/ChatQnA/tests/test_compose_faqgen_tgi_on_gaudi.sh b/ChatQnA/tests/test_compose_faqgen_tgi_on_gaudi.sh index e9868e0052..e4fe73f9f1 100644 --- a/ChatQnA/tests/test_compose_faqgen_tgi_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_faqgen_tgi_on_gaudi.sh @@ -34,6 +34,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi" + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env_faqgen.sh # Start Docker Containers diff --git a/ChatQnA/tests/test_compose_guardrails_on_gaudi.sh b/ChatQnA/tests/test_compose_guardrails_on_gaudi.sh index da8bc25b48..0e261159cb 100644 --- a/ChatQnA/tests/test_compose_guardrails_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_guardrails_on_gaudi.sh @@ -23,12 +23,9 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork - VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 - git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
- service_list="chatqna chatqna-ui dataprep retriever vllm-gaudi guardrails nginx" + service_list="chatqna chatqna-ui dataprep retriever guardrails nginx" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/ChatQnA/tests/test_compose_mariadb_on_xeon.sh b/ChatQnA/tests/test_compose_mariadb_on_xeon.sh index d1e0edc773..3c369357e4 100644 --- a/ChatQnA/tests/test_compose_mariadb_on_xeon.sh +++ b/ChatQnA/tests/test_compose_mariadb_on_xeon.sh @@ -34,6 +34,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon export MARIADB_PASSWORD="test" + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env_mariadb.sh # Start Docker Containers diff --git a/ChatQnA/tests/test_compose_milvus_on_xeon.sh b/ChatQnA/tests/test_compose_milvus_on_xeon.sh index 254e7ed11b..31e470f429 100644 --- a/ChatQnA/tests/test_compose_milvus_on_xeon.sh +++ b/ChatQnA/tests/test_compose_milvus_on_xeon.sh @@ -34,6 +34,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon/ export LOGFLAG=true + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh # Start Docker Containers diff --git a/ChatQnA/tests/test_compose_on_gaudi.sh b/ChatQnA/tests/test_compose_on_gaudi.sh index 221f59c6e5..090f907a2d 100644 --- a/ChatQnA/tests/test_compose_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_on_gaudi.sh @@ -23,12 +23,9 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork - VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 - git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
- service_list="chatqna chatqna-ui dataprep retriever vllm-gaudi nginx" + service_list="chatqna chatqna-ui dataprep retriever nginx" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/ChatQnA/tests/test_compose_on_xeon.sh b/ChatQnA/tests/test_compose_on_xeon.sh index 579dcde2db..21a7b7676f 100644 --- a/ChatQnA/tests/test_compose_on_xeon.sh +++ b/ChatQnA/tests/test_compose_on_xeon.sh @@ -33,7 +33,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon - + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh # Start Docker Containers diff --git a/ChatQnA/tests/test_compose_openeuler_on_xeon.sh b/ChatQnA/tests/test_compose_openeuler_on_xeon.sh index 5f641f5d62..ed6e7ed0d4 100644 --- a/ChatQnA/tests/test_compose_openeuler_on_xeon.sh +++ b/ChatQnA/tests/test_compose_openeuler_on_xeon.sh @@ -33,7 +33,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon - + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh # Start Docker Containers diff --git a/ChatQnA/tests/test_compose_qdrant_on_xeon.sh b/ChatQnA/tests/test_compose_qdrant_on_xeon.sh index 0889c4ad4d..765fb90e33 100644 --- a/ChatQnA/tests/test_compose_qdrant_on_xeon.sh +++ b/ChatQnA/tests/test_compose_qdrant_on_xeon.sh @@ -33,7 +33,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon - + export no_proxy="localhost,127.0.0.1,$ip_address" export INDEX_NAME="rag-qdrant" source set_env.sh diff --git a/ChatQnA/tests/test_compose_tgi_on_gaudi.sh b/ChatQnA/tests/test_compose_tgi_on_gaudi.sh index 356914ea1c..28df0e83aa 100644 --- a/ChatQnA/tests/test_compose_tgi_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_tgi_on_gaudi.sh @@ -35,6 +35,7 @@ function start_services() { export NON_INTERACTIVE=true export host_ip=${ip_address} export telemetry=yes + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh # Start Docker Containers diff --git a/ChatQnA/tests/test_compose_tgi_on_xeon.sh b/ChatQnA/tests/test_compose_tgi_on_xeon.sh index b2a56091a9..229151d04d 100644 --- a/ChatQnA/tests/test_compose_tgi_on_xeon.sh +++ b/ChatQnA/tests/test_compose_tgi_on_xeon.sh @@ -32,7 +32,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon - + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh # Start Docker Containers diff --git a/ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh b/ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh index b6fb222b39..24f72db727 100644 --- a/ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh @@ -23,12 +23,9 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork - VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 - git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
- service_list="chatqna chatqna-ui dataprep retriever vllm-gaudi nginx" + service_list="chatqna chatqna-ui dataprep retriever nginx" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/ChatQnA/tests/test_compose_without_rerank_on_xeon.sh b/ChatQnA/tests/test_compose_without_rerank_on_xeon.sh index 91a6e5b656..ad8d9c2f43 100644 --- a/ChatQnA/tests/test_compose_without_rerank_on_xeon.sh +++ b/ChatQnA/tests/test_compose_without_rerank_on_xeon.sh @@ -33,7 +33,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon - + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh # Start Docker Containers From 159c3d06c24f4ae10a4798c61d7bcc65312901ab Mon Sep 17 00:00:00 2001 From: zhihang Date: Tue, 16 Sep 2025 14:50:25 +0800 Subject: [PATCH 20/78] Bump the version of `openeuler/vllm-cpu` to 0.10.1 (#2269) Signed-off-by: zhihang --- AudioQnA/docker_compose/intel/cpu/xeon/compose_openeuler.yaml | 3 +-- ChatQnA/docker_compose/intel/cpu/xeon/compose_openeuler.yaml | 3 +-- CodeGen/docker_compose/intel/cpu/xeon/compose_openeuler.yaml | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/AudioQnA/docker_compose/intel/cpu/xeon/compose_openeuler.yaml b/AudioQnA/docker_compose/intel/cpu/xeon/compose_openeuler.yaml index f977cf8ac8..5ae931a78f 100644 --- a/AudioQnA/docker_compose/intel/cpu/xeon/compose_openeuler.yaml +++ b/AudioQnA/docker_compose/intel/cpu/xeon/compose_openeuler.yaml @@ -25,14 +25,13 @@ services: https_proxy: ${https_proxy} restart: unless-stopped vllm-service: - image: openeuler/vllm-cpu:0.9.1-oe2403lts + image: openeuler/vllm-cpu:0.10.1-oe2403lts container_name: vllm-service ports: - ${LLM_SERVER_PORT:-3006}:80 volumes: - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub" shm_size: 128g - privileged: true environment: no_proxy: ${no_proxy} http_proxy: ${http_proxy} diff --git a/ChatQnA/docker_compose/intel/cpu/xeon/compose_openeuler.yaml b/ChatQnA/docker_compose/intel/cpu/xeon/compose_openeuler.yaml index 44fef78971..679ddd2fd8 100644 --- a/ChatQnA/docker_compose/intel/cpu/xeon/compose_openeuler.yaml +++ b/ChatQnA/docker_compose/intel/cpu/xeon/compose_openeuler.yaml @@ -88,14 +88,13 @@ services: HF_HUB_ENABLE_HF_TRANSFER: 0 command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: openeuler/vllm-cpu:0.9.1-oe2403lts + image: openeuler/vllm-cpu:0.10.1-oe2403lts container_name: vllm-service ports: - "9009:80" volumes: - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub" shm_size: 128g - privileged: true environment: no_proxy: ${no_proxy} http_proxy: ${http_proxy} diff --git a/CodeGen/docker_compose/intel/cpu/xeon/compose_openeuler.yaml b/CodeGen/docker_compose/intel/cpu/xeon/compose_openeuler.yaml index c4f6d6791d..fd2b9fc9ba 100644 --- a/CodeGen/docker_compose/intel/cpu/xeon/compose_openeuler.yaml +++ b/CodeGen/docker_compose/intel/cpu/xeon/compose_openeuler.yaml @@ -4,14 +4,13 @@ services: vllm-service: - image: openeuler/vllm-cpu:0.9.1-oe2403lts + image: openeuler/vllm-cpu:0.10.1-oe2403lts container_name: vllm-server ports: - "8028:80" volumes: - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub" shm_size: 1g - privileged: true environment: no_proxy: ${no_proxy} http_proxy: ${http_proxy} From ff60f4a38c4f43e4bc427be38ba2230abedd8f5f Mon Sep 17 00:00:00 2001 From: zhihang Date: Wed, 17 Sep 2025 09:48:00 +0800 Subject: [PATCH 21/78] Add openEuler Helm values for ChatQnA (#2263) Signed-off-by: zhihang 
--- .../kubernetes/helm/cpu-openeuler-values.yaml | 58 +++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 ChatQnA/kubernetes/helm/cpu-openeuler-values.yaml diff --git a/ChatQnA/kubernetes/helm/cpu-openeuler-values.yaml b/ChatQnA/kubernetes/helm/cpu-openeuler-values.yaml new file mode 100644 index 0000000000..5206d1abf9 --- /dev/null +++ b/ChatQnA/kubernetes/helm/cpu-openeuler-values.yaml @@ -0,0 +1,58 @@ +# Copyright (C) 2025 Huawei Technologies Co., Ltd. +# SPDX-License-Identifier: Apache-2.0 + +# This file is based on cpu-values.yaml and overrides image tags to 'latest-openeuler' +# for all enabled services to run on openEuler. + +# Overrides for the main chart image +image: + tag: latest-openeuler + +# Overrides from cpu-values.yaml +vllm: + image: + repository: openeuler/vllm-cpu + tag: 0.10.1-oe2403lts + LLM_MODEL_ID: meta-llama/Meta-Llama-3-8B-Instruct + + # Uncomment the following model specific settings for DeepSeek models + #VLLM_CPU_KVCACHE_SPACE: 40 + #resources: + # requests: + # memory: 60Gi # 40G for KV cache, and 20G for DeepSeek-R1-Distill-Qwen-7B, need to adjust it for other models + +# Overrides for subchart images +# Based on the default values in opea-project/GenAIInfra/helm-charts/chatqna/values.yaml, +# the following services are enabled by default. + +# data-prep service +data-prep: + image: + tag: latest-openeuler + +# retriever-usvc service +retriever-usvc: + image: + tag: latest-openeuler + +# tei-rerank service +teirerank: + image: + repository: openeuler/text-embeddings-inference-cpu + tag: 1.7.0-oe2403lts + +# tei service +tei: + image: + repository: openeuler/text-embeddings-inference-cpu + tag: 1.7.0-oe2403lts + +# nginx service +nginx: + image: + tag: latest-openeuler + +# chatqna-ui service +chatqna-ui: + image: + tag: latest-openeuler From b3636358d3d53ccfbc3aadb03afd5b94b248cf4c Mon Sep 17 00:00:00 2001 From: ZePan110 Date: Thu, 18 Sep 2025 16:03:40 +0800 Subject: [PATCH 22/78] Use vllm-gaudi:1.22.0 image and add no_proxy (#2270) Signed-off-by: ZePan110 Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/env/_build_image.sh | 6 ------ .github/workflows/_build_image.yml | 7 ------- .github/workflows/_trivy-scan.yml | 7 ------- ...on.yml => daily-update-vllm-version.yml.disabled} | 0 CodeGen/tests/test_compose_on_epyc.sh | 2 +- CodeGen/tests/test_compose_on_gaudi.sh | 2 +- CodeGen/tests/test_compose_on_xeon.sh | 2 +- CodeGen/tests/test_compose_openeuler_on_xeon.sh | 2 +- CodeGen/tests/test_compose_tgi_on_epyc.sh | 2 +- CodeTrans/tests/test_compose_on_epyc.sh | 2 +- CodeTrans/tests/test_compose_on_gaudi.sh | 1 + CodeTrans/tests/test_compose_on_xeon.sh | 2 +- CodeTrans/tests/test_compose_tgi_on_epyc.sh | 2 +- CodeTrans/tests/test_compose_tgi_on_gaudi.sh | 2 +- CodeTrans/tests/test_compose_tgi_on_xeon.sh | 2 +- DBQnA/tests/test_compose_on_xeon.sh | 1 + .../docker_compose/intel/hpu/gaudi/compose.yaml | 2 +- DeepResearchAgent/docker_image_build/build.yaml | 6 ------ DeepResearchAgent/tests/test_compose_on_gaudi.sh | 4 +--- .../tests/test_compose_milvus_on_gaudi.sh | 1 + .../tests/test_compose_milvus_on_xeon.sh | 1 + DocIndexRetriever/tests/test_compose_on_gaudi.sh | 1 + DocIndexRetriever/tests/test_compose_on_xeon.sh | 1 + .../tests/test_compose_without_rerank_on_xeon.sh | 1 + DocSum/tests/test_compose_on_epyc.sh | 1 + DocSum/tests/test_compose_on_gaudi.sh | 1 + DocSum/tests/test_compose_on_xeon.sh | 1 + DocSum/tests/test_compose_tgi_on_epyc.sh | 1 + DocSum/tests/test_compose_tgi_on_gaudi.sh 
| 1 + DocSum/tests/test_compose_tgi_on_xeon.sh | 1 + .../docker_compose/intel/hpu/gaudi/compose.yaml | 2 +- GraphRAG/tests/test_compose_on_gaudi.sh | 2 +- .../docker_compose/intel/hpu/gaudi/compose.yaml | 2 +- HybridRAG/docker_image_build/build.yaml | 6 ------ HybridRAG/tests/test_compose_on_gaudi.sh | 12 ++---------- MultimodalQnA/tests/test_compose_milvus_on_epyc.sh | 1 + MultimodalQnA/tests/test_compose_milvus_on_xeon.sh | 1 + MultimodalQnA/tests/test_compose_on_epyc.sh | 1 + MultimodalQnA/tests/test_compose_on_gaudi.sh | 2 +- MultimodalQnA/tests/test_compose_on_xeon.sh | 1 + MultimodalQnA/tests/test_compose_vllm_on_rocm.sh | 1 + SearchQnA/tests/test_compose_on_epyc.sh | 1 + SearchQnA/tests/test_compose_on_gaudi.sh | 1 + SearchQnA/tests/test_compose_on_xeon.sh | 1 + Translation/tests/test_compose_on_epyc.sh | 1 + Translation/tests/test_compose_on_gaudi.sh | 1 + Translation/tests/test_compose_on_xeon.sh | 1 + VideoQnA/tests/test_compose_on_xeon.sh | 2 +- VisualQnA/tests/test_compose_on_epyc.sh | 1 + VisualQnA/tests/test_compose_on_xeon.sh | 1 + VisualQnA/tests/test_compose_tgi_on_epyc.sh | 1 + VisualQnA/tests/test_compose_tgi_on_gaudi.sh | 1 + VisualQnA/tests/test_compose_tgi_on_xeon.sh | 1 + 53 files changed, 48 insertions(+), 61 deletions(-) delete mode 100644 .github/env/_build_image.sh rename .github/workflows/{daily-update-vllm-version.yml => daily-update-vllm-version.yml.disabled} (100%) diff --git a/.github/env/_build_image.sh b/.github/env/_build_image.sh deleted file mode 100644 index 61b0d902ed..0000000000 --- a/.github/env/_build_image.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -# Copyright (C) 2025 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -export VLLM_VER=v0.10.0 -export VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 diff --git a/.github/workflows/_build_image.yml b/.github/workflows/_build_image.yml index a91c522847..ebfa0ea22c 100644 --- a/.github/workflows/_build_image.yml +++ b/.github/workflows/_build_image.yml @@ -87,13 +87,6 @@ jobs: run: | cd ${{ github.workspace }}/${{ inputs.example }}/docker_image_build docker_compose_path=${{ github.workspace }}/${{ inputs.example }}/docker_image_build/build.yaml - source ${{ github.workspace }}/.github/env/_build_image.sh - if [[ $(grep -c "vllm:" ${docker_compose_path}) != 0 ]]; then - git clone -b ${VLLM_VER} --single-branch https://github.com/vllm-project/vllm.git - fi - if [[ $(grep -c "vllm-gaudi:" ${docker_compose_path}) != 0 ]]; then - git clone -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git - fi git clone --depth 1 --branch ${{ inputs.opea_branch }} https://github.com/opea-project/GenAIComps.git cd GenAIComps && git rev-parse HEAD && cd ../ diff --git a/.github/workflows/_trivy-scan.yml b/.github/workflows/_trivy-scan.yml index 0ad85891ea..05dd74d4b8 100644 --- a/.github/workflows/_trivy-scan.yml +++ b/.github/workflows/_trivy-scan.yml @@ -50,13 +50,6 @@ jobs: sudo apt-get install -y jq - name: Clone Required Repo run: | - source ${{ github.workspace }}/.github/env/_build_image.sh - if [[ "${{ inputs.image }}" == "vllm" ]]; then - git clone -b ${VLLM_VER} --single-branch https://github.com/vllm-project/vllm.git - fi - if [[ "${{ inputs.image }}" == "vllm-gaudi" ]]; then - git clone -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git - fi git clone --depth 1 https://github.com/opea-project/GenAIComps.git cd GenAIComps && git rev-parse HEAD && cd ../ - name: Pull Image diff --git a/.github/workflows/daily-update-vllm-version.yml 
b/.github/workflows/daily-update-vllm-version.yml.disabled similarity index 100% rename from .github/workflows/daily-update-vllm-version.yml rename to .github/workflows/daily-update-vllm-version.yml.disabled diff --git a/CodeGen/tests/test_compose_on_epyc.sh b/CodeGen/tests/test_compose_on_epyc.sh index db8498b369..389e70b54c 100644 --- a/CodeGen/tests/test_compose_on_epyc.sh +++ b/CodeGen/tests/test_compose_on_epyc.sh @@ -40,7 +40,7 @@ function build_docker_images() { function start_services() { local compose_file="$1" local llm_container_name="$2" - + export no_proxy="localhost,127.0.0.1,$ip_address" cd $WORKPATH/docker_compose/amd/cpu/epyc/ # Start Docker Containers diff --git a/CodeGen/tests/test_compose_on_gaudi.sh b/CodeGen/tests/test_compose_on_gaudi.sh index 516624827f..9e0339f07a 100644 --- a/CodeGen/tests/test_compose_on_gaudi.sh +++ b/CodeGen/tests/test_compose_on_gaudi.sh @@ -41,7 +41,7 @@ function build_docker_images() { function start_services() { local compose_file="$1" local llm_container_name="$2" - + export no_proxy="localhost,127.0.0.1,$ip_address" cd $WORKPATH/docker_compose/intel/hpu/gaudi # Start Docker Containers diff --git a/CodeGen/tests/test_compose_on_xeon.sh b/CodeGen/tests/test_compose_on_xeon.sh index adc21215b5..5be0455d74 100644 --- a/CodeGen/tests/test_compose_on_xeon.sh +++ b/CodeGen/tests/test_compose_on_xeon.sh @@ -38,7 +38,7 @@ function build_docker_images() { function start_services() { local compose_file="$1" local llm_container_name="$2" - + export no_proxy="localhost,127.0.0.1,$ip_address" cd $WORKPATH/docker_compose/intel/cpu/xeon/ # Start Docker Containers diff --git a/CodeGen/tests/test_compose_openeuler_on_xeon.sh b/CodeGen/tests/test_compose_openeuler_on_xeon.sh index 81d4bf4b0b..80fb6754f7 100644 --- a/CodeGen/tests/test_compose_openeuler_on_xeon.sh +++ b/CodeGen/tests/test_compose_openeuler_on_xeon.sh @@ -38,7 +38,7 @@ function build_docker_images() { function start_services() { local compose_file="$1" local llm_container_name="$2" - + export no_proxy="localhost,127.0.0.1,$ip_address" cd $WORKPATH/docker_compose/intel/cpu/xeon/ # Start Docker Containers diff --git a/CodeGen/tests/test_compose_tgi_on_epyc.sh b/CodeGen/tests/test_compose_tgi_on_epyc.sh index 2652e5b477..bfa3513df8 100644 --- a/CodeGen/tests/test_compose_tgi_on_epyc.sh +++ b/CodeGen/tests/test_compose_tgi_on_epyc.sh @@ -40,7 +40,7 @@ function build_docker_images() { function start_services() { local compose_file="$1" local llm_container_name="$2" - + export no_proxy="localhost,127.0.0.1,$ip_address" cd $WORKPATH/docker_compose/amd/cpu/epyc/ # Start Docker Containers diff --git a/CodeTrans/tests/test_compose_on_epyc.sh b/CodeTrans/tests/test_compose_on_epyc.sh index 3a77d62919..ba0af9b252 100644 --- a/CodeTrans/tests/test_compose_on_epyc.sh +++ b/CodeTrans/tests/test_compose_on_epyc.sh @@ -38,7 +38,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/amd/cpu/epyc export HF_TOKEN=${HF_TOKEN} - + export no_proxy="localhost,127.0.0.1,$ip_address" export NGINX_PORT=80 source set_env.sh diff --git a/CodeTrans/tests/test_compose_on_gaudi.sh b/CodeTrans/tests/test_compose_on_gaudi.sh index 07af411cc1..759636fbd4 100644 --- a/CodeTrans/tests/test_compose_on_gaudi.sh +++ b/CodeTrans/tests/test_compose_on_gaudi.sh @@ -40,6 +40,7 @@ function start_services() { cd $WORKPATH/docker_compose/intel export HF_TOKEN=${HF_TOKEN} export NGINX_PORT=80 + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh cd hpu/gaudi diff --git 
a/CodeTrans/tests/test_compose_on_xeon.sh b/CodeTrans/tests/test_compose_on_xeon.sh index 86d7d5b8e8..50a76968aa 100644 --- a/CodeTrans/tests/test_compose_on_xeon.sh +++ b/CodeTrans/tests/test_compose_on_xeon.sh @@ -35,7 +35,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel export HF_TOKEN=${HF_TOKEN} - + export no_proxy="localhost,127.0.0.1,$ip_address" export NGINX_PORT=80 source set_env.sh cd cpu/xeon/ diff --git a/CodeTrans/tests/test_compose_tgi_on_epyc.sh b/CodeTrans/tests/test_compose_tgi_on_epyc.sh index 2eb464c485..d33ba0a933 100644 --- a/CodeTrans/tests/test_compose_tgi_on_epyc.sh +++ b/CodeTrans/tests/test_compose_tgi_on_epyc.sh @@ -39,7 +39,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/amd/cpu/epyc/ export HF_TOKEN=${HF_TOKEN} - + export no_proxy="localhost,127.0.0.1,$ip_address" export NGINX_PORT=80 source set_env.sh diff --git a/CodeTrans/tests/test_compose_tgi_on_gaudi.sh b/CodeTrans/tests/test_compose_tgi_on_gaudi.sh index 129e677149..dc11b90a0e 100644 --- a/CodeTrans/tests/test_compose_tgi_on_gaudi.sh +++ b/CodeTrans/tests/test_compose_tgi_on_gaudi.sh @@ -36,7 +36,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel export HF_TOKEN=${HF_TOKEN} - + export no_proxy="localhost,127.0.0.1,$ip_address" export NGINX_PORT=80 source set_env.sh cd hpu/gaudi/ diff --git a/CodeTrans/tests/test_compose_tgi_on_xeon.sh b/CodeTrans/tests/test_compose_tgi_on_xeon.sh index aacd5dfda5..4a23731cbb 100644 --- a/CodeTrans/tests/test_compose_tgi_on_xeon.sh +++ b/CodeTrans/tests/test_compose_tgi_on_xeon.sh @@ -36,7 +36,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel export HF_TOKEN=${HF_TOKEN} - + export no_proxy="localhost,127.0.0.1,$ip_address" export NGINX_PORT=80 source set_env.sh cd cpu/xeon/ diff --git a/DBQnA/tests/test_compose_on_xeon.sh b/DBQnA/tests/test_compose_on_xeon.sh index c410cc48f8..97cfbab7a4 100755 --- a/DBQnA/tests/test_compose_on_xeon.sh +++ b/DBQnA/tests/test_compose_on_xeon.sh @@ -29,6 +29,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon + export no_proxy="localhost,127.0.0.1,$ip_address" source ./set_env.sh # Start Docker Containers diff --git a/DeepResearchAgent/docker_compose/intel/hpu/gaudi/compose.yaml b/DeepResearchAgent/docker_compose/intel/hpu/gaudi/compose.yaml index dc7a05c270..d49af13a94 100644 --- a/DeepResearchAgent/docker_compose/intel/hpu/gaudi/compose.yaml +++ b/DeepResearchAgent/docker_compose/intel/hpu/gaudi/compose.yaml @@ -20,7 +20,7 @@ x-common-agent-environment: services: vllm-service: - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} + image: opea/vllm-gaudi:1.22.0 container_name: vllm-gaudi-server ports: - "8000:8000" diff --git a/DeepResearchAgent/docker_image_build/build.yaml b/DeepResearchAgent/docker_image_build/build.yaml index 5ac2e17a90..09ef66da26 100644 --- a/DeepResearchAgent/docker_image_build/build.yaml +++ b/DeepResearchAgent/docker_image_build/build.yaml @@ -13,9 +13,3 @@ services: context: ../ dockerfile: ./Dockerfile image: ${REGISTRY:-opea}/deep-research-agent:${TAG:-latest} - vllm-gaudi: - build: - context: vllm-fork - dockerfile: Dockerfile.hpu - extends: deep-research-agent - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} diff --git a/DeepResearchAgent/tests/test_compose_on_gaudi.sh b/DeepResearchAgent/tests/test_compose_on_gaudi.sh index c180640201..e76a66b9cc 100644 
--- a/DeepResearchAgent/tests/test_compose_on_gaudi.sh +++ b/DeepResearchAgent/tests/test_compose_on_gaudi.sh @@ -25,9 +25,6 @@ function build_docker_images() { echo "GenAIComps test commit is $(git rev-parse HEAD)" docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork - VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 - git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ echo "Build all the images with --no-cache, check docker_image_build.log for details..." docker compose -f build.yaml build --no-cache > ${LOG_PATH}/docker_image_build.log @@ -37,6 +34,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh # Start Docker Containers diff --git a/DocIndexRetriever/tests/test_compose_milvus_on_gaudi.sh b/DocIndexRetriever/tests/test_compose_milvus_on_gaudi.sh index 46c5f22f0e..5f3f90842b 100644 --- a/DocIndexRetriever/tests/test_compose_milvus_on_gaudi.sh +++ b/DocIndexRetriever/tests/test_compose_milvus_on_gaudi.sh @@ -35,6 +35,7 @@ function build_docker_images() { function start_services() { echo "Starting Docker Services...." cd $WORKPATH/docker_compose/intel/hpu/gaudi + export no_proxy="localhost,127.0.0.1,$ip_address" source ./set_env.sh # Start Docker Containers diff --git a/DocIndexRetriever/tests/test_compose_milvus_on_xeon.sh b/DocIndexRetriever/tests/test_compose_milvus_on_xeon.sh index 37bf681dcf..80f18fa515 100755 --- a/DocIndexRetriever/tests/test_compose_milvus_on_xeon.sh +++ b/DocIndexRetriever/tests/test_compose_milvus_on_xeon.sh @@ -35,6 +35,7 @@ function build_docker_images() { function start_services() { echo "Starting Docker Services...." cd $WORKPATH/docker_compose/intel/cpu/xeon + export no_proxy="localhost,127.0.0.1,$ip_address" source ./set_env.sh # Start Docker Containers diff --git a/DocIndexRetriever/tests/test_compose_on_gaudi.sh b/DocIndexRetriever/tests/test_compose_on_gaudi.sh index 11541eca1e..e65dbb8a15 100644 --- a/DocIndexRetriever/tests/test_compose_on_gaudi.sh +++ b/DocIndexRetriever/tests/test_compose_on_gaudi.sh @@ -35,6 +35,7 @@ function build_docker_images() { function start_services() { echo "Starting Docker Services...." cd $WORKPATH/docker_compose/intel/hpu/gaudi + export no_proxy="localhost,127.0.0.1,$ip_address" source ./set_env.sh # Start Docker Containers diff --git a/DocIndexRetriever/tests/test_compose_on_xeon.sh b/DocIndexRetriever/tests/test_compose_on_xeon.sh index 229e47efea..92289e8bf5 100644 --- a/DocIndexRetriever/tests/test_compose_on_xeon.sh +++ b/DocIndexRetriever/tests/test_compose_on_xeon.sh @@ -35,6 +35,7 @@ function build_docker_images() { function start_services() { echo "Starting Docker Services...." cd $WORKPATH/docker_compose/intel/cpu/xeon + export no_proxy="localhost,127.0.0.1,$ip_address" source ./set_env.sh # Start Docker Containers diff --git a/DocIndexRetriever/tests/test_compose_without_rerank_on_xeon.sh b/DocIndexRetriever/tests/test_compose_without_rerank_on_xeon.sh index 37c477b2ad..96a8da8f6d 100644 --- a/DocIndexRetriever/tests/test_compose_without_rerank_on_xeon.sh +++ b/DocIndexRetriever/tests/test_compose_without_rerank_on_xeon.sh @@ -39,6 +39,7 @@ function build_docker_images() { function start_services() { echo "Starting Docker Services...." 
cd $WORKPATH/docker_compose/intel/cpu/xeon + export no_proxy="localhost,127.0.0.1,$ip_address" source ./set_env.sh # Start Docker Containers diff --git a/DocSum/tests/test_compose_on_epyc.sh b/DocSum/tests/test_compose_on_epyc.sh index 79989d0ef7..a67e05c681 100644 --- a/DocSum/tests/test_compose_on_epyc.sh +++ b/DocSum/tests/test_compose_on_epyc.sh @@ -47,6 +47,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/amd/cpu/epyc/ + export no_proxy="localhost,127.0.0.1,$ip_address" docker compose -f compose.yaml up -d >${LOG_PATH}/start_services_with_compose.log sleep 1m } diff --git a/DocSum/tests/test_compose_on_gaudi.sh b/DocSum/tests/test_compose_on_gaudi.sh index 2e13e41c9d..033c532609 100644 --- a/DocSum/tests/test_compose_on_gaudi.sh +++ b/DocSum/tests/test_compose_on_gaudi.sh @@ -59,6 +59,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi + export no_proxy="localhost,127.0.0.1,$ip_address" docker compose -f compose.yaml up -d > ${LOG_PATH}/start_services_with_compose.log sleep 2m } diff --git a/DocSum/tests/test_compose_on_xeon.sh b/DocSum/tests/test_compose_on_xeon.sh index 2b897853f9..6e252ff9c6 100644 --- a/DocSum/tests/test_compose_on_xeon.sh +++ b/DocSum/tests/test_compose_on_xeon.sh @@ -45,6 +45,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon/ + export no_proxy="localhost,127.0.0.1,$ip_address" docker compose -f compose.yaml up -d > ${LOG_PATH}/start_services_with_compose.log sleep 1m } diff --git a/DocSum/tests/test_compose_tgi_on_epyc.sh b/DocSum/tests/test_compose_tgi_on_epyc.sh index b1caf7685c..33442bb270 100644 --- a/DocSum/tests/test_compose_tgi_on_epyc.sh +++ b/DocSum/tests/test_compose_tgi_on_epyc.sh @@ -47,6 +47,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/amd/cpu/epyc/ + export no_proxy="localhost,127.0.0.1,$ip_address" docker compose -f compose_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log sleep 1m } diff --git a/DocSum/tests/test_compose_tgi_on_gaudi.sh b/DocSum/tests/test_compose_tgi_on_gaudi.sh index c6133515cf..3ef9a92cff 100644 --- a/DocSum/tests/test_compose_tgi_on_gaudi.sh +++ b/DocSum/tests/test_compose_tgi_on_gaudi.sh @@ -45,6 +45,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi + export no_proxy="localhost,127.0.0.1,$ip_address" docker compose -f compose_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log sleep 1m } diff --git a/DocSum/tests/test_compose_tgi_on_xeon.sh b/DocSum/tests/test_compose_tgi_on_xeon.sh index e107bfca63..04cad66c37 100644 --- a/DocSum/tests/test_compose_tgi_on_xeon.sh +++ b/DocSum/tests/test_compose_tgi_on_xeon.sh @@ -45,6 +45,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon/ + export no_proxy="localhost,127.0.0.1,$ip_address" docker compose -f compose_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log sleep 1m } diff --git a/FinanceAgent/docker_compose/intel/hpu/gaudi/compose.yaml b/FinanceAgent/docker_compose/intel/hpu/gaudi/compose.yaml index 8c6d579c3c..2703fa9be0 100644 --- a/FinanceAgent/docker_compose/intel/hpu/gaudi/compose.yaml +++ b/FinanceAgent/docker_compose/intel/hpu/gaudi/compose.yaml @@ -24,7 +24,7 @@ x-common-agent-environment: services: vllm-service: - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} + image: opea/vllm-gaudi:1.22.0 container_name: 
vllm-gaudi-server ports: - "8086:8000" diff --git a/GraphRAG/tests/test_compose_on_gaudi.sh b/GraphRAG/tests/test_compose_on_gaudi.sh index b6852c962a..674001da13 100755 --- a/GraphRAG/tests/test_compose_on_gaudi.sh +++ b/GraphRAG/tests/test_compose_on_gaudi.sh @@ -35,7 +35,7 @@ function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi source set_env.sh unset OPENAI_API_KEY - + export no_proxy="localhost,127.0.0.1,$ip_address" # Start Docker Containers docker compose -f compose.yaml up -d > ${LOG_PATH}/start_services_with_compose.log diff --git a/HybridRAG/docker_compose/intel/hpu/gaudi/compose.yaml b/HybridRAG/docker_compose/intel/hpu/gaudi/compose.yaml index 1689f6a8fe..43d905cfff 100644 --- a/HybridRAG/docker_compose/intel/hpu/gaudi/compose.yaml +++ b/HybridRAG/docker_compose/intel/hpu/gaudi/compose.yaml @@ -140,7 +140,7 @@ services: HF_HUB_ENABLE_HF_TRANSFER: 0 command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + image: public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v0.10.1 container_name: vllm-service ports: - "9009:80" diff --git a/HybridRAG/docker_image_build/build.yaml b/HybridRAG/docker_image_build/build.yaml index 75dbef2c35..91bab4abde 100644 --- a/HybridRAG/docker_image_build/build.yaml +++ b/HybridRAG/docker_image_build/build.yaml @@ -19,12 +19,6 @@ services: dockerfile: comps/text2cypher/src/Dockerfile.intel_hpu extends: hybridrag image: ${REGISTRY:-opea}/text2cypher-gaudi:${TAG:-latest} - vllm: - build: - context: vllm - dockerfile: docker/Dockerfile.cpu - extends: hybridrag - image: ${REGISTRY:-opea}/vllm:${TAG:-latest} dataprep: build: context: GenAIComps diff --git a/HybridRAG/tests/test_compose_on_gaudi.sh b/HybridRAG/tests/test_compose_on_gaudi.sh index c870dbecd4..78e6ed161d 100755 --- a/HybridRAG/tests/test_compose_on_gaudi.sh +++ b/HybridRAG/tests/test_compose_on_gaudi.sh @@ -27,16 +27,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/vllm-project/vllm.git && cd vllm - VLLM_VER=v0.10.0 - - echo "Check out vLLM tag ${VLLM_VER}" - git checkout ${VLLM_VER} &> /dev/null - # make sure NOT change the pwd - cd ../ - echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="hybridrag hybridrag-ui dataprep retriever text2cypher-gaudi vllm nginx" + service_list="hybridrag hybridrag-ui dataprep retriever text2cypher-gaudi nginx" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log 2>&1 docker images && sleep 1s @@ -44,7 +36,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi - + export no_proxy="localhost,127.0.0.1,$ip_address" # Start Docker Containers docker compose -f compose.yaml up -d > ${LOG_PATH}/start_services_with_compose.log n=0 diff --git a/MultimodalQnA/tests/test_compose_milvus_on_epyc.sh b/MultimodalQnA/tests/test_compose_milvus_on_epyc.sh index 46b19822d8..5ab2354ffa 100644 --- a/MultimodalQnA/tests/test_compose_milvus_on_epyc.sh +++ b/MultimodalQnA/tests/test_compose_milvus_on_epyc.sh @@ -73,6 +73,7 @@ function setup_env() { function start_services() { echo "Starting services..." 
cd $WORKPATH/docker_compose/amd/cpu/epyc + export no_proxy="localhost,127.0.0.1,$ip_address" docker compose -f compose_milvus.yaml up -d > ${LOG_PATH}/start_services_with_compose.log sleep 2m echo "Services started." diff --git a/MultimodalQnA/tests/test_compose_milvus_on_xeon.sh b/MultimodalQnA/tests/test_compose_milvus_on_xeon.sh index f63877e6f8..4c7a1f6120 100644 --- a/MultimodalQnA/tests/test_compose_milvus_on_xeon.sh +++ b/MultimodalQnA/tests/test_compose_milvus_on_xeon.sh @@ -71,6 +71,7 @@ function setup_env() { function start_services() { echo "Starting services..." cd $WORKPATH/docker_compose/intel/cpu/xeon + export no_proxy="localhost,127.0.0.1,$ip_address" docker compose -f compose_milvus.yaml up -d > ${LOG_PATH}/start_services_with_compose.log sleep 2m echo "Services started." diff --git a/MultimodalQnA/tests/test_compose_on_epyc.sh b/MultimodalQnA/tests/test_compose_on_epyc.sh index fe753930b3..e23c018906 100644 --- a/MultimodalQnA/tests/test_compose_on_epyc.sh +++ b/MultimodalQnA/tests/test_compose_on_epyc.sh @@ -72,6 +72,7 @@ function setup_env() { function start_services() { echo "Starting services..." cd $WORKPATH/docker_compose/amd/cpu/epyc + export no_proxy="localhost,127.0.0.1,$ip_address" docker compose -f compose.yaml up -d > ${LOG_PATH}/start_services_with_compose.log sleep 2m echo "Services started." diff --git a/MultimodalQnA/tests/test_compose_on_gaudi.sh b/MultimodalQnA/tests/test_compose_on_gaudi.sh index 4f9c637936..107a5212de 100644 --- a/MultimodalQnA/tests/test_compose_on_gaudi.sh +++ b/MultimodalQnA/tests/test_compose_on_gaudi.sh @@ -67,7 +67,7 @@ function setup_env() { function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi - + export no_proxy="localhost,127.0.0.1,$ip_address" # Start Docker Containers docker compose -f compose.yaml up -d > ${LOG_PATH}/start_services_with_compose.log sleep 1m diff --git a/MultimodalQnA/tests/test_compose_on_xeon.sh b/MultimodalQnA/tests/test_compose_on_xeon.sh index 47a5051557..f7de0bfef6 100644 --- a/MultimodalQnA/tests/test_compose_on_xeon.sh +++ b/MultimodalQnA/tests/test_compose_on_xeon.sh @@ -70,6 +70,7 @@ function setup_env() { function start_services() { echo "Starting services..." cd $WORKPATH/docker_compose/intel/cpu/xeon + export no_proxy="localhost,127.0.0.1,$ip_address" docker compose -f compose.yaml up -d > ${LOG_PATH}/start_services_with_compose.log sleep 2m echo "Services started." 
diff --git a/MultimodalQnA/tests/test_compose_vllm_on_rocm.sh b/MultimodalQnA/tests/test_compose_vllm_on_rocm.sh index 56774e87a7..1283282d08 100644 --- a/MultimodalQnA/tests/test_compose_vllm_on_rocm.sh +++ b/MultimodalQnA/tests/test_compose_vllm_on_rocm.sh @@ -44,6 +44,7 @@ function setup_env() { function start_services() { cd $WORKPATH/docker_compose/amd/gpu/rocm + export no_proxy="localhost,127.0.0.1,$ip_address" docker compose -f compose_vllm.yaml up -d > ${LOG_PATH}/start_services_with_compose.log n=0 until [[ "$n" -ge 100 ]]; do diff --git a/SearchQnA/tests/test_compose_on_epyc.sh b/SearchQnA/tests/test_compose_on_epyc.sh index f6edbc01cd..8c13836682 100644 --- a/SearchQnA/tests/test_compose_on_epyc.sh +++ b/SearchQnA/tests/test_compose_on_epyc.sh @@ -39,6 +39,7 @@ function start_services() { export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:3008/v1/searchqna" export host_ip=${ip_address} export LOGFLAG=true + export no_proxy="localhost,127.0.0.1,$ip_address" source ./set_env.sh sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env diff --git a/SearchQnA/tests/test_compose_on_gaudi.sh b/SearchQnA/tests/test_compose_on_gaudi.sh index 7744b15be4..00ccab8fe1 100644 --- a/SearchQnA/tests/test_compose_on_gaudi.sh +++ b/SearchQnA/tests/test_compose_on_gaudi.sh @@ -38,6 +38,7 @@ function start_services() { export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:3008/v1/searchqna" export host_ip=${ip_address} export LOGFLAG=true + export no_proxy="localhost,127.0.0.1,$ip_address" source ./set_env.sh cd hpu/gaudi diff --git a/SearchQnA/tests/test_compose_on_xeon.sh b/SearchQnA/tests/test_compose_on_xeon.sh index 0c05e4dcb8..94d7100abd 100644 --- a/SearchQnA/tests/test_compose_on_xeon.sh +++ b/SearchQnA/tests/test_compose_on_xeon.sh @@ -37,6 +37,7 @@ function start_services() { export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:3008/v1/searchqna" export host_ip=${ip_address} export LOGFLAG=true + export no_proxy="localhost,127.0.0.1,$ip_address" source ./set_env.sh cd cpu/xeon diff --git a/Translation/tests/test_compose_on_epyc.sh b/Translation/tests/test_compose_on_epyc.sh index 57abfdcfc2..1d529b5938 100644 --- a/Translation/tests/test_compose_on_epyc.sh +++ b/Translation/tests/test_compose_on_epyc.sh @@ -36,6 +36,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/amd/cpu/epyc/ export host_ip=${ip_address} + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env diff --git a/Translation/tests/test_compose_on_gaudi.sh b/Translation/tests/test_compose_on_gaudi.sh index 9d47a03ae9..674f396feb 100644 --- a/Translation/tests/test_compose_on_gaudi.sh +++ b/Translation/tests/test_compose_on_gaudi.sh @@ -34,6 +34,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel export host_ip=${ip_address} + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh cd hpu/gaudi diff --git a/Translation/tests/test_compose_on_xeon.sh b/Translation/tests/test_compose_on_xeon.sh index 6d301eeea4..99b4fcacfb 100644 --- a/Translation/tests/test_compose_on_xeon.sh +++ b/Translation/tests/test_compose_on_xeon.sh @@ -34,6 +34,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel export host_ip=${ip_address} + export no_proxy="localhost,127.0.0.1,$ip_address" source set_env.sh cd cpu/xeon diff --git a/VideoQnA/tests/test_compose_on_xeon.sh b/VideoQnA/tests/test_compose_on_xeon.sh 
index 8ee2534fa4..2e9ecea39d 100755 --- a/VideoQnA/tests/test_compose_on_xeon.sh +++ b/VideoQnA/tests/test_compose_on_xeon.sh @@ -42,7 +42,7 @@ function start_services() { echo "Starting services..." cd $WORKPATH/docker_compose/intel/cpu/xeon/ source ./set_env.sh - + export no_proxy="localhost,127.0.0.1,$ip_address" docker volume create video-llama-model docker volume create videoqna-cache docker compose up vdms-vector-db dataprep -d diff --git a/VisualQnA/tests/test_compose_on_epyc.sh b/VisualQnA/tests/test_compose_on_epyc.sh index e702b40066..3edc314ef7 100644 --- a/VisualQnA/tests/test_compose_on_epyc.sh +++ b/VisualQnA/tests/test_compose_on_epyc.sh @@ -35,6 +35,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/amd/cpu/epyc/ source ./set_env.sh + export no_proxy="localhost,127.0.0.1,$ip_address" sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env # Start Docker Containers docker compose up -d >${LOG_PATH}/start_services_with_compose.log diff --git a/VisualQnA/tests/test_compose_on_xeon.sh b/VisualQnA/tests/test_compose_on_xeon.sh index 7ff1ccf0aa..28e1729304 100644 --- a/VisualQnA/tests/test_compose_on_xeon.sh +++ b/VisualQnA/tests/test_compose_on_xeon.sh @@ -32,6 +32,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon/ + export no_proxy="localhost,127.0.0.1,$ip_address" source ./set_env.sh sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env # Start Docker Containers diff --git a/VisualQnA/tests/test_compose_tgi_on_epyc.sh b/VisualQnA/tests/test_compose_tgi_on_epyc.sh index e61afa9baa..5dcadd5167 100644 --- a/VisualQnA/tests/test_compose_tgi_on_epyc.sh +++ b/VisualQnA/tests/test_compose_tgi_on_epyc.sh @@ -39,6 +39,7 @@ function start_services() { export HF_TOKEN=${HF_TOKEN} export NGINX_PORT=80 export host_ip=${ip_address} + export no_proxy="localhost,127.0.0.1,$ip_address" source ./set_env.sh sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env diff --git a/VisualQnA/tests/test_compose_tgi_on_gaudi.sh b/VisualQnA/tests/test_compose_tgi_on_gaudi.sh index ba49821249..e80ad08dbe 100644 --- a/VisualQnA/tests/test_compose_tgi_on_gaudi.sh +++ b/VisualQnA/tests/test_compose_tgi_on_gaudi.sh @@ -37,6 +37,7 @@ function start_services() { export HF_TOKEN=${HF_TOKEN} export NGINX_PORT=80 export host_ip=${ip_address} + export no_proxy="localhost,127.0.0.1,$ip_address" source ./set_env.sh export LVM_MODEL_ID="llava-hf/llava-v1.6-mistral-7b-hf" diff --git a/VisualQnA/tests/test_compose_tgi_on_xeon.sh b/VisualQnA/tests/test_compose_tgi_on_xeon.sh index 270b638350..4a7fc03004 100644 --- a/VisualQnA/tests/test_compose_tgi_on_xeon.sh +++ b/VisualQnA/tests/test_compose_tgi_on_xeon.sh @@ -37,6 +37,7 @@ function start_services() { export HF_TOKEN=${HF_TOKEN} export NGINX_PORT=80 export host_ip=${ip_address} + export no_proxy="localhost,127.0.0.1,$ip_address" source ./set_env.sh sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env From b611e1095ba289737a799f140b4da661072e711a Mon Sep 17 00:00:00 2001 From: ZePan110 Date: Fri, 19 Sep 2025 16:06:30 +0800 Subject: [PATCH 23/78] Use built image opea/vllm-gaudi:1.4 for AgentQnA, CodeGen, CodeTrans, DocSum and VisualQnA. 
(#2277) Signed-off-by: ZePan110 --- AgentQnA/docker_compose/intel/hpu/gaudi/compose.yaml | 2 +- AgentQnA/docker_image_build/build.yaml | 6 ------ AgentQnA/tests/step1_build_images.sh | 9 ++------- CodeGen/docker_compose/intel/hpu/gaudi/compose.yaml | 2 +- CodeGen/docker_image_build/build.yaml | 6 ------ CodeGen/tests/test_compose_on_gaudi.sh | 8 +------- CodeTrans/docker_compose/intel/hpu/gaudi/compose.yaml | 2 +- CodeTrans/docker_image_build/build.yaml | 6 ------ CodeTrans/tests/test_compose_on_gaudi.sh | 6 +----- DocSum/docker_compose/intel/hpu/gaudi/compose.yaml | 2 +- DocSum/docker_image_build/build.yaml | 6 ------ DocSum/tests/test_compose_on_gaudi.sh | 6 +----- VisualQnA/docker_compose/intel/hpu/gaudi/compose.yaml | 2 +- VisualQnA/docker_image_build/build.yaml | 6 ------ VisualQnA/tests/test_compose_on_gaudi.sh | 6 +----- 15 files changed, 11 insertions(+), 64 deletions(-) diff --git a/AgentQnA/docker_compose/intel/hpu/gaudi/compose.yaml b/AgentQnA/docker_compose/intel/hpu/gaudi/compose.yaml index 5f197b49c2..8680b83443 100644 --- a/AgentQnA/docker_compose/intel/hpu/gaudi/compose.yaml +++ b/AgentQnA/docker_compose/intel/hpu/gaudi/compose.yaml @@ -128,7 +128,7 @@ services: OPENAI_API_BASE_URLS: ${SUPERVISOR_AGENT_ENDPOINT} ENABLE_OLLAMA_API: False vllm-service: - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} + image: opea/vllm-gaudi:1.4 container_name: vllm-gaudi-server ports: - "8086:8000" diff --git a/AgentQnA/docker_image_build/build.yaml b/AgentQnA/docker_image_build/build.yaml index 7db63b6fa8..cf757968de 100644 --- a/AgentQnA/docker_image_build/build.yaml +++ b/AgentQnA/docker_image_build/build.yaml @@ -17,12 +17,6 @@ services: dockerfile: ./docker/Dockerfile extends: agent image: ${REGISTRY:-opea}/agent-ui:${TAG:-latest} - vllm-gaudi: - build: - context: vllm-fork - dockerfile: Dockerfile.hpu - extends: agent - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} vllm-rocm: build: context: GenAIComps diff --git a/AgentQnA/tests/step1_build_images.sh b/AgentQnA/tests/step1_build_images.sh index 58b5c8d6e8..5a13c8a52b 100644 --- a/AgentQnA/tests/step1_build_images.sh +++ b/AgentQnA/tests/step1_build_images.sh @@ -40,12 +40,8 @@ function build_agent_docker_image_gaudi_vllm() { cd $WORKDIR/GenAIExamples/AgentQnA/docker_image_build/ get_genai_comps - git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork - VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 - git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ - echo "Build agent image with --no-cache..." 
- service_list="agent agent-ui vllm-gaudi" + service_list="agent agent-ui" docker compose -f build.yaml build ${service_list} --no-cache } @@ -83,6 +79,7 @@ function main() { "rocm_vllm") echo "==================== Build agent docker image for ROCm VLLM ====================" build_agent_docker_image_rocm_vllm + docker image ls | grep vllm ;; "gaudi_vllm") echo "==================== Build agent docker image for Gaudi ====================" @@ -97,8 +94,6 @@ function main() { exit 1 ;; esac - - docker image ls | grep vllm } main $1 diff --git a/CodeGen/docker_compose/intel/hpu/gaudi/compose.yaml b/CodeGen/docker_compose/intel/hpu/gaudi/compose.yaml index 2e56d1b913..92e2cb02da 100644 --- a/CodeGen/docker_compose/intel/hpu/gaudi/compose.yaml +++ b/CodeGen/docker_compose/intel/hpu/gaudi/compose.yaml @@ -3,7 +3,7 @@ services: vllm-service: - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} + image: opea/vllm-gaudi:1.4 container_name: vllm-gaudi-server ports: - "8028:80" diff --git a/CodeGen/docker_image_build/build.yaml b/CodeGen/docker_image_build/build.yaml index 8308291eb2..f0bfa23580 100644 --- a/CodeGen/docker_image_build/build.yaml +++ b/CodeGen/docker_image_build/build.yaml @@ -78,12 +78,6 @@ services: dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu extends: codegen image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest} - vllm-gaudi: - build: - context: vllm-fork - dockerfile: Dockerfile.hpu - extends: codegen - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} dataprep: build: context: GenAIComps diff --git a/CodeGen/tests/test_compose_on_gaudi.sh b/CodeGen/tests/test_compose_on_gaudi.sh index 9e0339f07a..4006df050d 100644 --- a/CodeGen/tests/test_compose_on_gaudi.sh +++ b/CodeGen/tests/test_compose_on_gaudi.sh @@ -25,14 +25,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - # Download Gaudi vllm of latest tag - git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork - VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 - echo "Check out vLLM tag ${VLLM_FORK_VER}" - git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ - echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
- service_list="codegen codegen-gradio-ui llm-textgen vllm-gaudi dataprep retriever embedding" + service_list="codegen codegen-gradio-ui llm-textgen dataprep retriever embedding" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/CodeTrans/docker_compose/intel/hpu/gaudi/compose.yaml b/CodeTrans/docker_compose/intel/hpu/gaudi/compose.yaml index 60728feabf..96ea18b3b0 100644 --- a/CodeTrans/docker_compose/intel/hpu/gaudi/compose.yaml +++ b/CodeTrans/docker_compose/intel/hpu/gaudi/compose.yaml @@ -3,7 +3,7 @@ services: vllm-service: - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} + image: opea/vllm-gaudi:1.4 container_name: codetrans-gaudi-vllm-service ports: - "8008:80" diff --git a/CodeTrans/docker_image_build/build.yaml b/CodeTrans/docker_image_build/build.yaml index ad63a520b6..f0d9797c80 100644 --- a/CodeTrans/docker_image_build/build.yaml +++ b/CodeTrans/docker_image_build/build.yaml @@ -25,12 +25,6 @@ services: dockerfile: comps/llms/src/text-generation/Dockerfile extends: codetrans image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest} - vllm-gaudi: - build: - context: vllm-fork - dockerfile: Dockerfile.hpu - extends: codetrans - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} nginx: build: context: GenAIComps diff --git a/CodeTrans/tests/test_compose_on_gaudi.sh b/CodeTrans/tests/test_compose_on_gaudi.sh index 759636fbd4..ae6582473a 100644 --- a/CodeTrans/tests/test_compose_on_gaudi.sh +++ b/CodeTrans/tests/test_compose_on_gaudi.sh @@ -25,12 +25,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork - VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 - git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ - echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
- service_list="codetrans codetrans-ui llm-textgen vllm-gaudi nginx" + service_list="codetrans codetrans-ui llm-textgen nginx" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s diff --git a/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml b/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml index 9311817f32..fbc10c06ca 100644 --- a/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml +++ b/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml @@ -3,7 +3,7 @@ services: vllm-service: - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} + image: opea/vllm-gaudi:1.4 container_name: docsum-gaudi-vllm-service ports: - ${LLM_ENDPOINT_PORT:-8008}:80 diff --git a/DocSum/docker_image_build/build.yaml b/DocSum/docker_image_build/build.yaml index d26c32fe5a..dcd4433ad0 100644 --- a/DocSum/docker_image_build/build.yaml +++ b/DocSum/docker_image_build/build.yaml @@ -54,9 +54,3 @@ services: context: GenAIComps dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest} - vllm-gaudi: - build: - context: vllm-fork - dockerfile: Dockerfile.hpu - extends: docsum - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} diff --git a/DocSum/tests/test_compose_on_gaudi.sh b/DocSum/tests/test_compose_on_gaudi.sh index 033c532609..598e373524 100644 --- a/DocSum/tests/test_compose_on_gaudi.sh +++ b/DocSum/tests/test_compose_on_gaudi.sh @@ -46,12 +46,8 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork - VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 - git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ - echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
- service_list="docsum docsum-gradio-ui whisper llm-docsum vllm-gaudi" + service_list="docsum docsum-gradio-ui whisper llm-docsum" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log 2>&1 docker images && sleep 1s diff --git a/VisualQnA/docker_compose/intel/hpu/gaudi/compose.yaml b/VisualQnA/docker_compose/intel/hpu/gaudi/compose.yaml index 3430ac1052..aa2ae3b1fe 100644 --- a/VisualQnA/docker_compose/intel/hpu/gaudi/compose.yaml +++ b/VisualQnA/docker_compose/intel/hpu/gaudi/compose.yaml @@ -3,7 +3,7 @@ services: vllm-gaudi-service: - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} + image: opea/vllm-gaudi:1.4 container_name: vllm-gaudi-service ports: - ${VLLM_PORT:-8399}:80 diff --git a/VisualQnA/docker_image_build/build.yaml b/VisualQnA/docker_image_build/build.yaml index 5693935fb6..5213802652 100644 --- a/VisualQnA/docker_image_build/build.yaml +++ b/VisualQnA/docker_image_build/build.yaml @@ -37,9 +37,3 @@ services: dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu extends: visualqna image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest} - vllm-gaudi: - build: - context: vllm-fork - dockerfile: Dockerfile.hpu - extends: visualqna - image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} diff --git a/VisualQnA/tests/test_compose_on_gaudi.sh b/VisualQnA/tests/test_compose_on_gaudi.sh index 1df520183c..29d50bae5e 100644 --- a/VisualQnA/tests/test_compose_on_gaudi.sh +++ b/VisualQnA/tests/test_compose_on_gaudi.sh @@ -26,11 +26,7 @@ function build_docker_images() { docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s - git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork - VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0 - git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../ - - service_list="visualqna visualqna-ui lvm nginx vllm-gaudi" + service_list="visualqna visualqna-ui lvm nginx" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log docker images && sleep 1s } From c5e51892ddd8fd3c1d9239c202808b4d6293bdfd Mon Sep 17 00:00:00 2001 From: WenjiaoYue Date: Tue, 23 Sep 2025 08:45:53 +0800 Subject: [PATCH 24/78] UI sync with current codeGen backend (#2252) Signed-off-by: WenjiaoYue Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../intel/cpu/xeon/compose.yaml | 2 +- .../intel/cpu/xeon/compose_tgi.yaml | 2 +- CodeGen/tests/test_compose_on_xeon.sh | 12 +- .../svelte/src/lib/modules/chat/Output.svelte | 226 ++++++++++++--- CodeGen/ui/svelte/src/routes/+page.svelte | 265 ++++++++++++++---- 5 files changed, 407 insertions(+), 100 deletions(-) diff --git a/CodeGen/docker_compose/intel/cpu/xeon/compose.yaml b/CodeGen/docker_compose/intel/cpu/xeon/compose.yaml index e4ae6c255b..99cbe8a373 100644 --- a/CodeGen/docker_compose/intel/cpu/xeon/compose.yaml +++ b/CodeGen/docker_compose/intel/cpu/xeon/compose.yaml @@ -66,7 +66,7 @@ services: ipc: host restart: always codegen-xeon-ui-server: - image: ${REGISTRY:-opea}/codegen-gradio-ui:${TAG:-latest} + image: ${REGISTRY:-opea}/codegen-ui:${TAG:-latest} container_name: codegen-xeon-ui-server depends_on: - codegen-xeon-backend-server diff --git a/CodeGen/docker_compose/intel/cpu/xeon/compose_tgi.yaml b/CodeGen/docker_compose/intel/cpu/xeon/compose_tgi.yaml index 7c1c3802e5..0da9cdddd3 100644 --- a/CodeGen/docker_compose/intel/cpu/xeon/compose_tgi.yaml +++ 
b/CodeGen/docker_compose/intel/cpu/xeon/compose_tgi.yaml @@ -66,7 +66,7 @@ services: ipc: host restart: always codegen-xeon-ui-server: - image: ${REGISTRY:-opea}/codegen-gradio-ui:${TAG:-latest} + image: ${REGISTRY:-opea}/codegen-ui:${TAG:-latest} container_name: codegen-xeon-ui-server depends_on: - codegen-xeon-backend-server diff --git a/CodeGen/tests/test_compose_on_xeon.sh b/CodeGen/tests/test_compose_on_xeon.sh index 5be0455d74..ba75e95aad 100644 --- a/CodeGen/tests/test_compose_on_xeon.sh +++ b/CodeGen/tests/test_compose_on_xeon.sh @@ -27,7 +27,7 @@ function build_docker_images() { popd && sleep 1s echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="codegen codegen-gradio-ui llm-textgen dataprep retriever embedding" + service_list="codegen codegen-ui llm-textgen dataprep retriever embedding" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log @@ -174,6 +174,8 @@ function validate_frontend() { npm install && npm ci && npx playwright install --with-deps node -v && npm -v && pip list + export no_proxy="localhost,127.0.0.1,$ip_address" + exit_status=0 npx playwright test || exit_status=$? @@ -244,8 +246,12 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_gradio" - validate_gradio + # echo "::group::validate_gradio" + # validate_gradio + # echo "::endgroup::" + + echo "::group::validate_ui" + validate_frontend echo "::endgroup::" stop_docker "${docker_compose_files[${i}]}" diff --git a/CodeGen/ui/svelte/src/lib/modules/chat/Output.svelte b/CodeGen/ui/svelte/src/lib/modules/chat/Output.svelte index 015375ad6f..5bf3012431 100644 --- a/CodeGen/ui/svelte/src/lib/modules/chat/Output.svelte +++ b/CodeGen/ui/svelte/src/lib/modules/chat/Output.svelte @@ -37,12 +37,24 @@ import bash from "svelte-highlight/languages/bash"; import sql from "svelte-highlight/languages/sql"; import { marked } from "marked"; - export let label = ""; + import { afterUpdate, onMount } from "svelte"; + export let output = ""; - export let languages = "Python"; + export let lang = "Python"; export let isCode = false; + export let md_output = ""; + export let segments: Segment[] = []; + let outputEl: HTMLDivElement; let copyText = "copy"; + let shouldAutoscroll = true; + + type Segment = { + id: number; + type: "text" | "code"; + content: string; + lang?: string; + }; const languagesTag = { Typescript: typescript, @@ -65,53 +77,194 @@ Lua: lua, Bash: bash, Sql: sql, - } as { [key: string]: any }; - - function copyToClipboard(text) { - const textArea = document.createElement("textarea"); - textArea.value = text; - document.body.appendChild(textArea); - textArea.select(); - document.execCommand("copy"); - document.body.removeChild(textArea); + } as const; + + type LangKey = keyof typeof languagesTag; + + const aliasMap: Record = { + javascript: "Javascript", + js: "Javascript", + jsx: "Javascript", + typescript: "Typescript", + ts: "Typescript", + tsx: "Typescript", + + python: "Python", + py: "Python", + + c: "C", + "c++": "Cpp", + cpp: "Cpp", + cxx: "Cpp", + csharp: "Csharp", + "c#": "Csharp", + + go: "Go", + golang: "Go", + java: "Java", + swift: "Swift", + ruby: "Ruby", + rust: "Rust", + php: "Php", + kotlin: "Kotlin", + objectivec: "Objectivec", + objc: "Objectivec", + "objective-c": "Objectivec", + perl: "Perl", + matlab: "Matlab", + r: "R", + lua: "Lua", + + bash: "Bash", + sh: "Bash", + shell: "Bash", + zsh: "Bash", + + sql: "Sql", + }; + + $: normalizedLangKey = (() => { + const 
raw = (lang ?? "").toString().trim(); + if (!raw) return null; + const lower = raw.toLowerCase(); + + if (lower in aliasMap) return aliasMap[lower]; + + const hit = (Object.keys(languagesTag) as LangKey[]).find( + (k) => k.toLowerCase() === lower + ); + return hit ?? null; + })(); + + $: fullText = buildFullText(); + + function atBottom(el: HTMLElement, threshold = 8) { + return el.scrollHeight - el.scrollTop - el.clientHeight <= threshold; + } + + function handleScroll() { + if (!outputEl) return; + shouldAutoscroll = atBottom(outputEl); } - function handelCopy() { - copyToClipboard(output); + function scrollToBottom() { + if (!outputEl) return; + requestAnimationFrame(() => + requestAnimationFrame(() => { + if (outputEl.scrollHeight) { + outputEl.scrollTop = outputEl.scrollHeight; + } + }) + ); + } + + onMount(() => { + scrollToBottom(); + }); + + afterUpdate(() => { + if (shouldAutoscroll) scrollToBottom(); + }); + async function copyAllFromDiv() { + await navigator.clipboard.writeText(outputEl.innerText); copyText = "copied!"; - setTimeout(() => { - copyText = "copy"; - }, 1000); + setTimeout(() => (copyText = "copy"), 1000); + } + + function copyToClipboard(text: string) { + if (navigator?.clipboard?.writeText) { + navigator.clipboard.writeText(text); + } else { + const textArea = document.createElement("textarea"); + textArea.value = text; + document.body.appendChild(textArea); + textArea.select(); + document.execCommand("copy"); + document.body.removeChild(textArea); + } + } + + function normalizeToKey(raw?: string | null) { + const s = (raw ?? "").trim().toLowerCase(); + if (!s) return null; + if (s in aliasMap) return aliasMap[s as keyof typeof aliasMap]; + const hit = ( + Object.keys(languagesTag) as (keyof typeof languagesTag)[] + ).find((k) => k.toLowerCase() === s); + return hit ?? null; + } + + function buildFullText(): string { + if (segments && segments.length > 0) { + return segments + .map((seg) => { + if (seg.type === "code") { + const key = normalizeToKey(seg.lang) ?? "text"; + return ["```" + key.toLowerCase(), seg.content, "```"].join("\n"); + } + return seg.content; + }) + .join("\n\n"); + } + + const parts: string[] = []; + if (isCode && output) { + const key = (normalizedLangKey ?? "text").toLowerCase(); + parts.push(["```" + key, output, "```"].join("\n")); + } + if (md_output) { + parts.push(md_output); + } + return parts.join("\n\n"); }
- {label} -
{copyText}
+
- {#if isCode} - - - + {#if segments && segments.length > 0} + {#each segments as seg (seg.id)} + {#if seg.type === "code"} +
+ + + +
+ {:else} +
{@html marked(seg.content)}
+ {/if} + {/each} {:else} -
- {@html marked(output)} -
+ {#if isCode && output} + + + + {/if} + {#if md_output} +
+ {@html marked(md_output)} +
+ {/if} {/if}
@@ -120,17 +273,8 @@ .hiddenScroll::-webkit-scrollbar { display: none; } - .hiddenScroll { -ms-overflow-style: none; /* IE and Edge */ scrollbar-width: none; /* Firefox */ } - - .code-format-style { - resize: none; - font-size: 16px; - border: solid rgba(128, 0, 128, 0) 4px; - box-shadow: 0 0 8px rgba(0, 0, 0, 0.19); - transition: 0.1s linear; - } diff --git a/CodeGen/ui/svelte/src/routes/+page.svelte b/CodeGen/ui/svelte/src/routes/+page.svelte index 0e7d43beaf..d927ea9322 100644 --- a/CodeGen/ui/svelte/src/routes/+page.svelte +++ b/CodeGen/ui/svelte/src/routes/+page.svelte @@ -23,38 +23,196 @@ import PaperAirplane from "$lib/assets/chat/svelte/PaperAirplane.svelte"; import Output from "$lib/modules/chat/Output.svelte"; - let code_output: string = ""; let query: string = ""; let loading: boolean = false; - let deleteFlag: boolean = false; + let inFence = false; + let tickRun = 0; + let skipLangLine = false; + let langBuf = ""; + let currentLang = ""; + + type Segment = { + id: number; + type: "text" | "code"; + content: string; + lang?: string; + }; + let segments: Segment[] = []; + let _sid = 0; + + const languageAliases: Record = { + javascript: "Javascript", + js: "Javascript", + jsx: "Javascript", + typescript: "Typescript", + ts: "Typescript", + tsx: "Typescript", + + python: "Python", + py: "Python", + + c: "C", + "c++": "Cpp", + cpp: "Cpp", + cxx: "Cpp", + csharp: "Csharp", + "c#": "Csharp", + + go: "Go", + golang: "Go", + java: "Java", + swift: "Swift", + ruby: "Ruby", + rust: "Rust", + php: "Php", + kotlin: "Kotlin", + objectivec: "Objectivec", + objc: "Objectivec", + "objective-c": "Objectivec", + perl: "Perl", + matlab: "Matlab", + r: "R", + lua: "Lua", + + bash: "Bash", + sh: "Bash", + shell: "Bash", + zsh: "Bash", + + sql: "Sql", + }; + + function canonicalLang(raw?: string | null): string | null { + const s = (raw ?? "").toString().trim(); + if (!s) return null; + const lower = s.toLowerCase(); + return languageAliases[lower] ?? s; + } + + function appendText(s: string) { + if (!s) return; + const last = segments[segments.length - 1]; + if (!last || last.type !== "text") { + segments = [...segments, { id: ++_sid, type: "text", content: "" }]; + } + segments[segments.length - 1].content += s; + } + + function appendCode(s: string) { + if (!s) return; + const last = segments[segments.length - 1]; + if (!last || last.type !== "code") { + segments = [ + ...segments, + { + id: ++_sid, + type: "code", + content: "", + lang: currentLang || "python", + }, + ]; + } + segments[segments.length - 1].content += s; + } + + function settleTicks() { + if (tickRun === 0) return; + + if (tickRun >= 3) { + const toggles = Math.floor(tickRun / 3); + for (let i = 0; i < toggles; i++) { + inFence = !inFence; + if (inFence) { + skipLangLine = true; + langBuf = ""; + currentLang = ""; + } else { + skipLangLine = false; + } + } + const leftovers = tickRun % 3; + if (leftovers) (inFence ? appendCode : appendText)("`".repeat(leftovers)); + } else { + (inFence ? appendCode : appendText)("`".repeat(tickRun)); + } + tickRun = 0; + } + + function consumeChunk(s: string) { + for (let i = 0; i < s.length; i++) { + const ch = s[i]; + + if (ch === "`") { + tickRun++; + continue; + } + + settleTicks(); + + if (skipLangLine) { + if (ch === "\n") { + skipLangLine = false; + const canon = canonicalLang(langBuf); + currentLang = canon ?? 
(langBuf.trim() || "python"); + langBuf = ""; + } else { + langBuf += ch; + } + continue; + } + + if (inFence) appendCode(ch); + else appendText(ch); + } + } const callTextStream = async (query: string) => { loading = true; - code_output = ""; + + segments = []; + _sid = 0; + inFence = false; + tickRun = 0; + skipLangLine = false; + langBuf = ""; + currentLang = ""; + const eventSource = await fetchTextStream(query); eventSource.addEventListener("message", (e: any) => { - let res = e.data; + const raw = String(e.data); + const payloads = raw + .split(/\r?\n/) + .map((l) => l.replace(/^data:\s*/, "").trim()) + .filter((l) => l.length > 0); - if (res === "[DONE]") { - deleteFlag = false; - loading = false; - query = ''; - } else { - let Msg = JSON.parse(res).choices[0].text; - if (Msg.includes("'''")) { - deleteFlag = true; - } else if (deleteFlag && Msg.includes("\\n")) { - deleteFlag = false; - } else if (Msg !== "" && !deleteFlag) { - code_output += Msg.replace(/\\n/g, "\n"); - } + for (const part of payloads) { + if (part === "[DONE]") { + settleTicks(); + loading = false; + return; } + try { + const json = JSON.parse(part); + const msg = + json.choices?.[0]?.delta?.content ?? json.choices?.[0]?.text ?? ""; + if (!msg || msg === "") continue; + consumeChunk(msg); + } catch (err) { + console.error("JSON chunk parse error:", err, part); + } + } + }); + + eventSource.addEventListener("error", () => { + loading = false; }); + eventSource.stream(); }; const handleTextSubmit = async () => { + if (!query) return; await callTextStream(query); }; @@ -62,48 +220,47 @@
-
-
-
- +
+
+ { - if (event.key === "Enter" && !event.shiftKey && query) { - event.preventDefault(); - handleTextSubmit(); - } - }} - /> - -
+ type="text" + data-testid="code-input" + placeholder="Enter prompt here" + disabled={loading} + maxlength="1200" + bind:value={query} + on:keydown={(event) => { + if (event.key === "Enter" && !event.shiftKey && query) { + event.preventDefault(); + handleTextSubmit(); + } + }} + /> + +
- {#if code_output !== ""} -
- -
+ {#if segments.length} +
+ +
{/if} {#if loading} From 301b98bc582248dcafe1da1ea26aaae089fa07d4 Mon Sep 17 00:00:00 2001 From: ZePan110 Date: Tue, 23 Sep 2025 10:49:50 +0800 Subject: [PATCH 25/78] refine UI test (#2238) Signed-off-by: ZePan110 --- .github/workflows/_run-docker-compose.yml | 5 ++ ChatQnA/tests/test_compose_faqgen_on_epyc.sh | 35 -------- ChatQnA/tests/test_compose_faqgen_on_gaudi.sh | 32 -------- ChatQnA/tests/test_compose_faqgen_on_rocm.sh | 34 -------- ChatQnA/tests/test_compose_faqgen_on_xeon.sh | 32 -------- .../tests/test_compose_faqgen_tgi_on_epyc.sh | 35 -------- .../tests/test_compose_faqgen_tgi_on_gaudi.sh | 32 -------- .../tests/test_compose_faqgen_tgi_on_xeon.sh | 32 -------- .../tests/test_compose_faqgen_vllm_on_rocm.sh | 33 -------- .../tests/test_compose_guardrails_on_gaudi.sh | 32 -------- ChatQnA/tests/test_compose_milvus_on_epyc.sh | 37 --------- ChatQnA/tests/test_compose_milvus_on_xeon.sh | 34 -------- ChatQnA/tests/test_compose_on_epyc.sh | 37 --------- ChatQnA/tests/test_compose_on_gaudi.sh | 34 -------- ChatQnA/tests/test_compose_on_rocm.sh | 34 -------- ChatQnA/tests/test_compose_on_xeon.sh | 33 -------- .../tests/test_compose_openeuler_on_xeon.sh | 4 - .../tests/test_compose_pinecone_on_epyc.sh | 33 -------- .../tests/test_compose_pinecone_on_xeon.sh | 30 ------- ChatQnA/tests/test_compose_qdrant_on_epyc.sh | 34 -------- ChatQnA/tests/test_compose_qdrant_on_xeon.sh | 31 -------- ChatQnA/tests/test_compose_tgi_on_epyc.sh | 37 --------- ChatQnA/tests/test_compose_tgi_on_gaudi.sh | 34 -------- ChatQnA/tests/test_compose_tgi_on_xeon.sh | 34 -------- ChatQnA/tests/test_compose_ui_on_xeon.sh | 79 +++++++++++++++++++ ChatQnA/tests/test_compose_vllm_on_rocm.sh | 33 -------- .../test_compose_without_rerank_on_epyc.sh | 37 --------- .../test_compose_without_rerank_on_gaudi.sh | 34 -------- .../test_compose_without_rerank_on_xeon.sh | 34 -------- 29 files changed, 84 insertions(+), 881 deletions(-) create mode 100644 ChatQnA/tests/test_compose_ui_on_xeon.sh diff --git a/.github/workflows/_run-docker-compose.yml b/.github/workflows/_run-docker-compose.yml index 33efac6c0e..21bcfc5a20 100644 --- a/.github/workflows/_run-docker-compose.yml +++ b/.github/workflows/_run-docker-compose.yml @@ -110,6 +110,11 @@ jobs: run_test_cases=$other_test_cases fi + if [[ "${{ hardware }}" == "xeon"* ]]; then + if [ -f "${{ github.workspace }}/${{ inputs.example }}/tests/test_ui_on_xeon.sh" ]; then + run_test_cases="$run_test_cases test_ui_on_xeon.sh"; + fi + fi test_cases=$(echo $run_test_cases | tr ' ' '\n' | sort -u | jq -R '.' | jq -sc '.') echo "test_cases=$test_cases" echo "test_cases=$test_cases" >> $GITHUB_OUTPUT diff --git a/ChatQnA/tests/test_compose_faqgen_on_epyc.sh b/ChatQnA/tests/test_compose_faqgen_on_epyc.sh index 04a8929651..c480fd1f2e 100644 --- a/ChatQnA/tests/test_compose_faqgen_on_epyc.sh +++ b/ChatQnA/tests/test_compose_faqgen_on_epyc.sh @@ -196,37 +196,6 @@ function validate_megaservice() { } -function validate_frontend() { - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" 
- else - conda create -n ${conda_env_name} python=3.12 -y - fi - CONDA_ROOT=$(conda info --base) - source "${CONDA_ROOT}/etc/profile.d/conda.sh" - conda activate ${conda_env_name} - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - # npm install && npm ci && npx playwright install --with-deps - npm install && npm ci && npx playwright install - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? - - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/amd/cpu/epyc docker compose -f compose_faqgen.yaml down @@ -254,10 +223,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_faqgen_on_gaudi.sh b/ChatQnA/tests/test_compose_faqgen_on_gaudi.sh index fe3e46809b..08e718e1ca 100644 --- a/ChatQnA/tests/test_compose_faqgen_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_faqgen_on_gaudi.sh @@ -185,34 +185,6 @@ function validate_megaservice() { } -function validate_frontend() { - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - source activate ${conda_env_name} - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? - - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/intel/hpu/gaudi docker compose -f compose_faqgen.yaml down @@ -240,10 +212,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_faqgen_on_rocm.sh b/ChatQnA/tests/test_compose_faqgen_on_rocm.sh index 893807377f..e3248c7999 100644 --- a/ChatQnA/tests/test_compose_faqgen_on_rocm.sh +++ b/ChatQnA/tests/test_compose_faqgen_on_rocm.sh @@ -163,36 +163,6 @@ function validate_megaservice() { } -function validate_frontend() { - echo "[ TEST INFO ]: --------- frontend test started ---------" - cd "$WORKPATH"/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniconda3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - source activate ${conda_env_name} - echo "[ TEST INFO ]: --------- conda env activated ---------" - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? 
- - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd "$WORKPATH"/docker_compose/amd/gpu/rocm docker compose -f compose_faqgen.yaml stop && docker compose rm -f @@ -220,10 +190,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_faqgen_on_xeon.sh b/ChatQnA/tests/test_compose_faqgen_on_xeon.sh index 4a9e51fb6a..76270813a8 100644 --- a/ChatQnA/tests/test_compose_faqgen_on_xeon.sh +++ b/ChatQnA/tests/test_compose_faqgen_on_xeon.sh @@ -194,34 +194,6 @@ function validate_megaservice() { } -function validate_frontend() { - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - source activate ${conda_env_name} - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? - - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/intel/cpu/xeon docker compose -f compose_faqgen.yaml down @@ -249,10 +221,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_faqgen_tgi_on_epyc.sh b/ChatQnA/tests/test_compose_faqgen_tgi_on_epyc.sh index 6c3c27d366..d9800d543e 100644 --- a/ChatQnA/tests/test_compose_faqgen_tgi_on_epyc.sh +++ b/ChatQnA/tests/test_compose_faqgen_tgi_on_epyc.sh @@ -196,37 +196,6 @@ function validate_megaservice() { } -function validate_frontend() { - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - CONDA_ROOT=$(conda info --base) - source "${CONDA_ROOT}/etc/profile.d/conda.sh" - conda activate ${conda_env_name} - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - # npm install && npm ci && npx playwright install --with-deps - npm install && npm ci && npx playwright install - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? 
- - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/amd/cpu/epyc docker compose -f compose_faqgen_tgi.yaml down @@ -254,10 +223,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_faqgen_tgi_on_gaudi.sh b/ChatQnA/tests/test_compose_faqgen_tgi_on_gaudi.sh index e4fe73f9f1..b346d8018d 100644 --- a/ChatQnA/tests/test_compose_faqgen_tgi_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_faqgen_tgi_on_gaudi.sh @@ -186,34 +186,6 @@ function validate_megaservice() { } -function validate_frontend() { - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - source activate ${conda_env_name} - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? - - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/intel/hpu/gaudi docker compose -f compose_faqgen_tgi.yaml down @@ -241,10 +213,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_faqgen_tgi_on_xeon.sh b/ChatQnA/tests/test_compose_faqgen_tgi_on_xeon.sh index d0b823f204..ca3993b8be 100644 --- a/ChatQnA/tests/test_compose_faqgen_tgi_on_xeon.sh +++ b/ChatQnA/tests/test_compose_faqgen_tgi_on_xeon.sh @@ -198,34 +198,6 @@ function validate_megaservice() { } -function validate_frontend() { - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - source activate ${conda_env_name} - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? 
- - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/intel/cpu/xeon docker compose -f compose_faqgen_tgi.yaml down @@ -253,10 +225,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_faqgen_vllm_on_rocm.sh b/ChatQnA/tests/test_compose_faqgen_vllm_on_rocm.sh index f344f0030a..8c24185f31 100644 --- a/ChatQnA/tests/test_compose_faqgen_vllm_on_rocm.sh +++ b/ChatQnA/tests/test_compose_faqgen_vllm_on_rocm.sh @@ -138,35 +138,6 @@ function validate_megaservice() { } -function validate_frontend() { - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniconda3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - - source activate ${conda_env_name} - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? - - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/amd/gpu/rocm docker compose -f compose_vllm.yaml down @@ -194,10 +165,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_guardrails_on_gaudi.sh b/ChatQnA/tests/test_compose_guardrails_on_gaudi.sh index 0e261159cb..09bfffabe1 100644 --- a/ChatQnA/tests/test_compose_guardrails_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_guardrails_on_gaudi.sh @@ -151,34 +151,6 @@ function validate_megaservice() { } -function validate_frontend() { - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - source activate ${conda_env_name} - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? 
- - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/intel/hpu/gaudi docker compose -f compose_guardrails.yaml down @@ -206,10 +178,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_milvus_on_epyc.sh b/ChatQnA/tests/test_compose_milvus_on_epyc.sh index 212bed7a64..efba619f58 100644 --- a/ChatQnA/tests/test_compose_milvus_on_epyc.sh +++ b/ChatQnA/tests/test_compose_milvus_on_epyc.sh @@ -171,39 +171,6 @@ function validate_megaservice() { } -function validate_frontend() { - echo "[ TEST INFO ]: --------- frontend test started ---------" - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - CONDA_ROOT=$(conda info --base) - source "${CONDA_ROOT}/etc/profile.d/conda.sh" - conda activate ${conda_env_name} - echo "[ TEST INFO ]: --------- conda env activated ---------" - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - # npm install && npm ci && npx playwright install --with-deps - npm install && npm ci && npx playwright install - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? - - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { echo "In stop docker" echo $WORKPATH @@ -233,10 +200,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_milvus_on_xeon.sh b/ChatQnA/tests/test_compose_milvus_on_xeon.sh index 31e470f429..338c5515dc 100644 --- a/ChatQnA/tests/test_compose_milvus_on_xeon.sh +++ b/ChatQnA/tests/test_compose_milvus_on_xeon.sh @@ -170,36 +170,6 @@ function validate_megaservice() { } -function validate_frontend() { - echo "[ TEST INFO ]: --------- frontend test started ---------" - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - source activate ${conda_env_name} - echo "[ TEST INFO ]: --------- conda env activated ---------" - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? 
- - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { echo "In stop docker" echo $WORKPATH @@ -229,10 +199,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_on_epyc.sh b/ChatQnA/tests/test_compose_on_epyc.sh index 212bed7a64..efba619f58 100644 --- a/ChatQnA/tests/test_compose_on_epyc.sh +++ b/ChatQnA/tests/test_compose_on_epyc.sh @@ -171,39 +171,6 @@ function validate_megaservice() { } -function validate_frontend() { - echo "[ TEST INFO ]: --------- frontend test started ---------" - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - CONDA_ROOT=$(conda info --base) - source "${CONDA_ROOT}/etc/profile.d/conda.sh" - conda activate ${conda_env_name} - echo "[ TEST INFO ]: --------- conda env activated ---------" - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - # npm install && npm ci && npx playwright install --with-deps - npm install && npm ci && npx playwright install - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? - - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { echo "In stop docker" echo $WORKPATH @@ -233,10 +200,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_on_gaudi.sh b/ChatQnA/tests/test_compose_on_gaudi.sh index 090f907a2d..ace8e10b41 100644 --- a/ChatQnA/tests/test_compose_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_on_gaudi.sh @@ -130,36 +130,6 @@ function validate_megaservice() { } -function validate_frontend() { - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - source activate ${conda_env_name} - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - export no_proxy="localhost,127.0.0.1,$ip_address" - - exit_status=0 - npx playwright test || exit_status=$? 
- - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/intel/hpu/gaudi docker compose -f compose.yaml -f compose.telemetry.yaml down @@ -187,10 +157,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_on_rocm.sh b/ChatQnA/tests/test_compose_on_rocm.sh index 5e31ea9969..63d978b92b 100644 --- a/ChatQnA/tests/test_compose_on_rocm.sh +++ b/ChatQnA/tests/test_compose_on_rocm.sh @@ -148,36 +148,6 @@ function validate_megaservice() { } -function validate_frontend() { - echo "[ TEST INFO ]: --------- frontend test started ---------" - cd "$WORKPATH"/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniconda3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - source activate ${conda_env_name} - echo "[ TEST INFO ]: --------- conda env activated ---------" - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? - - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd "$WORKPATH"/docker_compose/amd/gpu/rocm docker compose stop && docker compose rm -f @@ -205,10 +175,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_on_xeon.sh b/ChatQnA/tests/test_compose_on_xeon.sh index 21a7b7676f..a241e4eeff 100644 --- a/ChatQnA/tests/test_compose_on_xeon.sh +++ b/ChatQnA/tests/test_compose_on_xeon.sh @@ -126,35 +126,6 @@ function validate_megaservice() { } -function validate_frontend() { - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - - source activate ${conda_env_name} - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? 
- - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/intel/cpu/xeon docker compose -f compose.yaml -f compose.telemetry.yaml down @@ -182,10 +153,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_openeuler_on_xeon.sh b/ChatQnA/tests/test_compose_openeuler_on_xeon.sh index ed6e7ed0d4..4b270360ca 100644 --- a/ChatQnA/tests/test_compose_openeuler_on_xeon.sh +++ b/ChatQnA/tests/test_compose_openeuler_on_xeon.sh @@ -183,10 +183,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_pinecone_on_epyc.sh b/ChatQnA/tests/test_compose_pinecone_on_epyc.sh index cb1341d921..10bb819ca5 100755 --- a/ChatQnA/tests/test_compose_pinecone_on_epyc.sh +++ b/ChatQnA/tests/test_compose_pinecone_on_epyc.sh @@ -166,39 +166,6 @@ function validate_megaservice() { } -function validate_frontend() { - echo "[ TEST INFO ]: --------- frontend test started ---------" - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - CONDA_ROOT=$(conda info --base) - source "${CONDA_ROOT}/etc/profile.d/conda.sh" - conda activate ${conda_env_name} - echo "[ TEST INFO ]: --------- conda env activated ---------" - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - # npm install && npm ci && npx playwright install --with-deps - npm install && npm ci && npx playwright install - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? - - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { echo "In stop docker" echo $WORKPATH diff --git a/ChatQnA/tests/test_compose_pinecone_on_xeon.sh b/ChatQnA/tests/test_compose_pinecone_on_xeon.sh index c3961600ed..f5efee6c80 100755 --- a/ChatQnA/tests/test_compose_pinecone_on_xeon.sh +++ b/ChatQnA/tests/test_compose_pinecone_on_xeon.sh @@ -168,36 +168,6 @@ function validate_megaservice() { } -function validate_frontend() { - echo "[ TEST INFO ]: --------- frontend test started ---------" - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - source activate ${conda_env_name} - echo "[ TEST INFO ]: --------- conda env activated ---------" - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? 
- - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { echo "In stop docker" echo $WORKPATH diff --git a/ChatQnA/tests/test_compose_qdrant_on_epyc.sh b/ChatQnA/tests/test_compose_qdrant_on_epyc.sh index 092bfb0bb1..e93983ec31 100644 --- a/ChatQnA/tests/test_compose_qdrant_on_epyc.sh +++ b/ChatQnA/tests/test_compose_qdrant_on_epyc.sh @@ -157,36 +157,6 @@ function validate_megaservice() { } -function validate_frontend() { - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - CONDA_ROOT=$(conda info --base) - source "${CONDA_ROOT}/etc/profile.d/conda.sh" - conda activate ${conda_env_name} - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - # npm install && npm ci && npx playwright install --with-deps - npm install && npm ci && npx playwright install - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? - - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/amd/cpu/epyc docker compose -f compose_qdrant.yaml down @@ -214,10 +184,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_qdrant_on_xeon.sh b/ChatQnA/tests/test_compose_qdrant_on_xeon.sh index 765fb90e33..2a522fa7a8 100644 --- a/ChatQnA/tests/test_compose_qdrant_on_xeon.sh +++ b/ChatQnA/tests/test_compose_qdrant_on_xeon.sh @@ -153,33 +153,6 @@ function validate_megaservice() { } -function validate_frontend() { - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - source activate ${conda_env_name} - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? 
- - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/intel/cpu/xeon docker compose -f compose_qdrant.yaml down @@ -207,10 +180,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_tgi_on_epyc.sh b/ChatQnA/tests/test_compose_tgi_on_epyc.sh index 81cce137e9..98efb16223 100644 --- a/ChatQnA/tests/test_compose_tgi_on_epyc.sh +++ b/ChatQnA/tests/test_compose_tgi_on_epyc.sh @@ -178,39 +178,6 @@ function validate_megaservice() { } -function validate_frontend() { - echo "[ TEST INFO ]: --------- frontend test started ---------" - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - CONDA_ROOT=$(conda info --base) - source "${CONDA_ROOT}/etc/profile.d/conda.sh" - conda activate ${conda_env_name} - echo "[ TEST INFO ]: --------- conda env activated ---------" - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - # npm install && npm ci && npx playwright install --with-deps - npm install && npm ci && npx playwright install - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? - - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/amd/cpu/epyc docker compose -f compose_tgi.yaml -f compose_tgi.telemetry.yaml down @@ -238,10 +205,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_tgi_on_gaudi.sh b/ChatQnA/tests/test_compose_tgi_on_gaudi.sh index 28df0e83aa..eb79b34440 100644 --- a/ChatQnA/tests/test_compose_tgi_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_tgi_on_gaudi.sh @@ -173,36 +173,6 @@ function validate_megaservice() { } -function validate_frontend() { - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - source activate ${conda_env_name} - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - export no_proxy="localhost,127.0.0.1,$ip_address" - - exit_status=0 - npx playwright test || exit_status=$? 
- - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/intel/hpu/gaudi docker compose -f compose_tgi.yaml -f compose_tgi.telemetry.yaml down @@ -230,10 +200,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_tgi_on_xeon.sh b/ChatQnA/tests/test_compose_tgi_on_xeon.sh index 229151d04d..f3c1e8bffd 100644 --- a/ChatQnA/tests/test_compose_tgi_on_xeon.sh +++ b/ChatQnA/tests/test_compose_tgi_on_xeon.sh @@ -170,36 +170,6 @@ function validate_megaservice() { } -function validate_frontend() { - echo "[ TEST INFO ]: --------- frontend test started ---------" - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - source activate ${conda_env_name} - echo "[ TEST INFO ]: --------- conda env activated ---------" - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? - - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/intel/cpu/xeon docker compose -f compose_tgi.yaml -f compose_tgi.telemetry.yaml down @@ -227,10 +197,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_ui_on_xeon.sh b/ChatQnA/tests/test_compose_ui_on_xeon.sh new file mode 100644 index 0000000000..5c3eb88d3f --- /dev/null +++ b/ChatQnA/tests/test_compose_ui_on_xeon.sh @@ -0,0 +1,79 @@ +#!/bin/bash +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +set -xe +IMAGE_REPO=${IMAGE_REPO:-"opea"} +IMAGE_TAG=${IMAGE_TAG:-"latest"} +echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" +echo "TAG=IMAGE_TAG=${IMAGE_TAG}" +export REGISTRY=${IMAGE_REPO} +export TAG=${IMAGE_TAG} +export MODEL_CACHE=${model_cache:-"./data"} + +WORKPATH=$(dirname "$PWD") +LOG_PATH="$WORKPATH/tests" +ip_address=$(hostname -I | awk '{print $1}') + +function build_docker_images() { + opea_branch=${opea_branch:-"main"} + cd $WORKPATH/docker_image_build + git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git + pushd GenAIComps + echo "GenAIComps test commit is $(git rev-parse HEAD)" + docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . + popd && sleep 1s + + echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
+ service_list="chatqna chatqna-ui dataprep retriever nginx" + docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log + + docker images && sleep 1s +} + +function start_services() { + cd $WORKPATH/docker_compose/intel/cpu/xeon + + source set_env.sh + + # Start Docker Containers + docker compose -f compose.yaml -f compose.telemetry.yaml up -d --quiet-pull > ${LOG_PATH}/start_services_with_compose.log + n=0 + until [[ "$n" -ge 100 ]]; do + docker logs vllm-service > ${LOG_PATH}/vllm_service_start.log 2>&1 + if grep -q complete ${LOG_PATH}/vllm_service_start.log; then + break + fi + sleep 5s + n=$((n+1)) + done +} + +function stop_docker() { + cd $WORKPATH/docker_compose/intel/cpu/xeon + docker compose -f compose.yaml -f compose.telemetry.yaml down +} + +function main() { + + echo "::group::stop_docker" + stop_docker + echo "::endgroup::" + + echo "::group::build_docker_images" + if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi + echo "::endgroup::" + + echo "::group::start_services" + start_services + echo "::endgroup::" + + echo "::group::stop_docker" + stop_docker + echo "::endgroup::" + + docker system prune -f + +} + +main diff --git a/ChatQnA/tests/test_compose_vllm_on_rocm.sh b/ChatQnA/tests/test_compose_vllm_on_rocm.sh index 9f61c05b89..d0e856960f 100644 --- a/ChatQnA/tests/test_compose_vllm_on_rocm.sh +++ b/ChatQnA/tests/test_compose_vllm_on_rocm.sh @@ -129,35 +129,6 @@ function validate_megaservice() { } -function validate_frontend() { - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniconda3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - - source activate ${conda_env_name} - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? - - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/amd/gpu/rocm docker compose -f compose_vllm.yaml down @@ -185,10 +156,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_without_rerank_on_epyc.sh b/ChatQnA/tests/test_compose_without_rerank_on_epyc.sh index 9a1777bd15..1f14c10d10 100644 --- a/ChatQnA/tests/test_compose_without_rerank_on_epyc.sh +++ b/ChatQnA/tests/test_compose_without_rerank_on_epyc.sh @@ -166,39 +166,6 @@ function validate_megaservice() { } -function validate_frontend() { - echo "[ TEST INFO ]: --------- frontend test started ---------" - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" 
- else - conda create -n ${conda_env_name} python=3.12 -y - fi - CONDA_ROOT=$(conda info --base) - source "${CONDA_ROOT}/etc/profile.d/conda.sh" - conda activate ${conda_env_name} - echo "[ TEST INFO ]: --------- conda env activated ---------" - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - # npm install && npm ci && npx playwright install --with-deps - npm install && npm ci && npx playwright install - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? - - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/amd/cpu/epyc/ docker compose -f compose_without_rerank.yaml down @@ -226,10 +193,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh b/ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh index 24f72db727..62cbe30d3e 100644 --- a/ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh @@ -163,36 +163,6 @@ function validate_megaservice() { } -function validate_frontend() { - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" - else - conda create -n ${conda_env_name} python=3.12 -y - fi - source activate ${conda_env_name} - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - export no_proxy="localhost,127.0.0.1,$ip_address" - - exit_status=0 - npx playwright test || exit_status=$? - - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/intel/hpu/gaudi docker compose -f compose_without_rerank.yaml down @@ -220,10 +190,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" diff --git a/ChatQnA/tests/test_compose_without_rerank_on_xeon.sh b/ChatQnA/tests/test_compose_without_rerank_on_xeon.sh index ad8d9c2f43..7e2157cee7 100644 --- a/ChatQnA/tests/test_compose_without_rerank_on_xeon.sh +++ b/ChatQnA/tests/test_compose_without_rerank_on_xeon.sh @@ -162,36 +162,6 @@ function validate_megaservice() { } -function validate_frontend() { - echo "[ TEST INFO ]: --------- frontend test started ---------" - cd $WORKPATH/ui/svelte - local conda_env_name="OPEA_e2e" - export PATH=${HOME}/miniforge3/bin/:$PATH - if conda info --envs | grep -q "$conda_env_name"; then - echo "$conda_env_name exist!" 
- else - conda create -n ${conda_env_name} python=3.12 -y - fi - source activate ${conda_env_name} - echo "[ TEST INFO ]: --------- conda env activated ---------" - - sed -i "s/localhost/$ip_address/g" playwright.config.ts - - conda install -c conda-forge nodejs=22.6.0 -y - npm install && npm ci && npx playwright install --with-deps - node -v && npm -v && pip list - - exit_status=0 - npx playwright test || exit_status=$? - - if [ $exit_status -ne 0 ]; then - echo "[TEST INFO]: ---------frontend test failed---------" - exit $exit_status - else - echo "[TEST INFO]: ---------frontend test passed---------" - fi -} - function stop_docker() { cd $WORKPATH/docker_compose/intel/cpu/xeon/ docker compose -f compose_without_rerank.yaml down @@ -219,10 +189,6 @@ function main() { validate_megaservice echo "::endgroup::" - echo "::group::validate_frontend" - validate_frontend - echo "::endgroup::" - echo "::group::stop_docker" stop_docker echo "::endgroup::" From 03797c2cd5164b3ed9f67b3f5090a11283bbbb3d Mon Sep 17 00:00:00 2001 From: ZePan110 Date: Tue, 23 Sep 2025 16:50:00 +0800 Subject: [PATCH 26/78] Fix CD workflow issue and ChatQnA gaudi test failure (#2281) Signed-off-by: ZePan110 --- .github/workflows/_run-docker-compose.yml | 12 ++++++++---- ChatQnA/docker_compose/intel/hpu/gaudi/compose.yaml | 2 +- .../intel/hpu/gaudi/compose_guardrails.yaml | 2 +- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/.github/workflows/_run-docker-compose.yml b/.github/workflows/_run-docker-compose.yml index 21bcfc5a20..46dbed8b32 100644 --- a/.github/workflows/_run-docker-compose.yml +++ b/.github/workflows/_run-docker-compose.yml @@ -69,17 +69,19 @@ jobs: - name: Get test matrix shell: bash id: test-case-matrix + env: + HARDWARE: ${{ inputs.hardware }} run: | example_l=$(echo ${{ inputs.example }} | tr '[:upper:]' '[:lower:]') cd ${{ github.workspace }}/${{ inputs.example }}/tests run_test_cases="" - if [[ "${{ inputs.hardware }}" == "gaudi"* ]]; then + if [[ "$HARDWARE" == "gaudi"* ]]; then hardware="gaudi" - elif [[ "${{ inputs.hardware }}" == "xeon"* ]]; then + elif [[ "$HARDWARE" == "xeon"* ]]; then hardware="xeon" else - hardware="${{ inputs.hardware }}" + hardware="$HARDWARE" fi default_test_case=$(find . -type f -name "test_compose_on_$hardware.sh" | cut -d/ -f2) if [ "$default_test_case" ]; then run_test_cases="$default_test_case"; fi @@ -110,9 +112,11 @@ jobs: run_test_cases=$other_test_cases fi - if [[ "${{ hardware }}" == "xeon"* ]]; then + if [[ "$hardware" == "xeon"* ]]; then if [ -f "${{ github.workspace }}/${{ inputs.example }}/tests/test_ui_on_xeon.sh" ]; then run_test_cases="$run_test_cases test_ui_on_xeon.sh"; + elif [ -f "${{ github.workspace }}/${{ inputs.example }}/tests/test_ui_on_gaudi.sh" ]; then + run_test_cases="$run_test_cases test_ui_on_gaudi.sh"; fi fi test_cases=$(echo $run_test_cases | tr ' ' '\n' | sort -u | jq -R '.' 
| jq -sc '.') diff --git a/ChatQnA/docker_compose/intel/hpu/gaudi/compose.yaml b/ChatQnA/docker_compose/intel/hpu/gaudi/compose.yaml index cc4048c8d2..a762b21767 100644 --- a/ChatQnA/docker_compose/intel/hpu/gaudi/compose.yaml +++ b/ChatQnA/docker_compose/intel/hpu/gaudi/compose.yaml @@ -92,7 +92,7 @@ services: MAX_WARMUP_SEQUENCE_LENGTH: 512 command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: ${REGISTRY:-opea}/vllm-gaudi:1.22.0 + image: opea/vllm-gaudi:1.22.0 container_name: vllm-gaudi-server ports: - "8007:80" diff --git a/ChatQnA/docker_compose/intel/hpu/gaudi/compose_guardrails.yaml b/ChatQnA/docker_compose/intel/hpu/gaudi/compose_guardrails.yaml index 69b1339e0c..46ccee4773 100644 --- a/ChatQnA/docker_compose/intel/hpu/gaudi/compose_guardrails.yaml +++ b/ChatQnA/docker_compose/intel/hpu/gaudi/compose_guardrails.yaml @@ -130,7 +130,7 @@ services: MAX_WARMUP_SEQUENCE_LENGTH: 512 command: --model-id ${RERANK_MODEL_ID} --auto-truncate vllm-service: - image: ${REGISTRY:-opea}/vllm-gaudi:1.22.0 + image: opea/vllm-gaudi:1.22.0 container_name: vllm-gaudi-server ports: - "8008:80" From f8d76e3e4a7b567186af83f1a7edce0b1b4d81ee Mon Sep 17 00:00:00 2001 From: ZePan110 Date: Wed, 24 Sep 2025 18:15:54 +0800 Subject: [PATCH 27/78] Exclude AudioQnA k8s test (#2286) Signed-off-by: ZePan110 --- .github/workflows/weekly-one-click-test.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/weekly-one-click-test.yml b/.github/workflows/weekly-one-click-test.yml index 39174073bf..a2db8491aa 100644 --- a/.github/workflows/weekly-one-click-test.yml +++ b/.github/workflows/weekly-one-click-test.yml @@ -55,6 +55,10 @@ jobs: example: ${{ fromJson(needs.get-test-matrix.outputs.examples) }} node: ${{ fromJson(needs.get-test-matrix.outputs.nodes) }} deploy_method: ${{ fromJson(needs.get-test-matrix.outputs.deploy_methods) }} + exclude: + - example: AgentQnA + node: xeon + deploy_method: k8s fail-fast: false uses: ./.github/workflows/_run-one-click.yml with: From ba6ff938076a91dd2e15ebf3ef1b60f2f02a0dc4 Mon Sep 17 00:00:00 2001 From: Yao Qing Date: Thu, 25 Sep 2025 16:52:22 +0800 Subject: [PATCH 28/78] One click fix: Dynamically load defaults by executing set_env.sh (#2287) Signed-off-by: Yao, Qing Signed-off-by: ZePan110 Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: ZePan110 --- .github/workflows/_run-one-click.yml | 19 +++----- .github/workflows/scripts/get-params.py | 41 ----------------- one_click_deploy/core/config.py | 13 +----- one_click_deploy/core/deployer.py | 34 +++++++++++---- one_click_deploy/core/utils.py | 58 +++++++++++++++++-------- 5 files changed, 73 insertions(+), 92 deletions(-) delete mode 100644 .github/workflows/scripts/get-params.py diff --git a/.github/workflows/_run-one-click.yml b/.github/workflows/_run-one-click.yml index 149790a5e8..af77dc5f39 100644 --- a/.github/workflows/_run-one-click.yml +++ b/.github/workflows/_run-one-click.yml @@ -123,20 +123,11 @@ jobs: id: get-default-params if: ${{ inputs.deploy_method == 'k8s' }} run: | - cd ${{ github.workspace }} - example=$(echo "${{ inputs.example }}" | cut -d'-' -f1) - PARAMS_JSON=$(python3 .github/workflows/scripts/get-params.py $example) - echo "LLM_model=$(echo "$PARAMS_JSON" | jq -r '.llm_model')" >> $GITHUB_ENV - if [ "$example" = "ChatQnA" ]; then - echo "LLM_model=$(echo "$PARAMS_JSON" | jq -r '.llm_model')" >> $GITHUB_ENV - echo "Embedding_model=$(echo "$PARAMS_JSON" | jq -r '.embed_model')" >> $GITHUB_ENV - echo 
"Reranking_model=$(echo "$PARAMS_JSON" | jq -r '.rerank_model')" >> $GITHUB_ENV - echo "Mount_dir=$(echo "$PARAMS_JSON" | jq -r '.mount_dir')" >> $GITHUB_ENV - elif [ "$example" = "VisualQnA" ]; then - echo "LVM_model=$(echo "$PARAMS_JSON" | jq -r '.lvm_model')" >> $GITHUB_ENV - elif [ "$example" = "AgentQnA" ]; then - echo "LLM_model=$(echo "$PARAMS_JSON" | jq -r '.gaudi.llm_model')" >> $GITHUB_ENV - fi + echo "LLM_model=" >> $GITHUB_ENV + echo "Embedding_model=" >> $GITHUB_ENV + echo "Reranking_model=" >> $GITHUB_ENV + echo "Mount_dir=" >> $GITHUB_ENV + echo "LVM_model=" >> $GITHUB_ENV - name: deploy and test shell: bash diff --git a/.github/workflows/scripts/get-params.py b/.github/workflows/scripts/get-params.py deleted file mode 100644 index d597facaa9..0000000000 --- a/.github/workflows/scripts/get-params.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (C) 2025 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import json -import os -import sys - -sys.path.append(os.path.join(os.path.dirname(__file__), "../../../one_click_deploy/core/")) -from config import EXAMPLE_CONFIGS - - -def get_example_defaults(example_name): - if example_name not in EXAMPLE_CONFIGS: - print(f"error: example '{example_name}' not found in EXAMPLE_CONFIGS") - sys.exit(1) - - example_config = EXAMPLE_CONFIGS[example_name] - params = example_config.get("interactive_params", {}) - - if isinstance(params, list): - return {param["name"]: param["default"] for param in params} - elif isinstance(params, dict): - return { - device: {param["name"]: param["default"] for param in device_params} - for device, device_params in params.items() - } - else: - return {"error": "Invalid params format in EXAMPLE_CONFIGS for example: " + example_name} - - -def main(): - example_name = sys.argv[1] - - defaults = get_example_defaults(example_name) - - print(json.dumps(defaults, indent=2, ensure_ascii=False)) - - -if __name__ == "__main__": - main() diff --git a/one_click_deploy/core/config.py b/one_click_deploy/core/config.py index 0c3002f3c9..a7f41f4028 100644 --- a/one_click_deploy/core/config.py +++ b/one_click_deploy/core/config.py @@ -100,29 +100,26 @@ "name": "llm_model", "prompt": "LLM Model ID", "type": str, - "default": "meta-llama/Meta-Llama-3-8B-Instruct", "help": "e.g., meta-llama/Meta-Llama-3-8B-Instruct", }, { "name": "embed_model", "prompt": "Embedding Model ID", "type": str, - "default": "BAAI/bge-base-en-v1.5", "help": "e.g., BAAI/bge-base-en-v1.5", }, { "name": "rerank_model", "prompt": "Reranking Model ID", "type": str, - "default": "BAAI/bge-reranker-base", "help": "e.g., BAAI/bge-reranker-base", }, { "name": "mount_dir", "prompt": "Data Mount Directory (for Docker)", "type": str, - "modes": ["docker"], "default": "./data", + "modes": ["docker"], }, ], }, @@ -193,7 +190,6 @@ "name": "llm_model", "prompt": "LLM Model ID (for Code Translation)", "type": str, - "default": "mistralai/Mistral-7B-Instruct-v0.3", "help": "e.g., mistralai/Mistral-7B-Instruct-v0.3", }, ], @@ -264,7 +260,6 @@ "name": "llm_model", "prompt": "LLM Model ID (for DocSum)", "type": str, - "default": "meta-llama/Meta-Llama-3-8B-Instruct", "help": "e.g., meta-llama/Meta-Llama-3-8B-Instruct", }, ], @@ -333,7 +328,6 @@ "name": "llm_model", "prompt": "LLM Model ID (for Code Generation)", "type": str, - "default": "Qwen/Qwen2.5-Coder-7B-Instruct", "help": "e.g., Qwen/Qwen2.5-Coder-7B-Instruct", }, ], @@ -409,7 +403,6 @@ "name": "llm_model", "prompt": "LLM Model ID (for Audio Q&A)", "type": str, - "default": 
"meta-llama/Meta-Llama-3-8B-Instruct", "help": "e.g., meta-llama/Meta-Llama-3-8B-Instruct", }, ], @@ -486,7 +479,6 @@ "name": "lvm_model", "prompt": "LVM Model ID (for Visual Q&A)", "type": str, - "default": "llava-hf/llava-v1.6-mistral-7b-hf", "help": "e.g., llava-hf/llava-v1.6-mistral-7b-hf", }, ], @@ -568,7 +560,6 @@ "name": "llm_model", "prompt": "LLM Model ID", "type": str, - "default": "meta-llama/Meta-Llama-3-8B-Instruct", "help": "e.g., meta-llama/Meta-Llama-3-8B-Instruct", }, ], @@ -659,14 +650,12 @@ "name": "llm_model", "prompt": "LLM Model ID (for Gaudi)", "type": str, - "default": "meta-llama/Meta-Llama-3-8B-Instruct", "help": "e.g., meta-llama/Meta-Llama-3-8B-Instruct", }, { "name": "num_shards", "prompt": "Number of Gaudi HPU cards (shards)", "type": int, - "default": 4, "help": "e.g., 1, 2, 4. Controls tensor parallel size.", }, ], diff --git a/one_click_deploy/core/deployer.py b/one_click_deploy/core/deployer.py index 3aaa79ce01..caabcecf39 100644 --- a/one_click_deploy/core/deployer.py +++ b/one_click_deploy/core/deployer.py @@ -345,26 +345,44 @@ def _interactive_setup_for_deploy(self): interactive_params = self._get_device_specific_or_common_config(["interactive_params"]) or [] + docker_param_map = self._get_device_specific_or_common_config(["docker_compose", "params_to_set_env"]) or {} + source_env_script = self._get_docker_set_env_script() + for param in interactive_params: if "modes" in param and self.args.deploy_mode not in param["modes"]: setattr(self.args, param["name"], None) continue + static_default = param.get("default") + + dynamic_default = None + env_var_name = docker_param_map.get(param["name"]) + if env_var_name and source_env_script: + dynamic_default = get_var_from_shell_script(source_env_script, env_var_name) + if dynamic_default: + log_message( + "DEBUG", + f"Found default for '{param['name']}' from script '{source_env_script.name}': {dynamic_default}", + ) + + final_default = dynamic_default if dynamic_default is not None else static_default prompt_text = param["prompt"] help_text = param.get("help") if help_text: prompt_text = f"{prompt_text} ({help_text})" - default_value = param.get("default") - is_required = param.get("required", False) + user_input = click.prompt(prompt_text, default=final_default, type=param.get("type", str)) - user_input = click.prompt(prompt_text, default=default_value, type=param.get("type", str)) + value_to_set = user_input if user_input else final_default + + is_required = param.get("required", False) - while is_required and (not user_input or user_input == default_value): + while is_required and not value_to_set: log_message("WARN", f"A valid '{param['prompt']}' is required. 
Please provide a real value.") user_input = click.prompt(prompt_text, type=param.get("type", str), default=None) + value_to_set = user_input if user_input else None - setattr(self.args, param["name"], user_input) + setattr(self.args, param["name"], value_to_set) self.args.do_check_env = click.confirm("Run environment check?", default=False, show_default=True) @@ -790,7 +808,7 @@ def _configure_docker(self): updates = { env_var: getattr(self.args, arg_name) for arg_name, env_var in params_to_env_map.items() - if hasattr(self.args, arg_name) and getattr(self.args, arg_name) is not None + if hasattr(self.args, arg_name) and getattr(self.args, arg_name) } user_proxies = {p.strip() for p in self.args.no_proxy.split(",") if p.strip()} @@ -834,7 +852,7 @@ def _configure_kubernetes(self): for name, path_or_paths in params_to_values.items(): if hasattr(self.args, name): value = getattr(self.args, name) - if value is None: + if not value: continue if isinstance(path_or_paths, list) and len(path_or_paths) > 0 and isinstance(path_or_paths[0], list): @@ -889,7 +907,7 @@ def deploy(self): log_message("ERROR", f"Local environment script '{local_env_file}' not found. Cannot deploy.") return False - compose_up_cmd = " ".join(compose_base_cmd + ["up", "-d", "--remove-orphans"]) + compose_up_cmd = " ".join(compose_base_cmd + ["up", "-d", "--remove-orphans", "--quiet-pull"]) if self.example_name == "ChatQnA" and self.args.device == "gaudi": compose_up_cmd = "source .env&&" + compose_up_cmd compose_dir = self._get_docker_compose_files()[0].parent diff --git a/one_click_deploy/core/utils.py b/one_click_deploy/core/utils.py index eb3880bade..74deacbf54 100644 --- a/one_click_deploy/core/utils.py +++ b/one_click_deploy/core/utils.py @@ -377,28 +377,52 @@ def stop_all_kubectl_port_forwards(): def get_var_from_shell_script(script_path: pathlib.Path, var_name: str) -> str | None: + """Gets the value of an environment variable by executing a shell script. + + This method is robust as it handles scripts with functions, sourcing other files, + and conditional logic. It executes the script in a non-interactive mode. + + Args: + script_path: The absolute path to the shell script. + var_name: The name of the environment variable to retrieve. + + Returns: + The value of the variable as a string, or None if not found or on error. + """ if not script_path or not script_path.exists(): log_message("DEBUG", f"Source script for variable extraction not found: {script_path}") return None - assignment_pattern = re.compile(rf"^\s*(?:export\s+)?{re.escape(var_name)}\s*=\s*(.*)") - self_ref_pattern = re.compile(r"^\s*(\$\{?" 
+ re.escape(var_name) + r"\}?\,?)") + + command_string = f"NON_INTERACTIVE=true; " f'source "{script_path.resolve()}" > /dev/null; ' f'echo "${var_name}"' try: - lines = script_path.read_text().splitlines() - for line in reversed(lines): - match = assignment_pattern.match(line) - if match: - value = match.group(1).strip() - value = value.split("#", 1)[0].strip() - if (value.startswith('"') and value.endswith('"')) or (value.startswith("'") and value.endswith("'")): - value = value[1:-1] - value = self_ref_pattern.sub("", value).strip() - value = value.lstrip(",") - log_message("DEBUG", f"Extracted and cleaned value for '{var_name}': {value}") - return value - log_message("DEBUG", f"Variable '{var_name}' not found in {script_path}.") - return None + result = run_command( + ["bash", "-c", command_string], + cwd=script_path.parent, + capture_output=True, + check=False, + display_cmd=False, + ) + + if result.returncode != 0: + log_message( + "WARN", + f"Execution of '{script_path.name}' failed when trying to get var '{var_name}'. Stderr: {result.stderr.strip()}", + ) + return None + + value = result.stdout.strip() + + if value: + log_message("DEBUG", f"Extracted value for '{var_name}' from '{script_path.name}': {value}") + return value + else: + log_message("DEBUG", f"Variable '{var_name}' was not set or is empty in '{script_path.name}'.") + return None + except Exception as e: - log_message("WARN", f"Failed to parse variable '{var_name}' from {script_path}: {e}") + log_message( + "WARN", f"An unexpected error occurred while executing {script_path.name} to get var '{var_name}': {e}" + ) return None From 3c7f1e4ae45332c64761f722f3d2ece2a9f4fe34 Mon Sep 17 00:00:00 2001 From: Yi Yao Date: Fri, 26 Sep 2025 11:06:53 +0800 Subject: [PATCH 29/78] Porting DBQnA to Text2Query (#2291) Signed-off-by: Yi Yao --- DBQnA/docker_compose/amd/gpu/rocm/README.md | 29 ++++++++++------- .../docker_compose/amd/gpu/rocm/compose.yaml | 8 +++-- DBQnA/docker_compose/intel/cpu/xeon/README.md | 31 +++++++++++-------- .../intel/cpu/xeon/compose.yaml | 8 +++-- DBQnA/docker_image_build/build.yaml | 8 ++--- DBQnA/tests/test_compose_on_rocm.sh | 5 +-- DBQnA/tests/test_compose_on_xeon.sh | 5 +-- DBQnA/ui/react/src/App.test.tsx | 22 ++++++------- .../src/components/DbConnect/DBConnect.tsx | 22 ++++++++++--- 9 files changed, 83 insertions(+), 55 deletions(-) diff --git a/DBQnA/docker_compose/amd/gpu/rocm/README.md b/DBQnA/docker_compose/amd/gpu/rocm/README.md index f15b5db2b4..a0eda0c9bf 100644 --- a/DBQnA/docker_compose/amd/gpu/rocm/README.md +++ b/DBQnA/docker_compose/amd/gpu/rocm/README.md @@ -9,13 +9,17 @@ This document outlines the deployment process for DBQnA application which helps This section describes how to quickly deploy and test the DBQnA service manually on AMD GPU (ROCm). The basic steps are: -1. [Access the Code](#access-the-code) -2. [Generate a HuggingFace Access Token](#generate-a-huggingface-access-token) -3. [Configure the Deployment Environment](#configure-the-deployment-environment) -4. [Deploy the Service Using Docker Compose](#deploy-the-service-using-docker-compose) -5. [Check the Deployment Status](#check-the-deployment-status) -6. [Test the Pipeline](#test-the-pipeline) -7. 
[Cleanup the Deployment](#cleanup-the-deployment) +- [Example DBQnA Deployment on AMD GPU (ROCm)](#example-dbqna-deployment-on-amd-gpu-rocm) + - [DBQnA Quick Start Deployment](#dbqna-quick-start-deployment) + - [Access the Code](#access-the-code) + - [Generate a HuggingFace Access Token](#generate-a-huggingface-access-token) + - [Configure the Deployment Environment](#configure-the-deployment-environment) + - [Deploy the Service Using Docker Compose](#deploy-the-service-using-docker-compose) + - [Check the Deployment Status](#check-the-deployment-status) + - [Test the Pipeline](#test-the-pipeline) + - [Cleanup the Deployment](#cleanup-the-deployment) + - [DBQnA Docker Compose Files](#dbqna-docker-compose-files) + - [DBQnA Service Configuration for AMD GPUs](#dbqna-service-configuration-for-amd-gpus) ### Access the Code @@ -73,10 +77,11 @@ For the default deployment, the following 4 containers should be running. Once the DBQnA service are running, test the pipeline using the following command: ```bash -curl http://${host_ip}:${DBQNA_TEXT_TO_SQL_PORT}/v1/texttosql \ +url="postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${host_ip}:5442/${POSTGRES_DB}" +curl --connect-timeout 5 --max-time 120000 http://${host_ip}:9090/v1/text2query\ -X POST \ - -d '{"input_text": "Find the total number of Albums.","conn_str": {"user": "'${POSTGRES_USER}'","password": "'${POSTGRES_PASSWORD}'","host": "'${host_ip}'", "port": "5442", "database": "'${POSTGRES_DB}'"}}' \ - -H 'Content-Type: application/json' + -d '{"query": "Find the total number of Albums.","conn_type": "sql", "conn_url": "'${url}'", "conn_user": "'${POSTGRES_USER}'","conn_password": "'${POSTGRES_PASSWORD}'","conn_dialect": "postgresql" }' \ + -H 'Content-Type: application/json') ``` ### Cleanup the Deployment @@ -97,7 +102,7 @@ The compose.yaml is default compose file using tgi as serving framework | ----------------- | -------------------------------------------------------- | | dbqna-tgi-service | ghcr.io/huggingface/text-generation-inference:2.4.1-rocm | | postgres | postgres:latest | -| text2sql | opea/text2sql:latest | +| text2sql | opea/text2query-sql:latest | | text2sql-react-ui | opea/text2sql-react-ui:latest | ## DBQnA Service Configuration for AMD GPUs @@ -108,5 +113,5 @@ The table provides a comprehensive overview of the DBQnA service utilized across | ----------------- | -------------------------------------------------------- | -------- | --------------------------------------------------------------------------------------------------- | | dbqna-tgi-service | ghcr.io/huggingface/text-generation-inference:2.4.1-rocm | No | Specific to the TGI deployment, focuses on text generation inference using AMD GPU (ROCm) hardware. | | postgres | postgres:latest | No | Provides the relational database backend for storing and querying data used by the DBQnA pipeline. | -| text2sql | opea/text2sql:latest | No | Handles text-to-SQL conversion tasks. | +| text2sql | opea/text2query-sql:latest | No | Handles text-to-SQL conversion tasks. | | text2sql-react-ui | opea/text2sql-react-ui:latest | No | Provides the user interface for the DBQnA service. 
| diff --git a/DBQnA/docker_compose/amd/gpu/rocm/compose.yaml b/DBQnA/docker_compose/amd/gpu/rocm/compose.yaml index f9585acf00..deaf099774 100644 --- a/DBQnA/docker_compose/amd/gpu/rocm/compose.yaml +++ b/DBQnA/docker_compose/amd/gpu/rocm/compose.yaml @@ -47,12 +47,16 @@ services: - ./chinook.sql:/docker-entrypoint-initdb.d/chinook.sql text2sql: - image: opea/text2sql:latest + image: opea/text2query-sql:latest container_name: text2sql ports: - - "${DBQNA_TEXT_TO_SQL_PORT:-9090}:8080" + - "${DBQNA_TEXT_TO_SQL_PORT:-9090}:9097" environment: TGI_LLM_ENDPOINT: ${DBQNA_TGI_LLM_ENDPOINT} + TEXT2QUERY_COMPONENT_NAME: OPEA_TEXT2QUERY_SQL + depends_on: + - dbqna-tgi-service + - postgres text2sql-react-ui: image: opea/text2sql-react-ui:latest diff --git a/DBQnA/docker_compose/intel/cpu/xeon/README.md b/DBQnA/docker_compose/intel/cpu/xeon/README.md index 1e816fed2a..08b57c4341 100644 --- a/DBQnA/docker_compose/intel/cpu/xeon/README.md +++ b/DBQnA/docker_compose/intel/cpu/xeon/README.md @@ -9,13 +9,17 @@ This document outlines the deployment process for DBQnA application which helps This section describes how to quickly deploy and test the DBQnA service manually on Intel® Xeon® platform. The basic steps are: -1. [Access the Code](#access-the-code) -2. [Generate a HuggingFace Access Token](#generate-a-huggingface-access-token) -3. [Configure the Deployment Environment](#configure-the-deployment-environment) -4. [Deploy the Service Using Docker Compose](#deploy-the-service-using-docker-compose) -5. [Check the Deployment Status](#check-the-deployment-status) -6. [Test the Pipeline](#test-the-pipeline) -7. [Cleanup the Deployment](#cleanup-the-deployment) +- [Example DBQnA Deployment on Intel® Xeon® Platform](#example-dbqna-deployment-on-intel-xeon-platform) + - [DBQnA Quick Start Deployment](#dbqna-quick-start-deployment) + - [Access the Code](#access-the-code) + - [Generate a HuggingFace Access Token](#generate-a-huggingface-access-token) + - [Configure the Deployment Environment](#configure-the-deployment-environment) + - [Deploy the Service Using Docker Compose](#deploy-the-service-using-docker-compose) + - [Check the Deployment Status](#check-the-deployment-status) + - [Test the Pipeline](#test-the-pipeline) + - [Cleanup the Deployment](#cleanup-the-deployment) + - [DBQnA Docker Compose Files](#dbqna-docker-compose-files) + - [DBQnA Service Configuration](#dbqna-service-configuration) ### Access the Code @@ -80,7 +84,7 @@ CONTAINER ID IMAGE 2728db31368b opea/text2sql-react-ui:latest "nginx -g 'daemon of…" 9 minutes ago Up 9 minutes 0.0.0.0:5174->80/tcp, :::5174->80/tcp dbqna-xeon-react-ui-server 0ab75b92c300 postgres:latest "docker-entrypoint.s…" 9 minutes ago Up 9 minutes 0.0.0.0:5442->5432/tcp, :::5442->5432/tcp postgres-container 2662a69b515b ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu "text-generation-lau…" 9 minutes ago Up 9 minutes 0.0.0.0:8008->80/tcp, :::8008->80/tcp tgi-service -bb44512be80e opea/text2sql:latest "python opea_text2sq…" 9 minutes ago Up 9 minutes 0.0.0.0:9090->8080/tcp, :::9090->8080/tcp text2sql-service +bb44512be80e opea/text2query-sql:latest "python opea_text2sq…" 9 minutes ago Up 9 minutes 0.0.0.0:9090->8080/tcp, :::9090->8080/tcp text2sql-service ``` ### Test the Pipeline @@ -88,10 +92,11 @@ bb44512be80e opea/text2sql:latest Once the DBQnA service are running, test the pipeline using the following command: ```bash -curl http://${host_ip}:9090/v1/text2sql\ +url="postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${host_ip}:5442/${POSTGRES_DB}" +curl 
--connect-timeout 5 --max-time 120000 http://${host_ip}:9090/v1/text2query\
  -X POST \
-  -d '{"input_text": "Find the total number of Albums.","conn_str": {"user": "'${POSTGRES_USER}'","password": "'${POSTGRES_PASSWORD}'","host": "'${host_ip}'", "port": "5442", "database": "'${POSTGRES_DB}'"}}' \
-  -H 'Content-Type: application/json'
+  -d '{"query": "Find the total number of Albums.","conn_type": "sql", "conn_url": "'${url}'", "conn_user": "'${POSTGRES_USER}'","conn_password": "'${POSTGRES_PASSWORD}'","conn_dialect": "postgresql" }' \
+  -H 'Content-Type: application/json'
```

### Cleanup the Deployment

@@ -121,7 +126,7 @@ The compose.yaml is default compose file using tgi as serving framework

| -------------------------- | ------------------------------------------------------------- |
| tgi-service | ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu |
| postgres | postgres:latest |
-| text2sql | opea/text2sql:latest |
+| text2sql | opea/text2query-sql:latest |
| dbqna-xeon-react-ui-server | opea/text2sql-react-ui:latest |

## DBQnA Service Configuration

@@ -132,5 +137,5 @@ The table provides a comprehensive overview of the DBQnA service utilized across

| -------------------------- | ------------------------------------------------------------- | -------- | --------------------------------------------------------------------------------------------------- |
| tgi-service | ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu | No | Specific to the TGI deployment, focuses on text generation inference using Intel® Xeon® CPU hardware. |
| postgres | postgres:latest | No | Provides the relational database backend for storing and querying data used by the DBQnA pipeline. |
-| text2sql | opea/text2sql:latest | No | Handles text-to-SQL conversion tasks. |
+| text2sql | opea/text2query-sql:latest | No | Handles text-to-SQL conversion tasks. |
| dbqna-xeon-react-ui-server | opea/text2sql-react-ui:latest | No | Provides the user interface for the DBQnA service.
| diff --git a/DBQnA/docker_compose/intel/cpu/xeon/compose.yaml b/DBQnA/docker_compose/intel/cpu/xeon/compose.yaml index cb9339b834..b12eb32f3f 100644 --- a/DBQnA/docker_compose/intel/cpu/xeon/compose.yaml +++ b/DBQnA/docker_compose/intel/cpu/xeon/compose.yaml @@ -31,12 +31,16 @@ services: - ./chinook.sql:/docker-entrypoint-initdb.d/chinook.sql text2sql-service: - image: ${REGISTRY:-opea}/text2sql:${TAG:-latest} + image: ${REGISTRY:-opea}/text2query-sql:${TAG:-latest} container_name: text2sql-service ports: - - "${TEXT2SQL_PORT}:8080" + - "${TEXT2SQL_PORT}:9097" environment: - TGI_LLM_ENDPOINT=${TGI_LLM_ENDPOINT} + - TEXT2QUERY_COMPONENT_NAME=OPEA_TEXT2QUERY_SQL + depends_on: + - tgi-service + - postgres dbqna-xeon-react-ui-server: image: ${REGISTRY:-opea}/text2sql-react-ui:${TAG:-latest} diff --git a/DBQnA/docker_image_build/build.yaml b/DBQnA/docker_image_build/build.yaml index 11d7f518b6..39d00f5916 100644 --- a/DBQnA/docker_image_build/build.yaml +++ b/DBQnA/docker_image_build/build.yaml @@ -2,22 +2,22 @@ # SPDX-License-Identifier: Apache-2.0 services: - text2sql: + text2query-sql: build: context: GenAIComps - dockerfile: comps/text2sql/src/Dockerfile + dockerfile: comps/text2query/src/Dockerfile args: IMAGE_REPO: ${REGISTRY:-opea} BASE_TAG: ${TAG:-latest} http_proxy: ${http_proxy} https_proxy: ${https_proxy} no_proxy: ${no_proxy} - image: ${REGISTRY:-opea}/text2sql:${TAG:-latest} + image: ${REGISTRY:-opea}/text2query-sql:${TAG:-latest} text2sql-react-ui: build: context: ../ui dockerfile: ./docker/Dockerfile.react args: texttosql_url: ${build_texttosql_url} - extends: text2sql + extends: text2query-sql image: ${REGISTRY:-opea}/text2sql-react-ui:${TAG:-latest} diff --git a/DBQnA/tests/test_compose_on_rocm.sh b/DBQnA/tests/test_compose_on_rocm.sh index e2dc0b81d6..1230889dbe 100644 --- a/DBQnA/tests/test_compose_on_rocm.sh +++ b/DBQnA/tests/test_compose_on_rocm.sh @@ -48,9 +48,10 @@ function start_services() { } function validate_microservice() { - result=$(http_proxy="" curl --connect-timeout 5 --max-time 120000 http://${ip_address}:${DBQNA_TEXT_TO_SQL_PORT}/v1/text2sql \ + url="postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${ip_address}:5442/${POSTGRES_DB}" + result=$(http_proxy="" curl --connect-timeout 5 --max-time 120000 http://${ip_address}:$TEXT2SQL_PORT/v1/text2query\ -X POST \ - -d '{"input_text": "Find the total number of Albums.","conn_str": {"user": "'${POSTGRES_USER}'","password": "'${POSTGRES_PASSWORD}'","host": "'${ip_address}'", "port": "5442", "database": "'${POSTGRES_DB}'" }}' \ + -d '{"query": "Find the total number of Albums.","conn_type": "sql", "conn_url": "'${url}'", "conn_user": "'${POSTGRES_USER}'","conn_password": "'${POSTGRES_PASSWORD}'","conn_dialect": "postgresql" }' \ -H 'Content-Type: application/json') if echo "$result" | jq -e '.result.output' > /dev/null 2>&1; then diff --git a/DBQnA/tests/test_compose_on_xeon.sh b/DBQnA/tests/test_compose_on_xeon.sh index 97cfbab7a4..976df29ce7 100755 --- a/DBQnA/tests/test_compose_on_xeon.sh +++ b/DBQnA/tests/test_compose_on_xeon.sh @@ -48,9 +48,10 @@ function start_services() { } function validate_microservice() { - result=$(http_proxy="" curl --connect-timeout 5 --max-time 120000 http://${ip_address}:$TEXT2SQL_PORT/v1/text2sql\ + url="postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${ip_address}:5442/${POSTGRES_DB}" + result=$(http_proxy="" curl --connect-timeout 5 --max-time 120000 http://${ip_address}:$TEXT2SQL_PORT/v1/text2query\ -X POST \ - -d '{"input_text": "Find the total number of Albums.","conn_str": 
{"user": "'${POSTGRES_USER}'","password": "'${POSTGRES_PASSWORD}'","host": "'${ip_address}'", "port": "5442", "database": "'${POSTGRES_DB}'" }}' \ + -d '{"query": "Find the total number of Albums.","conn_type": "sql", "conn_url": "'${url}'", "conn_user": "'${POSTGRES_USER}'","conn_password": "'${POSTGRES_PASSWORD}'","conn_dialect": "postgresql" }' \ -H 'Content-Type: application/json') if echo "$result" | jq -e '.result.output' > /dev/null 2>&1; then diff --git a/DBQnA/ui/react/src/App.test.tsx b/DBQnA/ui/react/src/App.test.tsx index 5b346e8cb1..db16928d3a 100644 --- a/DBQnA/ui/react/src/App.test.tsx +++ b/DBQnA/ui/react/src/App.test.tsx @@ -21,21 +21,17 @@ const getHostIP = () => { test('testing api with dynamic host', async () => { // Get the dynamic host IP const host = await getHostIP(); - const endpointUrl = `http://${host}:9090/v1/text2sql`; - - const formData = { - user: 'postgres', - database: 'chinook', - host: host, - password: 'testpwd', - port: '5442', - }; - + const endpointUrl = `http://${host}:9090/v1/text2query`; + const connUrl = `postgresql://postgres:testpwd@${host}:5442/chinook`; const question = "Find the total number of invoices."; const payload = { - input_text: question, - conn_str: formData, + query: question, + conn_type: "sql", + conn_url: connUrl, + conn_user: "postgres", + conn_password: "testpwd", + conn_dialect: "postgresql", }; const response = await axios.post(endpointUrl, payload); @@ -47,6 +43,6 @@ test('testing api with dynamic host', async () => { expect(result.hasOwnProperty('sql')).toBe(true); expect(result.hasOwnProperty('output')).toBe(true); expect(result.hasOwnProperty('input')).toBe(true); - expect(result.input.input_text).toBe(question); + expect(result.input.query).toBe(question); }, apiTimeOutInSeconds * 1000); diff --git a/DBQnA/ui/react/src/components/DbConnect/DBConnect.tsx b/DBQnA/ui/react/src/components/DbConnect/DBConnect.tsx index bd41665e73..41e1a5b114 100644 --- a/DBQnA/ui/react/src/components/DbConnect/DBConnect.tsx +++ b/DBQnA/ui/react/src/components/DbConnect/DBConnect.tsx @@ -42,8 +42,15 @@ const DBConnect: React.FC = () => { e.preventDefault(); try { let api_response: Record; - let unifiedConnData = {"conn_str":formData}; - api_response = await axios.post(`${TEXT_TO_SQL_URL}/postgres/health`, unifiedConnData); + let connUrl = `postgresql://${formData.user}:${formData.password}@${formData.host}:${formData.port}/${formData.database}`; + let unifiedConnData = { + conn_type: "sql", + conn_url: connUrl, + conn_user: formData.user, + conn_password: formData.password, + conn_dialect: "postgresql", + }; + api_response = await axios.post(`${TEXT_TO_SQL_URL}/db/health`, unifiedConnData); setSqlStatus(null); setSqlError(null); @@ -74,13 +81,18 @@ const DBConnect: React.FC = () => { e.preventDefault(); setIsLoading(true); try { + const connUrl = `postgresql://${formData.user}:${formData.password}@${formData.host}:${formData.port}/${formData.database}`; const payload = { - input_text: question, - conn_str: formData, + query: question, + conn_type: "sql", + conn_url: connUrl, + conn_user: formData.user, + conn_password: formData.password, + conn_dialect: "postgresql", }; let api_response: Record; - api_response = await axios.post(`${TEXT_TO_SQL_URL}/text2sql`, payload); + api_response = await axios.post(`${TEXT_TO_SQL_URL}/text2query`, payload); setSqlQuery(api_response.data.result.sql); // Assuming the API returns an SQL query setQueryOutput(api_response.data.result.output); From aee357f3c842ab9a90c507b7689b3d3cb15f1ebf Mon Sep 17 00:00:00 
2001 From: Zhu Yongbo Date: Mon, 29 Sep 2025 09:40:36 +0800 Subject: [PATCH 30/78] add EC-RAG feature (#2280) Signed-off-by: Yongbozzz --- EdgeCraftRAG/Dockerfile.server | 39 +- EdgeCraftRAG/README.md | 6 + EdgeCraftRAG/assets/img/kbadmin_index.png | Bin 0 -> 44330 bytes EdgeCraftRAG/assets/img/kbadmin_kb.png | Bin 0 -> 21912 bytes EdgeCraftRAG/assets/img/kbadmin_type.png | Bin 0 -> 25996 bytes .../docker_compose/intel/gpu/arc/README.md | 87 ++-- .../docker_compose/intel/gpu/arc/compose.yaml | 2 - .../intel/gpu/arc/compose_vllm.yaml | 5 +- .../intel/gpu/arc/compose_vllm_b60.yaml | 186 +++++++++ .../intel/gpu/arc/multi-arc-yaml-generator.sh | 5 +- EdgeCraftRAG/docs/API_Guide.md | 15 + EdgeCraftRAG/docs/Advanced_Setup.md | 21 +- EdgeCraftRAG/docs/Explore_Edge_Craft_RAG.md | 20 + EdgeCraftRAG/edgecraftrag/api/v1/chatqna.py | 17 + EdgeCraftRAG/edgecraftrag/api/v1/data.py | 73 ++-- .../edgecraftrag/api/v1/knowledge_base.py | 366 +++++++++++++---- EdgeCraftRAG/edgecraftrag/api/v1/model.py | 7 +- EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py | 84 ++-- EdgeCraftRAG/edgecraftrag/api/v1/prompt.py | 2 + EdgeCraftRAG/edgecraftrag/api_schema.py | 13 +- EdgeCraftRAG/edgecraftrag/base.py | 17 +- .../edgecraftrag/components/benchmark.py | 11 +- .../edgecraftrag/components/generator.py | 108 ++--- .../edgecraftrag/components/indexer.py | 54 ++- .../edgecraftrag/components/knowledge_base.py | 111 ++++- .../edgecraftrag/components/node_parser.py | 22 + .../edgecraftrag/components/pipeline.py | 197 +++++---- .../edgecraftrag/components/postprocessor.py | 2 +- .../components/query_preprocess.py | 89 ++-- .../edgecraftrag/components/retriever.py | 153 ++++++- .../edgecraftrag/controllers/compmgr.py | 29 ++ .../edgecraftrag/controllers/filemgr.py | 58 +-- .../controllers/knowledge_basemgr.py | 61 ++- .../edgecraftrag/controllers/pipelinemgr.py | 4 +- EdgeCraftRAG/edgecraftrag/requirements.txt | 12 +- EdgeCraftRAG/edgecraftrag/utils.py | 11 +- EdgeCraftRAG/nginx/nginx-conf-generator.sh | 2 +- .../tests/test_compose_vllm_on_arc.sh | 2 +- .../tests/test_compose_vllm_on_arc_b60.sh | 173 ++++++++ EdgeCraftRAG/tools/quick_start.sh | 306 +++++++++++++- EdgeCraftRAG/ui/vue/.env.development | 4 +- EdgeCraftRAG/ui/vue/components.d.ts | 2 + EdgeCraftRAG/ui/vue/index.html | 1 + EdgeCraftRAG/ui/vue/nginx.conf | 2 +- EdgeCraftRAG/ui/vue/package.json | 1 - EdgeCraftRAG/ui/vue/src/api/chatbot/index.ts | 11 +- .../ui/vue/src/api/knowledgeBase/index.ts | 89 +++- EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts | 1 - EdgeCraftRAG/ui/vue/src/api/request.ts | 5 +- .../ui/vue/src/assets/iconFont/iconfont.css | 14 +- .../ui/vue/src/assets/iconFont/iconfont.js | 22 +- .../ui/vue/src/assets/iconFont/iconfont.json | 14 + .../ui/vue/src/assets/iconFont/iconfont.ttf | Bin 14368 -> 14948 bytes .../ui/vue/src/assets/iconFont/iconfont.woff | Bin 9284 -> 9692 bytes .../ui/vue/src/assets/iconFont/iconfont.woff2 | Bin 7836 -> 8208 bytes EdgeCraftRAG/ui/vue/src/components.d.ts | 40 ++ .../ui/vue/src/components/PartialLoading.vue | 70 ++++ EdgeCraftRAG/ui/vue/src/i18n/en.ts | 119 +++++- EdgeCraftRAG/ui/vue/src/i18n/zh.ts | 125 +++++- EdgeCraftRAG/ui/vue/src/layout/Header.vue | 2 +- EdgeCraftRAG/ui/vue/src/layout/Main.vue | 5 +- EdgeCraftRAG/ui/vue/src/router/routes.ts | 30 +- EdgeCraftRAG/ui/vue/src/store/chatbot.ts | 1 + EdgeCraftRAG/ui/vue/src/theme/common.less | 28 +- EdgeCraftRAG/ui/vue/src/theme/variables.less | 3 +- EdgeCraftRAG/ui/vue/src/types/global.d.ts | 13 +- EdgeCraftRAG/ui/vue/src/utils/clipboard.ts | 100 +++++ 
EdgeCraftRAG/ui/vue/src/utils/notification.ts | 17 +- EdgeCraftRAG/ui/vue/src/utils/other.ts | 4 +- EdgeCraftRAG/ui/vue/src/utils/validate.ts | 9 +- .../views/chatbot/components/Chatbot/Chat.vue | 136 +++++- .../components/Chatbot/ConfigDrawer.vue | 6 +- .../components/Chatbot/MessageItem.vue | 209 +++++++++- .../chatbot/components/Chatbot/SseService.ts | 55 ++- .../KnowledgeBase/DetailComponent.vue | 93 +++++ .../Experience/ExperienceDetail.vue | 225 ++++++++++ .../KnowledgeBase/Experience/ImportDialog.vue | 100 +++++ .../KnowledgeBase/Experience/UpdateDialog.vue | 387 ++++++++++++++++++ .../KnowledgeBase/Experience/index.ts | 7 + .../KnowledgeBase/KnowledgeDetail.vue | 336 +++++++++++++++ .../KnowledgeBase/KnowledgeDetial.vue | 302 -------------- .../KnowledgeBase/SelectTypeDialog.vue | 134 ++++++ .../components/KnowledgeBase/UpdateDialog.vue | 127 +++++- .../chatbot/components/KnowledgeBase/index.ts | 5 +- .../components/KnowledgeBase/index.vue | 172 ++++++-- .../vue/src/views/chatbot/components/index.ts | 4 +- .../ui/vue/src/views/chatbot/index.vue | 18 +- EdgeCraftRAG/ui/vue/src/views/chatbot/type.ts | 3 + EdgeCraftRAG/ui/vue/src/views/error/404.vue | 11 +- EdgeCraftRAG/ui/vue/src/views/main/index.vue | 6 +- .../pipeline/components/DetailDrawer.vue | 31 +- .../pipeline/components/ImportDialog.vue | 12 + .../views/pipeline/components/QuickStart.vue | 2 +- .../src/views/pipeline/components/Table.vue | 11 +- .../components/UpdateDialog/Activated.vue | 2 +- .../components/UpdateDialog/Basic.vue | 26 +- .../components/UpdateDialog/CreateDialog.vue | 21 +- .../components/UpdateDialog/EditDialog.vue | 6 +- .../components/UpdateDialog/Generator.vue | 182 ++++---- .../components/UpdateDialog/Indexer.vue | 262 +++++++++--- .../components/UpdateDialog/NodeParser.vue | 95 ++++- .../components/UpdateDialog/PostProcessor.vue | 37 +- .../components/UpdateDialog/Retriever.vue | 135 ++++-- .../ui/vue/src/views/pipeline/enum.ts | 15 + .../ui/vue/src/views/pipeline/index.vue | 14 +- .../ui/vue/src/views/pipeline/type.ts | 3 +- 106 files changed, 5084 insertions(+), 1208 deletions(-) create mode 100644 EdgeCraftRAG/assets/img/kbadmin_index.png create mode 100644 EdgeCraftRAG/assets/img/kbadmin_kb.png create mode 100644 EdgeCraftRAG/assets/img/kbadmin_type.png create mode 100644 EdgeCraftRAG/docker_compose/intel/gpu/arc/compose_vllm_b60.yaml mode change 100644 => 100755 EdgeCraftRAG/edgecraftrag/requirements.txt create mode 100755 EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh create mode 100644 EdgeCraftRAG/ui/vue/src/components/PartialLoading.vue create mode 100644 EdgeCraftRAG/ui/vue/src/utils/clipboard.ts create mode 100644 EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/DetailComponent.vue create mode 100644 EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/ExperienceDetail.vue create mode 100644 EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/ImportDialog.vue create mode 100644 EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/UpdateDialog.vue create mode 100644 EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/index.ts create mode 100644 EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/KnowledgeDetail.vue delete mode 100644 EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/KnowledgeDetial.vue create mode 100644 EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/SelectTypeDialog.vue diff --git a/EdgeCraftRAG/Dockerfile.server 
b/EdgeCraftRAG/Dockerfile.server index 4ac52700a8..119b0ec3f9 100755 --- a/EdgeCraftRAG/Dockerfile.server +++ b/EdgeCraftRAG/Dockerfile.server @@ -1,30 +1,17 @@ FROM python:3.11-slim SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -RUN apt-get update -y && apt-get install -y --no-install-recommends --fix-missing \ - libjemalloc-dev \ - libmagic1 \ - libglib2.0-0 \ - poppler-utils \ - tesseract-ocr - -RUN apt-get update && apt-get install -y gnupg wget git -RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | \ - gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg -RUN echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | \ - tee /etc/apt/sources.list.d/intel-gpu-jammy.list -RUN apt-get update && apt-get install -y \ - intel-opencl-icd intel-level-zero-gpu \ - intel-level-zero-gpu-raytracing \ - intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2 \ - libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \ - libglapi-mesa libgles2-mesa-dev libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \ - mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo +RUN apt-get update && apt-get install -y gnupg2 wget git +RUN apt-get remove -y libze-intel-gpu1 libigc1 libigdfcl1 libze-dev || true; \ + apt-get update; \ + apt-get install -y curl +RUN curl -sL 'https://keyserver.ubuntu.com/pks/lookup?fingerprint=on&op=get&search=0x0C0E6AF955CE463C03FC51574D098D70AFBE5E1F' | tee /etc/apt/trusted.gpg.d/driver.asc +RUN echo -e "Types: deb\nURIs: https://ppa.launchpadcontent.net/kobuk-team/intel-graphics/ubuntu/\nSuites: plucky\nComponents: main\nSigned-By: /etc/apt/trusted.gpg.d/driver.asc" > /etc/apt/sources.list.d/driver.sources +RUN apt-get update && apt-get install -y libze-intel-gpu1 libze1 intel-metrics-discovery intel-opencl-icd clinfo intel-gsc && apt-get install -y libze-intel-gpu1 libze1 intel-metrics-discovery intel-opencl-icd clinfo intel-gsc && apt-get install -y libze-dev intel-ocloc libze-intel-gpu-raytracing RUN useradd -m -s /bin/bash user && \ mkdir -p /home/user && \ - chown -R user /home/user/ + chown -R user /home/user/ RUN mkdir /templates && \ chown -R user /templates @@ -33,17 +20,19 @@ RUN chown -R user /templates/default_prompt.txt COPY ./edgecraftrag /home/user/edgecraftrag -RUN mkdir -p /home/user/ui_cache +RUN mkdir -p /home/user/ui_cache ENV UI_UPLOAD_PATH=/home/user/ui_cache USER user WORKDIR /home/user/edgecraftrag -RUN pip install --no-cache-dir --upgrade pip setuptools==70.0.0 && \ - pip install --no-cache-dir --extra-index-url https://download.pytorch.org/whl/cpu -r requirements.txt +RUN pip3 install --no-cache-dir --upgrade setuptools==70.0.0 --break-system-packages && \ + pip3 install --no-cache-dir --extra-index-url https://download.pytorch.org/whl/cpu -r requirements.txt --break-system-packages + +RUN pip3 install --no-cache-dir docarray==0.40.0 --break-system-packages WORKDIR /home/user/ RUN git clone https://github.com/openvinotoolkit/openvino.genai.git genai ENV PYTHONPATH="$PYTHONPATH:/home/user/genai/tools/llm_bench" -ENTRYPOINT ["python", "-m", "edgecraftrag.server"] \ No newline at end of file +ENTRYPOINT ["python3", "-m", "edgecraftrag.server"] diff --git a/EdgeCraftRAG/README.md b/EdgeCraftRAG/README.md index 314feb3480..ba30b7bf4c 100755 --- a/EdgeCraftRAG/README.md +++ b/EdgeCraftRAG/README.md @@ -5,6 +5,12 @@ Retrieval-Augmented Generation system for edge solutions. 
It is designed to curate the RAG pipeline to meet hardware requirements at edge with guaranteed quality and performance.

+## What's New
+
+1. Support Intel Arc B60 for model inference
+2. Support KBadmin for knowledge base management
+3. Support Experience Injection module in UI
+
## Table of contents

1. [Architecture](#architecture)
diff --git a/EdgeCraftRAG/assets/img/kbadmin_index.png b/EdgeCraftRAG/assets/img/kbadmin_index.png
new file mode 100644
index 0000000000000000000000000000000000000000..7383a01c796d84f0d3d49a112d87e1e71a21efcc
GIT binary patch literal 44330
[binary image data omitted]
diff --git a/EdgeCraftRAG/assets/img/kbadmin_kb.png b/EdgeCraftRAG/assets/img/kbadmin_kb.png
new file mode 100644
index 0000000000000000000000000000000000000000..40f6909a9b52f74256db955aea6a099709af7ecb
GIT binary patch literal 21912
[binary image data omitted]
zxGe@rSL5Y*Ev+!i)IDWw{th+gPKA#~#YMPIFB8=ant6|FGp!aZ#Kx65LB@m`r*B!Q z@K$G`P$EG%tIh3Vzj;`w*n(9uN=GcO^n7^rA;_qnQDUrvBm}WIw%xsu#K{%fgybZ< z6MT@wO$rp}Inw}OYwQhO=C+s<#1;UTE^&aHarKjQ^P;hEuNZ*S>c5Bq5UrOW)Hvm> z3<5<<_FsJ${wlTWLmMTAno94cpOXD$*S!1hLbv})*XI8}%>Eyu%Kv9?t+?0(puVD#eq(RS&x+&cufK+3hMJaj;@Is+b#fY zE{T)5{=5F(v48c-zi;rpN{5(y<21F21u?q0RJjbcIXOt3Kj^$Y@uqxHNfHOFNq~P*OM48S;MU5S(NU|)zhjB| zV-8YjQ|reP^!%rahdm- z`xAp?^Fz$d32K4`YM4GU)oG+y;)JPsUUgpNGOg_dort-@%KY;_3vPvb8J2XH@eH!TSU1^KgsB^YcspSnZR^loek1DpT%a56Vq zmwWbao}u4?n;#kiex`_!-K(jGK#8;{fX?{{s@PIct8u^(}QN4f$iLC4K zvffC?bW91pBdV`Ew+l>f-+!^yi5D!71ng+x?!4#UKuJYC3_khmFlKj>>tOJr76O6n z*)(i#mDB&ZJdn-22awa%fCK!~1&6eTcH7m^;$6JJUgrcd(2J}Jpr!mZn~=9YmDU`v zAvGKgI7(7&P3q|yoR5J}!=!Q5-0}a*6s(Xx&5|0}qqSZ?Zc$^S^|m}x>|T7L21U<^ zS=j0B);_cBr8|j;EW_-@1Lz_i`^zS^M{Vo-{-Rf(rQFG8R~Nx1qh+z&n&gwU?G`63 zH6RA>!e6HoP#m<`c_m`q&c`Xx9rH(whX#>8~hZvQi2s$h_G_Mj+)$cqaq z0Yx>*%Yt~)RKZJAXUBNyyh20)_QOI}AM8KDyHI!nLb6o14(Hsn3bW)9+g-+KZ(U=L@G2W1Bm# zGW(cf9*ymNlOZ^-E&Td&T?Tib^e!}O?`2({T`@LXQM57T-y-m^<0<2SDJ07iuqXJD z63rBjn|W(4i=mtSc5Skj@2fVw+#gu_r%(B^s1ke7TFtKtEEXWvy1)p6l=j zk5ZHBBl!D2O@XD*pNs2!d3v+Dq}tCYd`SYFG@-?$)g`6{aHPWU7$b9EgNi)5#Lgpq zN`Zv~{l|=528naXx2d*&*>uav?3BBu3}QPaxbM^U3f9g6VUV@I@ZhYmhyCO`F=#N* z&}}~J@;F5NhrG1vR_Fq_#=PEeAkw8*;s>~Rze~dQcr@_wW($yYcL52(SilVo7YU(2 zVC!(I`s|?3+{&Z%^c~v6rECveA2JsAoNsPEhV_N9F~U)2d7cT|RsYzI6yxOKfj@Eo zXn*=`{K!uLC|8Ck%M|}_P01}{Zl$#;&8pl!9|R0HKk{Jpzv`81dgzEvOlf-E$o%*y&&MvT7 z&)WEZgqpff23V#v2yzp>{mPleIqn4Hk*X))St|10Lq%3HpHn=fK$mbTS}y&d4F!G-Nz{nJ63cph$&gO zqE&3`k#pyFWdVB2J+qT%?NWUamudYKzlj)%k1#M537d?8J7Y^+>3S_-YS*sj(T3lj zx)GAGTT>!cr>)EPlL?x3W&C~RtJ2*&218O)j)ezxB7I$oLhrjNAEn#Qj1XM%rXG!z zX2&_;PV}i!Xzuo{uRK2cIEondMi)m`xw79C<$j@iv7RVD`%ig2|EPNS_V(3X_}fwe zKJ#0Lc^AK(WL9DEhQO*Q6j2-WOy?LCCDuB2CqYUh6}cGk_8~UYN)49KXIiLj0y_T5 za&RwKgSBKxEIN8iWA}v`r$0@vMXJ&-VW?u8NA%WGZ!aTfX~`blpJI^m(f6Mv1Vtif zl1O~>z8-|OtMWoDHI_X;QK|VD9+4U&{I>!F;!OO|r)qJUhsO)=dUS_ZG3<&*l}+GQ~vOpM4wO{xk5P%B)k-z{>{G4?c5j zIcSwMZhi4vEzpNt6JOv?&?$KEgiGEx1a}>x4qG4ZIP;Z{S!^uVKsW70-Us}P=zxJ# z&nz02J_9hu2zbjzyj_oReqar-Rwo8(%M^E)x0>E*#Q4}g8Lx$km($vT9^1}$9Degy z6tDF5lO6|y^_-i|f6cvrrL5zuW0BeJJM();7P-GP{Fg6TYvyuy71R+kAIyOVbfb!DuFMfZF2&9Q`@pK5DDE4BvfHqFp z!AGK;rx9RSs@)WXtXtKZ>vfeYCFx2h5`a0fr`lIx%!cr{07#6j{XqUPz9O-k4)0%b zE}Zd3^Y^801Ym)+WQA?PEFh6|T*w?pb0hZdty*EYM%!UdOW@Fhz5G)@ayZUlgL{${ zX+3R^s15J92p*2L%K|sX{VP}ku28PXJI!Bw$EhFWftGShd~_BHY@Rff-P?#XOwt$A zq7}2ynxE7s>kPk-I*X+Z3U%01YTkm`<^?}7ttsf08ls=SXr9#^eCzUMP9sQaF~wN*rz&f+W=fr&*w~K2k*Tj_wUH!Q0AB|oarLG z24kWv8nfKq4CIb66m4xBjbiQa;+J!5<3OR88#l_MCaaO0@J{v>Y2^E;X*aP&_l_Qqq`u^WQb^Vg7CWpCYR!$N1cQAQG>a192J2u z1wjkLrl`c~9zseeSsn!-A3nbiSRhR?&p zJFVu2aY3kxn4eKmAj7a-ppK{qarTwk$rxDESNs01kp$&``^yDQcry-=)&5skPyUzU z@V_t$aFgn~++fM+v~#JyDm)>;D>5E{KyiQ9VE*Ss_x}e{-!)4FjwWftbE&&aY zLEM`qwHO7z_tfN*=k*J@6jN{n!lRWfp^(*4fOCoeUX`B0oQ6~YZ%PY0~e?Ubv@NP z_DNvsCd`f^HY&=!^WXbjE4k9Qf=@z%pwKdr_I)M4ND*^?+w_1rrE%74=-p~NxAj6S zTxz`Jovx$z6T&1aD|poV5o4TUCpZGBh0{+nOuUbHzVS^0%=~9Ml)bJD_IY!!T7u0^ zV^X)L-p0J;p(H)ld{6>x`u73q!p}3_T8kFA@-2bK>MEIdVxIpQ>!o+>xt^h^;H)V^ zc4p;)7=xuB+8ckm-QZI?o5LHkAl~V~o)0omc-TL7sQ-Jl$g7>^6ko63q>xsa**qrJ z0O@ax+U`++LU*gSJ}~wFX^&!4m+_;$LvnsG_)4ym#<}f;d-T3E)_1zWI?Y)z_+ob5wmN@o z^~+&g7xRl8^ty!g3BxQ!|9HNOU44zm{^__wvrmZKEN249*2Jq#M(U(v;;45!2Z_xB zDt)KU0y;$9zkWVQ-X&i02V7Uckb1w^Y&l4t=^=YN-qhonle#Za@noV12`Fq9V-gTH zXzE826MDERZ0)>|MBl~6-Xd(*Yf<`&Jx}!%gdJUreBvBF@89pLaHNlXA_!eDf=se+1#MB5vdxLy+koBH?zIWBX@c8s~j7dTARbZ z@WOS$|Ix^~hcms$ar|`Zh$$&0S#;q<7YQY~w5R2^XYP?$E^CpwQ<5b+x$PmXNJO>e z3|-LXZZbQGq*5A@P1fw8wARAdO54u&r_*_!^WXXR_xzsk^ZdTQ-{m2PuTYiii{9_x%+Q8}0#8>5Er?rs-b 
zZ6M$xByxUR-NxBoPsKT#550Ds`~Yg-F44o=9T}eX@Zxh*9dO*``5v~xC=)TKYP>|( z`jxMERfFZuUC;JRi>J%i^W~k3rc7$QGXnID9=9|pWFySiB(H!|wW%7B=Iiq2O{+Azu?J4Rgb27)RL56M<=7kwtI+V^h?cO%L=9@Iq!D5` zECOGicjyd_cPvhuq5SfmT|`kV7f$FQx@}rB4>xTd#LknbsrMTka~8J3y|!N$o@GJ_ zTwT=8sB0*1m4*$v9Yy*(2mWy_AyJV)Lq`o{A7;KUu2#2;WqRXL{*$w^ROQnb@lP`7 z#;!FE2Q>uZI*|ls7sH&=f4lBAaUo?y!U8+hUGd0*NWBZE$5K@AZiZsK_2uLHd8|E5 z94|*$wUj0OHp{ndJ86%ZIFU6t=2dmCKH^zPpFK2Q45Eaok9= zp*OfC)HIhCW%G(^i!LHh7Jk~}==39MZ*GW%SEq=jQM~xKdP~c&w6DHOzGCFTnA~d1 zqKEDvq2qd$UvIs&HCa_AG7d7`p7KnzS7OOCn#YAdXKH#6tKj>PTGMX^`Nr^malJ%fRvfrxRIk`uroFIt|`_%&F*N$XpUn6&=eND66HDfFs zSI?SEG88_p=H0$uvUrgoZ&G<72#M(?!m7LFl6Wv$%YM>$UTA>}GYV%v_d}m0aTbhFCh3}2Lu(vn3oS(Vh|(ec0SpEcfydWY zKtWTG_Szd%nXBt#5+&^>;8QhU1nkFw~j&0nNx3@xt&)ne<&AX{E zp(eOFMl8CQ@b3+_$5lhwP=HL9=wGa>$cswq!$gMoU%YaV35T3f;*|Eb!~`Y^X+R$7-bf+zi6r=wi-9ytTV z0bzLMn_%|aEfxMzGn{+clt8_XPcBrg5POx0fI^xm4ECl0j8P~LU*+CWDIzgGf8l7l z13TIeBO26MEI%(SEg@w!P7pPN{bI&a5V)7S?5Sz_rG2w=IJ(I!t+2MI~w zH~(UGm_c^kee8@n&ACulYGvz40Y5TT4_tNvwJi&@Ia27O0Dd^L;5M5ZK<=5axd02O zS7v4qPSSY%7658G01L)Xpr+|W0~Wv@V8E-h%>j&M!I7)U^tye^YtI_1WB^0BZ6{zi zfQ#nS0pESZoXq@$r$nC-faKI)`vc{6t*a ZctBP*e#%tG1<+J~K{bvR1`v0sZynP5D5XP zB2DQe1Td5cNJ&EP1Oj)(z3;g9j&bfdd!IATkBl`|d)Hg%eCB+gC#Z*pIv37yojY;j z#DxdCn#LzioQ9t`@z)IF8Q{vVbe10AaPqma&ixY={X8V#i z5trlm*xtQMV2oos*5nk;3A~KR(SH{f5ukTfX2t(%?QJ`zGa9oVw|kF+M}Ja!m4bG@ z^zUbkHxesH5z9@Z@Tbc)qwpY@DrNVe{CXFE=wYvEb=yVdJl|26e4q4yN{uQ|u@gQ} z@VC#$M+b%5qqeq-e*sqpH6jy)?k77YSr#mw1kTk5QWkJoN9aL%L$1BLzS?}SqK}UP zPEH`E-hMD}9iTt8crUV^e(AA50R8m4<{G`Mx2Ru#-ePS%51gsre8~wM{?R`T0uCoW zFan);;=~>1M*87@@?!GIU*)w^ddYBv3w4(NIOR!i3WSn2@MR%0O_e$nS8(c*%I&80 zPHwH1sZ0i-xGPLll>k)y9;LEHmG{}#lO*7!0IzQ?z$1kN!Sm9j(4&P=E5k#E_&q~r z*LMGMzckP0z$zY(#`Vsvsi0N-8$%Li+ykrz6poDxJ)HG9SgswV?o=Gzz62D1dvMET zZKGFuAdj|7RO3bNO_Np{Rz1#I3iw3qcE<4&3tM{x<5b?qs!|tyzMT~wrv7Hn*Rxk_ z-`M+Mgv1(ue*9%F3EcMP0=TrY8DZsKgO5Zum8y+M4uKwlN zoghm3)OHcPOAHOQ3YdP4OKv%w^ZDtR%$_gm(Y!zJ)33OY*C#hF-Y0EDbkwMwLOiWn zD>TZ2Q*fIX6@S0=S;+9`fSopISgC)?tU{Xug+92Xy7x$`H-cF(avi!Ms6#3Eo}PavDN^-;Y|f3Y0pw2g)|f+F{%*@LjkJX`GP2$8hGO-^ z%1uv8Y!0Y}T6s2YV{4nY%V%g`ItH^fqFB_$DjU5KwCz!x+R=I(K^Z7pT}O4h*eL%J za_v>dMdR*4p-z<7^TqgIb3;1(5_<8P-&tOO!^C-u<@TswPhHAu+98xWpTDWs74|{e ziNig2sd_-wCEh?zbZY$hz+D`#{{*BF8mTT=ux)$1ZQJoos?R8tiX$Cv4tN$5HU~8> z#vku}anH{ZlyeC)MKF`_u#+(2LyII)rqOUC*OtD(n1QW+`m zFZ#z@g=&2mac>EEprMQz7ico3C+!xm=z`F^+RL(( zh0x;zB5ByfNP^mQu+mU--K2hp@HFZcYAzd}k8 zBOl(Jm!DUjmqor}gm)A3m8XI?`*Ma2dNq8cRv|O<1EELH!F%xzUjQqnF6jOm)O58z zO!X`r9<*FTsIIVn^1%@c(c#5ocfbib5wAIf_lIsTuT8f_9bGp(N{0FUoVobGNOk}5 zkSAa-jJVy=5Ux9qb=Q^ng(LUp>@Byqya9XiPwa3wsEPMaD^%BBTw;sMIIBc1 z|J;4cg_NIM(c~)W!tAem&M^YwH||~wLy(ILBkgi-fFs_sdkN=X+4=bDqMq_fox@PS zJ`?ULjMf;diT^0F&k74;2AXIK@qQd7fhoE$)V#qlU=}4=@7!}oQlsLt3>e+O(rThO z0I?S#K+iJw`aorvY}hXdLH}K?{-QVkceMmWBHmwxTqtOC^fro6zpiH@L(b9c;Pz~M z*SwjrnisM=DGviyoK|9{!aBdA_hX26Ua1nbu|I;W4~Y@2XS8YFsi~|vm!!nfBUN8g zNApEbV!wy0Ze_pOg>?GZm?gFSlf0P*b+i%j8dZb8B`X z9)S}m7A`NRG)S&C`^kF=jS=H~U9hNCja$oNoZ8_Suzqiu3+3x~O8H23j z!v$+Dw-^@Ap|z)`p425LHESk1)?i!ysM#R0Z4UFr3y9PKz#-E0cFFb6<6_|=H0FF~frls-@u9a*Ct{p{^(8r92tTbp#HZhD#KfPi zW%_}%1Ppje)@+y|2+%q;iVBu}_=VVm_kVPxkISep1uSaH><|(p}SuMA3Aw&Z{J~qgl1c1z=v@mZymL z6qCC@P2!z|{$L`6G4xf!l*s@Spd9>HRmJlBuA> ztG6hY{~lNUw}34Ex{=WbrRp6X;Di70$vl1%PXK@|00rNcXw#4X6{!1fKmMPb4W|+uN_74t!j=z)jU&z6$jBL#C^D@)qd}I)`EqVl^|@v^xdH z6&e|Nw$S|6ZT`UEE%bM80sn}@>)sIoQ`S`R02F9{#M*P4oJ>cw^BzqZI$M@&ETZu& zlEAsTXOxP7H)?87xD||E$jPIR#~}2KFDm(8AoolI;6qHM=$Lr^%)dvCTIK}<&i`pO z{LiuS)jQP>mIEsTXJh1DffgBrgiI5vGo{xn9dJ+(Lbb^zXwp905zDL-V;EFs8r1hu z-mPev^R_<%nD@8@Q4?(FVOMBgwnjJy4IWC{-l8Lk0!Z 
zn|WHCFm!-h&$wp`tgO^-DsaIdO8m3#ABV8aqRFO#2IR zpFA;L^>F%9Az=70Bb-)I@gIU#o9Ir{z-8n253QC&C|uDU0J|;#ob=p19o|am7OT_` zrBQ5g)8YJ?=GN{tIYeNQW_CtjjsJr&0dQEg$e`G?Kc7Ydkh(a=;Jo-Jbt`+65Ox%B z7-#^6jlM~l1OR*%9qr8op1uvZ952Gt$|mf=)NXSQzmIlV5Y)y?s~$jJxGfUyX-Jwx z-LIE_VhJh$_F!SRnZ#cvBPEZX#i>%AQvpPV1&|0lFJr5T?-INq8k~FaO4dz}I)~J? zCIhT}%@ir40SonS5{JnKKdzr1K6$w@f#~qD*HaCj^(q;_w#t)$6LtY`giF9*TYy0; zusx=Zl9H@FL@O-lZ8f3;?d8cSX?Fse9#`Z^)J|YgR%&cQTu1;bSnLB3lJZAC{{(3HeXU%Q~v|eo=J*V z_uiq~^yT>F^RH}eRO&YSm7|s)hJAJXksOI?%0WxJ6Flw19ZPTxl-B1baX{KH37pjxQs%{)}*H)gLTcs77<~ zdaYRYWj>YkR^y`R``r~0D4Nl`WW>TD#%Ihf+&8E`rk%Z%+oSF}ukSF#<%dvm**34r z=f6SL%So9E!A({Jx3BZo<2pzik_=aC)@Mj-eYiTQ@q9c5R5yWyTW#0rq)x52zRhEJ zl?+t>fpu(#d5~H4_m?iqF&N!0-rILV9$#jlol*=Ct<>*gPpj+pAH2tqzdce~fvfJR zVr6UGvW>gDV?q5nfHY5UU#jZj>oA+%$hpr}+zbFX9N-Hq8e>DI7}WM!X9ArPS`Gj( zZb8Ah73U{)H>-W__ciiO8)82%fqqzOEai@xeb*z|P`2Zmq>z2|T}Rb8DACBe3*J;^ zhz+Tl(o{bLK7+wm1vmi^ki(@g!%q+RSrPn+A<8y7ZIk{)Si+~`+8;O5wx~a@H-QWj zfrU!m>QXzZs@og9>NPZ@cD$*ElN)!#<*()2j3N+|Tp|m|S9b#-=5_HiB?F|wtjrx^ z=yA@$NNuwjPed>f5g3K@A)FC2DB;NXlnt7z|JRcYY+6_)!6RoN!~{jHH;h!)H4nPrm(5sbQ}U!?Oylg#-t7T%)XUl6^nmu&@pyZh(#Cf|?g}To zL#3jnFbM`~IrK)%B|=>*m4xr7_GhZ-JmRM4hxOroW$JU|RO{>BsywV!^UA)dq_jhi zZj0cTV6{aR>>nPB}7aC636$T z3qCb*^E9n1-bmFFear%dkHGvVXI%ApE+1}Y9hY5bosb-fQy7{6aBkH0V{G1o8hkF4{D%{P;Ec#rF7G04cAiFnFz%q4vh4 z+WrrtNVN%nAbH>e1PmM<<+;tyvKK%NW6yLPz)a6ZZQuFb-GAxxo76X%tz$c611lQm zFc^1N70>rC%yS40fszTbX~@Npo^cyV{vAHCbZ@f!kI+iTHrRIelFS1gX`OIyno6vpmTUgDK zGHzopR>Gdx-EggDRL)_E+krRwa_kqVsYdjzy6tY`f9N3>!QD-@v%IoGt*Pz-BJ(02UXc@PmYk{~$tQ3H>(qA)(v;G5%C0 zb>=0cy^M-vvgNRhdX-D27}HLg<=V~b^;BlCw|1glI z`niuf=>c-gfcb%Sm^_uf^M;x!9aW5!=gMz=dqw)qAM=2+jL^68EiCXDoy8UL$AJAu z3*{YY|zk-y`JFZuwv4Z#%z-mX{oRvzo#qRg2{hzd3`EcX1}us zo5_G|37kl&Jq6N*nn8cW&fg_;qrAHGbt_eZ9SO4Yvx8nP9%6AONq%RCL~LxDM7evS zjX_?!sTD;WKjYkubjG8}w})!XPGgYVrkURp#~Z2fb>7O*40x}J43~{zE;9XdfUjc3 zvQ71vZO4O7`zQ%>ws)}=^~7S%Y#^NoYTCl`tWsuvE2%1TIhDIUZ4wQgt$Tlk0S)Ps zhwB`BPezY(cX{&9b|6eEH0G4dT%U%Bz6wtXM@aF{EhG}GyT(%sk0eK4FV^D}9!HzR z5?Q;6x%JW9G`a2Zw^xKqa@rkE-y3RvsL~k(+rQAgEsPx9sI5REzuXmVpl;`y8Wt+K z`Kl7E_`xwr8or0r{fL1Q526zF@N(s>w0%fSpbMp8OULUo7pGosc{Sr|H&(>`Z&`QL z98Z2mjzLjH!sQ~{=Ix#q8_a;}fm`V`S9uSwL21*no=VcJ7L<0O$M;4O#Am%xX}SM! zlyI=rdb8QVCeQUo$pmuxyse1jw@M(MuW2stsFR9`O`zC>Nmey?Qn@+hxM;fsYD`Yl zpcLjg1XfKQ?^rcC>aV&eL$yuDNpuwxGc`GB_oKOS&S-u~d}Vmd$UCZC4r1id%>dF% zXN%k9_)a};n9UGMVALx-+!;cPRbCOn4p@6O$J^N;rTPRkhEtF2YX7$h`ZxuCf=-0 zrtPR4VSb(pw@*#ZujWqyxy!kh+owJ)EGB(pH3H`z6Sm`rib2hU!^bh~k+;e7=Ow{~ z#AK{eS{Hg$&$>JT`#XimPn5Anr5u2`(k_+Z%Q$?V`f3f9x6LgZI{EK7JItz?wAkSkds& z%&%)0PRI=CFOtYKr}FB+$2dCVL@`gRXa{6y zixpd|MAT82oC0}LC+NDPV0zVZR3O(~ginvkYpUPpoE0B^;Wwy#O81NHiED;Qxwjym z6Yp*!lP#R=CsC*RKikp}CYS@}7B9B-xHJFE*2bM#bVvz+x!;i9X@xl6>U1wfK!PaX zhW|$Sc@uQknOD$zQm@(h2at{T(R+O(3kWVRX8khANlHQTu zwtE)TO{}dCo~@15ZYmvrtMGu9b-bUI^A5KmU&PlI-ft*2>c8#N|Y3ZyoQ3 z9@hqdc9MrcC7X%v+=&OR{YRsEr(DuTc)eCQj$g-WrioX@pgMiH-B)L>l$Ng*uu*J+ z&A;$RAKR3*%}E)FBHJwF@9zVah^@ueEq*4@ZNXlRZB^FwBc|LXnG)hQJ5w9G{Orr! 
z7|ES4KuOzqxwL0NT|W6#J9{pe=yjRPXW>es=3k49w_-elbH}`7$pX$OYEG3`RvQ8G9d<*D?PTp^DyVq;%z0TRU|5kYf#4Q{}GnB9PXOQa20ZLvO3z z9YjyUgSJMoZ4TOnte2*gX)9%dxUuro&JZVWrJbDlGmSUl-dgpycY65w5?A(?ue&U@ zW*Iqj@nm#{AhPR?#Z7QOg=Gz%XET#R2ktgrkt_%#J;*gKgUQ?an2>#zcl@35idif4 z4~_&zTK~qWX4#Z3Hc>AI6`rjLRFImV9p*KGJoo+9L?W1XUBqGMb|pR-s!$q8D6v>@ zp+`EFc1fmkKsmHJy>^In+&>9ghLpXBrSFdOgX3#GQTmBXZ4?2mWduYC%rgi+;_OM`mywlSr zbj=oYzg1znY-ND#`y>Q)>Q{<+SysNq?#zuFBMn!Yt2YTB0&^^RQo43N+7!%o#)!pw zl?Y#+^UI?WV;w#!C`!~`ms;^?SRUDE@G(Dp;&{cv!&cAB&8z}+eI&+$pB$HEe_#w_ z$ccJWN@>xFuHGK8_IC{t2fyif04e5CL;Ej54b?C|@$R zZ3`f0_cHD~72=&6Sa31EWz_K)F4>ZRXqL!j(o(2I?}c&n7^fe)SFehH!Y-T_i#%9{ zPRM?~rN`CPaj?_=ImX>^|2SrjZ@ij}U_M$4S7U24TMgiDOQPsFp=Fl?cqy>MaC<@( zU;C$gAf?dA=+^DqERmsDAXb}!KfWLQga@SkOvZw~jL0vvr9N9+C6GBRAHU1d)OSnI zxnd^+zZx1V!fXuY(%nu>t_;9$B!tqTyB>KDk${Yfg3hh$AJ&Ghz2ax;&RP^9v}&qLIGY6WV%;@mnZ)t@J@_nfvksN6^+#pkp^i;$qM*-{km#{+ zO)24+SXwt})YCyjd?CdzIN7){c7RVe$H!#p>C9<)y<6=Lhw_xrHbv&1TNUK49WhC( zgzjxL(_`?Qr#bw}g_zjd(($MYeKB+m3>8%}r^L(YJsWDilEzKy4lN%Zak|7Y_cy9P z;_dwL(dsc)aUosWnv?{j<}l-#55*70RuW(66xUxkJLhStX5-(EDc~yTP?gLaHB!dN zI*wf#Ct1XG@4JAVVo3yoUlL{ikq{7|>4F3eqCe%9uP77u4cCQZUj5LGnRUIlbv%73 z)M7#_PkHC3Q_c+F<`;gG!pzu12I5sD$M zm^9&FsBg`K)TVARSNnHOBw@{3+zzJ-Tm0+^+XIu%=6EF9dG@#+!q%3z3di_?7OuN|z}IQ?P9HOgL1*Rtkw3;`6KyE< zNZGYE9!_f$#PT&n>pKi6DZ%Q&%U`!9MM+DZnJIn>UcpMcFXVA#?y`(nh2XXv&F-Zn z{Vbc>ZKlejFQ>4_9%|wex656W3S$kpmv?CBwn{$z%Jy9fxzSTW#a&KUBYEtnyb^Pa z^;U@`@lxuW)4-u=7&Us2!cmz)S)iZ+{0Q)@ZyagrEhaD?0uwK%&KA{KM7q_ z$zk-{ek_mi@>0Ldkb-wfU^r)n@sH_1-rA7|3dxF!VsJ<8<_zoc-1hY+a?IQ)ZhX%* ztt~Q|Mn*mR+2Q1;%~OXD57|~~Xg0kR>@a1o+H<=~jBdHfM&0h(>5n@z6zgvR```{- zQsq1CuS;FMb`(5rC7bbl^uOtR9`at$@J$bknN&usYJ+L$P${CLrR3;F_R)l6-yoOqF0lyIc9UBk&(C z{{k{dk{Ee_48hOs@qFg5;1JQ2lD)o}P`JbeE?C(qh(V(YgLr6eCS#*L7yh=y%eP~r ziFOa%r0u!y0DO%cP1`b^D%FmidO^S-QQ3*_9EnZ3lBlilk4i1a5j zc&8djvH9=oJ!%>oGC$sOtob+=fq|Mf>(Avj+eGtkpX7#JFV0fU7s4^Cxi%Ykhj6+# zE(JT!?n+LUA1G&c$W2Pn9^(px<6@0;;0I4!Ee4PGU0g603&l>UrFik`$5sW4%u^3Q zqqR=PXQa2MEN+)Klis*ANRS(*tHV{cf%(YiKQTpW2gT@Fo9UG``p%k>_7t?oo^|2A zN9KvdnLyL*nBAOyYUsSY1)tw3_vXJkK3w4uT2Hl2vu=kS5zZM1uqgt0Yb>2&BIAP1vm%bzPo!uE z2^>08u88b_s`$G6%7jC6$3nHf8yhw>Z#}Wh7bOJVI`B0I?4VM^GLPG75bQEH+CtGm zgQpnA6QC`q&fMJY&lvgg*B{oIBnYqgOp z^Q%FXQ#ti6bz&>-yae&%7ii1jd6(4r_2gQ^Ok%2xDs1oBvX=baV|(`ytH)EOQ=>D9 z;ALtzp<$@{(CJ@{(IL8GpJ#e7T>i_Y}ub1bsKwc2qcmyQT z;lrom;v+CmO{B1C`Zzmv+HT2wxV$-J|3ypFO^KI|H-t? 
zKR#gTlC0{blq!Uc!Q1_EA^3>B$s8H8h|=~i=-TDTz*w*#_a_mc7DQw();atM{7m*Mwr5W^i#L}#PyOYe3b7k!a9!{GPv|S+ctd$SxFHm9Cbb9qX6&p*Y6{_}f?zmw9pE@;e_Yi8HVI z_$T^Z{S1&X&5!FnCpVSxUh>!swuZ zPubluDz|rf(eu*|xOatF$vY~)+GVKqB!k)VM8nCano_^bv-m%ZcyO(huP5hkUsRCh zwc=hGEaWH`YrP}DyG|60eE4`o2BaI_x%}Z0U$TdXg-XGuIXo!f`L8=i-7;hygC5Fn zY21if=;we+2hRdZr%a!YduD93oPDQeuA~9cvu!Uka3SPl1nV#7@5&X69rvCDK4KT* z5b*iwR^1nQEABen))#N41sPM&jTjWFDDpKu3@}Vk-WJ(`WnO8Q6VjVD?Kr8E{xQKH zCIT1_jMLm^U8<$r2!}Am>l=iREm`=x*WU^k1@zXxaM}j1y9wV!ON%2V3xM_wCDlUc4hPA>;8U< zIww)$C@q6j(qm`X@i~_Ky2`^8?AN}~z9aD0*k>sM)gfs^p_qxcp4G$WhN|-X^~hOh zTmzY#F?L%51FLE_D`iL=uj?<1Tc7NHc1N>iPtxm%|Qb?7iYSQL|8!3Q4rfwrHp;4iUz zcGwBzKA`+uU3D3>%02`8FYSMqQXTU5j(yplXrTrfbsV_dQBjYV|IVt)5-UAjv&)@0 z{-z>Kx(XKY9_BSI*;(ALQO1v_Jq^~TRapa>y_H@mSgxJ@ht9uiVv{K9a(SdjjTA@S zg@>~Fd?NNzliocNTCZMu8M786CeGf7r0N_ipiqfT@9GGR04F5>Qt+=+HLBH7i4rBk z_)%hX0-E6M83ltb@E!GW6${Ytg6apgeIQ9-rhBJPcMD2FO9?6(SqPm zIZ&7@pHb8Ar?5KbIno;J*YPQv=6S##ldgTurb)ub^b`s!zJ46H3`LJKkG~G!GrN|1 zZ_@e1Az*f#z0S8|xR7GC2301KwG368(HV|5kP5ysV6aZ2Ug0-RNca^olar>Ou@%Ox z+aD?n6~ToniDx=KRb@nqjYv%@td)Ai+go!GMKLet7Vw_@8r!suM~>G##;Iuv3XzVy z2OULL+}CY&E--f1KRj*--)OQJuvg@8dfK77f;R?WG^bDssTX!vFBY(s_hz%-*foa(ix^c{S^tCp+PwrGv^m zZMh%nvBhmR+Clcqo6wfA>bkG!M)Tw;86U>PPL@!tX>x_R?%uR|uGy~Lh$ux~_xR_| z6qRP-v+(-s6-zJ#DPK!8iP6jtU`))owx(ki?hmAF{u0W6Wdvk$rMn7n!e-wR2@eka z!uda7T4?P)i!s$LwN#H>oDtU1Wi-k}x%K=h%TOlx{b5jShSO3+X*Sq-^{dy|C%cZ= zPr2g^8iHQNmnkr(HqJ9ib#+^{Ei{R{$HAK+GdW@EHb0|F6Nf6aBcdQ)cdkNJq3|z? z*f?zifNC`^zO2&eYB0I7@y$keu+2XuE%xUWHA&Sb zMt5t9?y%*b&f&5QyS)rkj~M^0qfvC-Fg^DM72xE#5_OW>k5(eZDnv>c@eH>iHH&mA zHv!lnw~`E&dd3L<$UT1}AdpfKg}dmaI^w;X)iKJKCV!Gg5$HO1_f>Q zxFR?&)K?=tSOwt9Xr-MHtEjUdy5t>&U8q{=@Dt{Wj*GewvoZmVjJi07@^&i}xW-XG z2=k)yRO@k7RYu{>|_C?nJknkU5B?JV{|KxNX06u$y+Ig-= z0v42Hl=Uwf{_|62G4$*I84v&e1ZC@g*vS9o-ueG~QORy;tjdMX$^|v9-7lxJ_|?14 zn67Pfa(n(^@zZCz%i-QSu#I+i@7F6AoPoUiW(e@uKU3I+)}H8w6}{sl06lkAC`^a3 zCa9Ab1QrQEZZ%Q|c;;h`>LvKRrQK_p$3KLgRw_9GTxoGWBjsiE3^V<<8X4fYFN;;^ znuIfn^0mF~-~|1ZLOpV~%ytf$$Cp)Z5w#-+))7!O%^D#CN$1W^{`>&aU^w^RWdtP{9FUW#@(4!=%r(a<4Q zr4OX|F^xJ~CN3{80U!Cp8Rc`Z0(d>3$fGJZa$*)YF*p42;V89N z{7oEk9WL6r52SJ@TqfsiUU3L%?F16-Khp9N-W zw3LMn-w3Z#;&q^Az~kd?#8RYHVHOgRwrPn(ZZ{-s1_BJCnysf`MgHYJ4bf(keZ{$e zAz7;_JW>koi;(i5tzM*eHYBTNeWTL|X>2N%s_N%v^?O0h$P?!MU|osoKVtQ?JdwUv z=QCpI)MqY`%SHnTf;(HKwzQHf%`%9sMSz#8oDWc>Dn*)@VJd3B-)dmLzY~0_(Z^Lm z1~y7{4TO(LYi*Z|fE*_vJm@SotdyX$RH!TdEon(`1Zl=Mn*YMz;bN|4WjRE;CL+o1 zzJ3r&C`s3((6D^lCDS}1sTf%asCQJ{(L>th1)%^zi9K%4KiVacC`ARl&nhdilnr;Z zk=$a_NHm+GGUmVlVWbs!b?yie8zU_!gF2ZUW&0WwD( z@&PC^WP6k4UOQmSEy8Y^F)JmQy`0(14B7cfC;s54+~t5RQ9|_-sY}cOG9{5F$gY8q zmFG38eMy#2KGEN-tDekb89T>_ET(_j<3e$a6zie%QY ziv*bMSb!>LOQ8#DXhy&w-*KDoomp>k%6uTb(xTQEyp1=fDT^6y>V=5cPI%W`Pt|%R z+-Q2GR7|2x6NP;_2szL`P zBMxaf<4gn|qu$?w*yoo$VYAbBN^CU}kFN^}NXP^r5}-=Sx9*{frV!vP2GFKlhH1;O z?eQ(X1x<7x3zD9ki_0&1Z!8fAVQmK^Y8zNL#zM7&Db`zo}lr%}F&7BtA#=p)9gIM<{e=7$P zJT3h6e(QACLh#J!Mk!V~?1Md*I_8E=O}z+N_qxEq&$ zX3M^3>RWtkTog4RR=OT`r4r&>SdvmpmiQ#txZf$?JXFFP&*%qi12CD$eI0)X=ufK6 zuCcXhheD!I2AijLwXq%67gIx#Lg^pXJ@i_}V_*7Fe|st(v_$Tc8DhI`jNnDpd~7 zg))|s@=!Edzw6L!pQ-WljQ+NAXCuJeQo8W!U94$adWv}sglK`GQ&l0K^C5!ukQ%X< z0NU&hM7`I<6NTnQlMMb%GDBxb|8K*a54)-JzsmiM5l-5AA*#|qk?Q6esrCEw z1}-o5jZ=l;O!gsMjhbT{2+}rzPR4#z zfoUI1;IOURvA;|LTyY1~1W|6>J!d381!!O>S)>Kuu+r>eI;_gboAsNJs?`f=T#abFZOJvUv@}xX`rb zy5;)&bV!!cqqVn@)-svgzaMHV0@r!f0KWA8md@rdUv-qVc5*ftO=7`Eh5ezb|33AO zRTF+$KrMpD?)GBMFQJmFi8Xh!K{YNXh z(sF!+Ud)N&fC1T}bvP4i+m75Hoy9-M>{20kXAE~TprA~j*#`*MnQ}iL$D;ub3TGwQ zq(J`xU7%7Ayc;=67dvsux|*&x$}`omfavsUI{L%Dx}<7Bk#4AR=qjLVLFppY8r8e2 
zfR)n)i1Q)qmO$&&S0nFVbzJnO(~AMa?1&i@(rgI;y*Vpo=*hA85MRW9d4~Sj!rgVTYVv{J0$xPGYOdz_@ zn0>h01c>ZFx>hNNdRU| z?>>F8+LQc^n*VqK&@f-)qT&r20GssBgC>G2i|GI+gknw91GKX6A**!&Uzm!Ii5Qx` z0j=-t&Bk%AcHACv2h>O;ZsMMH&uzf@2GETkn#q3~4=uE%k)ap3XHbEEK+$x++VSBQ zUB3#X?rT%xybn`Gb7PfCCY7aM0|BG}fZK1<@li)WVY;`T(lQ!n^;!-Xm_crHOUFbR zdg#5F8#h9gi5eYJn_9g9s5$j|#H;m}%^U!!m0DEAU8$H9bB|L>0mW}5x0iDpf%!^m zAksX*1dbQIXNA)rs>G=awg&;c^h;LXQrEr!N(|@AMdGTyfbudAfXVe&YI!6(4@dxF z=dgL!I^&BsR9;Tdj^(jbv3*)z6M0QIQj#uemYD6j4HmD`^x|LNp+lZ>ReylPrw35- z?U>u?6V{V127x3l0;i{ zF0!7~PC%fHCC%`z>VSd-iU9N=k*<2z@5eH#kfXp_cQ~dWG>I%5vy0g>jY7Cab!iKo zhn1hcozo(vxhCEMzPosVE;RJY9G=3IK+r`z7e+Ab&6>FwI=6k~Hf}9tKW&ty z+O$)mn1}L%cX#@Mr7|)_2y|R{%3y-oIm35ElrP8{9l|AKCJ9)gnVWJj_StrT=SiOd zasg)@1#6jK0uoQ=OifhMXWP@lUbQ(znQZQb3Hg>K*?OjT0cCA<AaUZW2yq5FJO{1>_u&T?pcSzkg)KJMZ)q?cB8aE%Zs=#<05Q4<`tU-;$ljFpXNz z^USkoOHR1?G*3970W^ygcvd{+t@JIy*-d^b%1)qYA5brhxVGxlnIx5o7P8mo#oEnQ zXaFBlU*(dr*CYUuQ{+MrFw$-~YF{q}_+UO1`{YnF)v}Nk<2(Ah=G0G<`X(AMP(?fmYyb zIR>dBBV*Fp2MP8fB(u0)#^PhGb5t%*TTlmx8aAH*R#( zGXw@e^yfL$B7c^SeE@&b&4 zrC-~4X)LRArnJ%*f_ixum{l6cueAG;cznL|$ra|(D5}`r# z;(M0`gd-Y1n)WfD`3ye~pTGLv*wTJc+PW+)GiblhXT@w+ZWnm$#JttJOMkq2#NjpX zu-1=IZeR=3+^pPZm=i+>7>H>)KD&dnzfbR`fF_Fcv+epEGQp6{HX4MJn-sZfwQhW!3Raw0z?S+lm9@RS^#aGF_9Uh zw1NZH@d@xWc+k-A|1a2Tfd|r~vnxI`D_TQuJM|M>#_{nxuoOi$=y;WWt-PI@!w!*|jtI!22eLPYLJ%t z22T74DP{qP~n`+=cZUb=|s{86k|3l+Sa@e?LLpMoYEF8eOzQ(W8pwA0=Eed!|CM=QgX zFOymOo0Q5iGg#86yMR(ex<|C70kc>#viJ4VR;fetTcq#LK9|Euz-41F`01{E;R=z4 zNo>CX&740(H{aGX(R?vHCt<+Q0sGU$wUR#0e0a~7$!f$cbt-P}EJ^vGZ|Vtv(Y?F> z9%XlL93Dr^%HoP*)DjHc5iRm%jg;x<{ucBpWI3Q4^MFWG81}H z-kHUz zB9tnQ*27XpmcR6lbb`UY8O}1bf$5Ose%HHofqRxXZeJHC^0ua6NmrhRr>6`uq^G^F z%{<)Fw>OLs)EBwRO7l>wlVxXIbKB2CcuBY#Cwc!r=Yg&cN}eqeh``r+8uC^gJv}0fWY6Rt!#W!c~k0S`K}to zRMGYEBpzCHH_fE)db^`_!g34G`&hmsU9QPR&Bp%M!s*FaNZ)Lte=mx4ZN6cuH+70{ z7G&6^X5KiN&Ct&uuU**mbXLx{D_WPeD|3W=BWNRkHnL#?&GdZaJDTdtIe-*Ub+O^{ zou$@HHzydX)X2G6-Qf)}|G(P1@^C2Ezu&1}QOJo(nd0Q65=NHD-mxUcke!hs4%vo; z?36mPt1N>I&M<>vh>U$`L)pq~YBbg?LkyX*jO~5KIoCPw`^S4-=lxy3zuv2VpLy9{wIox ze|i2R5BYR@2rf?A@K(TQO4eH9d|ZFoDkAqGh;~cotS)r}rO;L_a>2Aw&1v=H6R$Mr zAU{Vc@{&s=tl9mz>gJ?ZB)x|F^u4Mnc_TUR(UmlmEvS%k;#+dQiW1`*2uwd>(&>~3 zt`f4yMKC}g)vR4{CG8$8)*1}63JKUz*5^3d%!z0?nDB1V`iNEV=VF8 zl{uS%mIcyRL>s`^(4skOOMFd2u82P-xrvf&=8jr>Y5XAB7&1F}O%2r`;ruUQ>at5N zHpDw{Pa8@xAkzY`pOmsi;DJGae;&%>Bl!36<1Ro&HP)OG{Qrr<-8pH@SQ2F z<1b%KOsVln$@(|JVMH$-1)u%CSZsNRIYiT+%8r1Bw@RzzM0P(=30MvHYL2VtFUalF z*?yc+N5^ygqba}=x{Z_Hwbwp<=tYG0SN%wnvP?v|rhQ)T8NP2B;p zF*<0QbqHnx2hCqLlhtke@O^H9k|)SSp*vIL2$Ink{Nvc_v4bIoTwKX`Oi}vg8nRdgfg;=nPx~i~4|h zXFEG^(Ns$g-)chEM??<_nAFkHBC+yq(exrOb)9Rn6Qt`k?gsRUCFt;Tkql_0E6k)^ zj(q6?E;CM@yA<3Vw&qTvLxtOG2W1qWb8!$_{>A3`4kK}eDLUJvcQ{~~q@NNq_=)o- z+(K~s<7%yYkvD+BLrf8IKUjLgLv9Lifa!Z$0qTebN35aG_kWq1DfbWh`g>;qRMNhZ zV|a3=1T_6sP*i_qjV z@fy{6NvvbP8>m=Oh z&txGXI*6HADOW#f2|Dy66?go|lD)vMXzMHZ;y!S&Gz4}Slaq^dK5k~+?3#1*HM?R= zv!lj)A;5o%Q=y#{YkysG#GI_NuLlke`{&&@hW$FamsIUM)CvqW`^W)Gct#>lmN0eF zlX}$``jqMbQ{j^i^sx1MG7CP51he#tnhcE-)%l(2*ZrWmZ4nr8Qpaows!05!Ji+@4 zAxsV%z#uZemY%1;jZsoA=k{P#Yt>ByKPp4U_r(Yp?oxq73|A|MM-$HtN}MmJT(pz% zW~&2241xkn|0<-l)(IA#U`SHsLuFMqZjQo+JtQ4N2(=x29!|uVt>G%{o|uhwmC^nZ z@_s&6-eiIQ5M6WA08e?l$Y;&ktqfo$^gh38Os{NipfaMzj=t(QGlC3)2+KEdzrP4} z^Ju$o_LRO~BC*3q5f*^MGj*O`UkRX52)V(nK5@d97!|a}K$;p!QkzEUo@tZ! 
z7I_nE?Y-AS3AzE1$*!wFrk3^saYvzq)eyCG@j|+Swtt` zLZ+@G=-UlTWdF*Ri;VE$aVAG?*d$GN)3s@gyG{bU1XaXFdqJ_LL|z7XooPo(b?k$S zU|EJTu{8&bK3%!;`L+g^x{vvyuk*wcntF^a2;`UwHRL?bYNi_4Jcco!+|KT<^DIh1 z#(Dg9K+&%}{p%R`5}EX7*{}U`YD2l%@zx4@!eD?MB6zi8(C9sPK45m@Q~9Vtgo>zS zxv;&|Bd`1wZe7Ssd*qeqc(VxE-T%ye^ve zMJevEZorkxM8!Cr!<(KOoY>h!5qagIs480hr^MG;R||$*=Yqb7)kBwBtA^c;v|aQx zf=ZICG_bdo7<`F?dL@inreY_zlLbW59}4g!j?dN}J)jbr?tGO=G>i7m2x{~nQ#P#s zx-JHtt!#CLA=)wCtb?|jBL-1j4JtEt7rhWSUgkA#wtRwQ07LrV3OMNaKV98+!%3E2 zpq#!Ktk83KqIe!$if`F~$DSW0gQ*`XBLXAZHEANygCyOCP~d`kQ8cAF%OsV7QJzCTY4%vaK<7d> z#~}f^(3hM6kA&_7KG}l+4-vo9B;HZ#{(q~b1D*cA#|wO)G9F4o!g*&KQ zUJ~}heQRDU_#f9>`W~uf+AQU%>j5E6)4A)UlfpWvttc@Q5Vf>KuNC|N zbbXHf80HL&VA>&YTs?%?s>R15k4#9WX+3Sd{c6ICDSZ^&x(cK-E($ridd@LkB4!{} zC9|wB^y(fUwFhE~8O`PCXt_4hn}JR)-0TXRAC!#5(H=fqgvoMd)I$sQDpc?GjapD= z8u9t1w|*-CQMrAn=*3P=QhiKxaML7vb0=@uQ~fP3!cTsb^cdiKbDSChDjdEw!wQL( z;J}D!%ftm~leW)y`s}=UDB8k^u{=j(LF9xm1a@`7XZdYw1uQlS06abSdK?HJPn@8L=Awtapti|Ph)j)5{6)ZrrMX+ zcXOWI9k{<-Ij>DXLF2@{q0^ymgV#J{ZqNR|TJ0=uU>))I4PoeMSd^SLIAp%fj*qiI2Xg?{7iy*629po+#wp!8F=L*;%s zq-BH74qD%1UP85ld0$3eW+DP@#+!82S#f9vvTb-XG<;iC_k2_H)|%-YBV$>6o@9%7 zb3I^OH)(6pUe`B%+08XvQ34pB;2m9V@2Uz1GeC~kj4I|qjK`x@n=rIopCzFM6s~E}(sZv0&Wucq8Y>h>NaR1+awFUz=*Va* zy2CW)(2nHv7!4_ay2igmNVirGa#v}s5t&qXW2|$7{4N6X{zO3Zp{7W-zASb{q3&AK zWNHi*ORC7*x>*k^-vO`etMXw*%Ao?=8D1Yn^vU&YmuhJB)%>=q@QC)>_bUoA2h>i4 z^bM16NJF`1VtJ3g7>bC^x zBhum&#`v@V`KHd+@3rKkk~j|3i!Mth_phhQ(eKJQg$%eE-W5T-%Gh+vQ<#7Di^jxC zeX-#~@S{3UnEF=u;2YZz?=uweUKl?e=cX4W55#agbG%1d~3#$cD| zFFhry4n&)H-j{Ws;lCAW9zKgYj-c)OlhH#1MLi1@0{$>C>0G-A#(@SyY!dGelT>6bW^oL zND)#C55p;r(AphaoVKZzye!pYwdKr%Wq72n^F5p4kjpuarE2hECx24Bj#72 zV;8??+i75ku+CN_J&L07oGO>|?UhgaTuG5Om8*A9z;}&ki-Do>+3j)Q4`p}U_Msbf zLibx>N(HUyqQM_tO$=;5KL#)2-B!@kYj$ZRZcOnN5?5+@_?ND;IyEr7xm$lFr1pqa z-Ci=hS#eM12*&t$Y2;``j3+hJ?{j}?3nnQ3i9-L|`Fpx+Ru=BYfA-XM-YB4ZYHcdQ z8Sj6dekVZ{esKLWR(|U9DZR{wq+9WW7M+N)vpO7Ci|{i|p>U!NrzqW7PNJwc`<}@H z>GbZ*n2qJau{E^g$pOm^nR!oZ$;u4Obrl@1Hs5;+Vl9hHY!+#sD=dOi1KZX=Wld^H z#W$|z@+G=RbB&G^)lf|1J+#rqJ?~qq^%pf_QJ%L#W>ezC#*y`CiK~B<^L?Z!@q0VI z?xC;k0G8%pO1Jd~NjPSPE+nAMj{`m3Mi4T=#7C}ZP@ zX!+`7SBDAO&6L@#u~HNnutkIgjIrE(R|ge&Q9w}Hq5^#dq%>i=gQLP+yt`=kxhxyM zKg??IcIcN+>`YOxIYgoQmIY~i98{4TC$Ct$JU?<&hwq1&wFZlK1lWfSr6M`_QD%!M3H7c(??AFmx9H5dokG*xHduf&}sE zAFRX11q?HBV6HCDME{6%mEJ=Xh}IDYfM=J@bEg10T)Z~V<1?Ak>a3+n=C{`0mtRWTUewH=!=X2`Nl 0: ctx.get_node_mgr().add_nodes(pl.node_parser.idx, nodelist) - return "Done" @@ -48,13 +58,15 @@ async def redindex_data(): @data_app.post(path="/v1/data/files") async def add_files(request: FilesIn): docs = [] + pl = ctx.get_pipeline_mgr().get_active_pipeline() + kb = ctx.get_knowledge_mgr().get_active_knowledge_base() + docs_name = kb.name + pl.name + str(pl.indexer.d) if request.local_paths is not None: - docs.extend(ctx.get_file_mgr().add_files(docs=request.local_paths)) + docs.extend(ctx.get_file_mgr().add_files(docs=request.local_path, kb_name=docs_name)) nodelist = ctx.get_pipeline_mgr().run_data_prepare(docs=docs) if nodelist is None or len(nodelist) == 0: raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="File not found") - pl = ctx.get_pipeline_mgr().get_active_pipeline() ctx.get_node_mgr().add_nodes(pl.node_parser.idx, nodelist) return "Done" @@ -62,32 +74,47 @@ async def add_files(request: FilesIn): # GET files @data_app.get(path="/v1/data/files") async def get_files(): - return ctx.get_file_mgr().get_files() + return ctx.get_file_mgr().get_all_docs() # GET a file @data_app.get(path="/v1/data/files/{name}") -async def get_file_docs(name): - return 
ctx.get_file_mgr().get_file_by_name_or_id(name) +async def get_kb_files_by_name(name): + return ctx.get_file_mgr().get_kb_files_by_name(name) # DELETE a file @data_app.delete(path="/v1/data/files/{name}") -async def delete_file(name): - if ctx.get_file_mgr().del_file(name): - pl = ctx.get_pipeline_mgr().get_active_pipeline() - +async def delete_file(kb_name, file_path): + pl = ctx.get_pipeline_mgr().get_active_pipeline() + docs_name = kb_name + pl.name + str(pl.indexer.d) + if ctx.get_file_mgr().del_file(docs_name, file_path): # Current solution: reindexing all docs after deleting one file # TODO: delete the nodes related to the file ctx.get_node_mgr().del_nodes_by_np_idx(pl.node_parser.idx) - pl.indexer.reinitialize_indexer() + pl.indexer.reinitialize_indexer(kb_name) pl.update_indexer_to_retriever() - - all_docs = ctx.get_file_mgr().get_all_docs() + all_docs = ctx.get_file_mgr().get_file_by_name(docs_name) nodelist = ctx.get_pipeline_mgr().run_data_prepare(docs=all_docs) if nodelist is not None and len(nodelist) > 0: ctx.get_node_mgr().add_nodes(pl.node_parser.idx, nodelist) + return "File is deleted" + else: + return "File not found" + + +# DELETE a file +@data_app.delete(path="/v1/data/all_files/{name}") +async def delete_all_file(name): + if ctx.get_file_mgr().del_kb_file(name): + pl = ctx.get_pipeline_mgr().get_active_pipeline() + + # Current solution: reindexing all docs after deleting one file + # TODO: delete the nodes related to the file + ctx.get_node_mgr().del_nodes_by_np_idx(pl.node_parser.idx) + pl.indexer.reinitialize_indexer() + pl.update_indexer_to_retriever() return f"File {name} is deleted" else: return f"File {name} not found" @@ -110,8 +137,6 @@ async def upload_file(file_name: str, file: UploadFile = File(...)): ) os.makedirs(UPLOAD_DIRECTORY, exist_ok=True) safe_filename = file.filename - # Sanitize the uploaded file's name - safe_filename = file.filename file_path = os.path.normpath(os.path.join(UPLOAD_DIRECTORY, safe_filename)) # Ensure file_path is within UPLOAD_DIRECTORY if not file_path.startswith(os.path.abspath(UPLOAD_DIRECTORY)): diff --git a/EdgeCraftRAG/edgecraftrag/api/v1/knowledge_base.py b/EdgeCraftRAG/edgecraftrag/api/v1/knowledge_base.py index 21e0c0621e..5ac3b23622 100755 --- a/EdgeCraftRAG/edgecraftrag/api/v1/knowledge_base.py +++ b/EdgeCraftRAG/edgecraftrag/api/v1/knowledge_base.py @@ -5,10 +5,13 @@ import json import os import re +from typing import Dict, List, Union from edgecraftrag.api.v1.data import add_data -from edgecraftrag.api_schema import DataIn, KnowledgeBaseCreateIn +from edgecraftrag.api_schema import DataIn, ExperienceIn, KnowledgeBaseCreateIn from edgecraftrag.base import IndexerType +from edgecraftrag.components.query_preprocess import query_search +from edgecraftrag.components.retriever import get_kbs_info from edgecraftrag.context import ctx from edgecraftrag.utils import compare_mappings from fastapi import FastAPI, HTTPException, status @@ -18,6 +21,7 @@ # Define the root directory for knowledge base files KNOWLEDGE_BASE_ROOT = "/home/user/ui_cache" +CONFIG_DIR = "/home/user/ui_cache/configs" # Get all knowledge bases @@ -41,15 +45,20 @@ async def get_knowledge_base(knowledge_name: str): async def create_knowledge_base(knowledge: KnowledgeBaseCreateIn): try: active_pl = ctx.get_pipeline_mgr().get_active_pipeline() + if not active_pl: + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Please activate pipeline") if not re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", knowledge.name): raise HTTPException( 
status_code=status.HTTP_404_NOT_FOUND, detail="Knowledge base names must begin with a letter or underscore", ) - kb = ctx.knowledgemgr.create_knowledge_base(knowledge) - if kb.active: - active_pl.indexer.reinitialize_indexer(kb.name) + + if knowledge.active and knowledge.comp_type == "knowledge" and knowledge.comp_subtype == "origin_kb": + active_pl.indexer.reinitialize_indexer(knowledge.name) active_pl.update_indexer_to_retriever() + elif knowledge.active and knowledge.comp_subtype == "kbadmin_kb": + active_pl.retriever.config_kbadmin_milvus(knowledge.name) + kb = ctx.knowledgemgr.create_knowledge_base(knowledge) await save_knowledge_to_file() return "Create knowledge base successfully" except Exception as e: @@ -63,17 +72,28 @@ async def delete_knowledge_base(knowledge_name: str): rm_kb = ctx.knowledgemgr.get_knowledge_base_by_name_or_id(knowledge_name) active_kb = ctx.knowledgemgr.get_active_knowledge_base() active_pl = ctx.get_pipeline_mgr().get_active_pipeline() - if active_kb.name == knowledge_name or active_kb.idx == knowledge_name: - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Cannot delete a running knowledge base." - ) - kb_file_path = rm_kb.get_file_paths() - if kb_file_path: - if active_pl.indexer.comp_subtype == "milvus_vector": - await remove_file_handler([], knowledge_name) + if rm_kb.comp_type == "knowledge" and rm_kb.comp_subtype == "origin_kb": if active_kb: - active_pl.indexer.reinitialize_indexer(active_kb.name) - active_pl.update_indexer_to_retriever() + if active_kb.name == knowledge_name or active_kb.idx == knowledge_name: + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Cannot delete a running knowledge base.", + ) + kb_file_path = rm_kb.get_file_paths() + if kb_file_path: + if active_pl.indexer.comp_subtype == "milvus_vector": + await remove_file_handler([], knowledge_name) + if active_kb: + active_pl.indexer.reinitialize_indexer(active_kb.name) + active_pl.update_indexer_to_retriever() + if rm_kb.comp_type == "experience": + if rm_kb.experience_active: + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Cannot delete a running experience knowledge base.", + ) + else: + rm_kb.clear_experiences() result = ctx.knowledgemgr.delete_knowledge_base(knowledge_name) await save_knowledge_to_file() return result @@ -87,26 +107,31 @@ async def update_knowledge_base(knowledge: KnowledgeBaseCreateIn): try: kb = ctx.knowledgemgr.get_knowledge_base_by_name_or_id(knowledge.name) active_pl = ctx.get_pipeline_mgr().get_active_pipeline() - if active_pl.indexer.comp_subtype != "milvus_vector": - if knowledge.active and knowledge.active != kb.active: - file_paths = kb.get_file_paths() - await update_knowledge_base_handler(file_paths, knowledge.name) - elif not knowledge.active and kb.description != knowledge.description: - pass - elif not knowledge.active: - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Must have an active knowledge base" - ) - else: + if active_pl.indexer.comp_subtype == "kbadmin_indexer" and kb.comp_subtype != "kbadmin_kb": + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="The kbadmin pipeline must correspond to the kbadmin type kb.", + ) + if active_pl.indexer.comp_subtype != "kbadmin_indexer" and kb.comp_subtype == "kbadmin_kb": + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="Not kbadmin pipeline cannot active kbadmin type kb." 
+ ) + if kb.comp_type == "knowledge" and kb.comp_subtype == "origin_kb": + if active_pl.indexer.comp_subtype != "milvus_vector": + if knowledge.active and knowledge.active != kb.active: + file_paths = kb.get_file_paths() + await update_knowledge_base_handler(file_paths, knowledge.name) + elif not knowledge.active and kb.description != knowledge.description: + pass + else: + if knowledge.active and knowledge.active != kb.active: + active_pl.indexer.reinitialize_indexer(knowledge.name) + active_pl.update_indexer_to_retriever() + elif not knowledge.active and kb.description != knowledge.description: + pass + elif kb.comp_subtype == "kbadmin_kb": if knowledge.active and knowledge.active != kb.active: - active_pl.indexer.reinitialize_indexer(knowledge.name) - active_pl.update_indexer_to_retriever() - elif not knowledge.active and kb.description != knowledge.description: - pass - elif not knowledge.active: - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Must have an active knowledge base" - ) + active_pl.retriever.config_kbadmin_milvus(kb.name) result = ctx.knowledgemgr.update_knowledge_base(knowledge) await save_knowledge_to_file() return result @@ -120,6 +145,16 @@ async def add_file_to_knowledge_base(knowledge_name, file_path: DataIn): try: active_pl = ctx.get_pipeline_mgr().get_active_pipeline() kb = ctx.knowledgemgr.get_knowledge_base_by_name_or_id(knowledge_name) + if kb.comp_type == "experience": + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="The experience type cannot perform file operations." + ) + if kb.comp_subtype == "kbadmin_kb" or active_pl.indexer.comp_subtype == "kbadmin_indexer": + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="Please proceed to the kbadmin interface to perform the operation.", + ) + # Validate and normalize the user-provided path user_path = file_path.local_path normalized_path = os.path.normpath(os.path.join(KNOWLEDGE_BASE_ROOT, user_path)) if not normalized_path.startswith(KNOWLEDGE_BASE_ROOT): @@ -170,6 +205,15 @@ async def remove_file_from_knowledge_base(knowledge_name, file_path: DataIn): try: active_pl = ctx.get_pipeline_mgr().get_active_pipeline() kb = ctx.knowledgemgr.get_knowledge_base_by_name_or_id(knowledge_name) + if kb.comp_type == "experience": + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="The experience type cannot perform file operations." 
+ ) + if kb.comp_subtype == "kbadmin_kb" or active_pl.indexer.comp_subtype == "kbadmin_indexer": + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="Please proceed to the kbadmin interface to perform the operation.", + ) active_kb = ctx.knowledgemgr.get_active_knowledge_base() if file_path.local_path in kb.get_file_paths(): kb.remove_file_path(file_path.local_path) @@ -178,17 +222,9 @@ async def remove_file_from_knowledge_base(knowledge_name, file_path: DataIn): kb_file_path = kb.get_file_paths() if active_pl.indexer.comp_subtype == "milvus_vector": - if active_kb: - if active_kb.name == knowledge_name or active_kb.idx == knowledge_name: - await remove_file_handler(kb_file_path, knowledge_name) - else: - await remove_file_handler(kb_file_path, knowledge_name) - active_pl.indexer.reinitialize_indexer(active_kb.name) - active_pl.update_indexer_to_retriever() - else: - await remove_file_handler(kb_file_path, knowledge_name) - active_pl.indexer.reinitialize_indexer(active_kb.name) - active_pl.update_indexer_to_retriever() + docs_name = kb.name + active_pl.name + str(active_pl.indexer.d) + docs_list = ctx.get_file_mgr().del_file(docs_name, file_path.local_path) + active_pl.indexer.delete(docs_list) elif active_kb: if active_kb.name == knowledge_name or active_kb.idx == knowledge_name: await update_knowledge_base_handler(kb_file_path, knowledge_name) @@ -198,14 +234,115 @@ async def remove_file_from_knowledge_base(knowledge_name, file_path: DataIn): raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(e)) +@kb_app.post("/v1/experience") +def get_experience_by_question(req: ExperienceIn): + kb = ctx.knowledgemgr.get_experience_kb() + result = kb.get_experience_by_question(req.question) + if not result: + raise HTTPException(404, detail="Experience not found") + return result + + +@kb_app.get("/v1/experiences") +def get_all_experience(): + kb = ctx.knowledgemgr.get_experience_kb() + if kb: + return kb.get_all_experience() + else: + return kb + + +@kb_app.patch("/v1/experiences") +def update_experience(experience: ExperienceIn): + kb = ctx.knowledgemgr.get_experience_kb() + result = kb.update_experience(experience.question, experience.content) + if not result: + raise HTTPException(404, detail="Question not found") + return result + + +@kb_app.delete("/v1/experiences") +def delete_experience(req: ExperienceIn): + kb = ctx.knowledgemgr.get_experience_kb() + success = kb.delete_experience(req.question) + if not success: + raise HTTPException(404, detail=f"Question {req.question} not found") + return {"message": "Question deleted"} + + +@kb_app.post("/v1/multiple_experiences/check") +def check_duplicate_multiple_experiences(experiences: List[Dict[str, Union[str, List[str]]]]): + kb = ctx.knowledgemgr.get_experience_kb() + if not kb: + raise HTTPException(404, detail="No active experience type knowledge base") + all_existing = kb.get_all_experience() + existing_questions = {item["question"] for item in all_existing} + new_questions = [exp["question"] for exp in experiences if "question" in exp] + duplicate_questions = [q for q in new_questions if q in existing_questions] + if duplicate_questions: + return {"code": 2001, "detail": "Duplicate experiences are appended OR overwritten!"} + else: + kb.add_multiple_experiences(experiences=experiences, flag=True) + return {"status": "success", "detail": "No duplicate experiences, added successfully"} + + +@kb_app.post("/v1/multiple_experiences/confirm") +def confirm_multiple_experiences(experiences: List[Dict[str, 
Union[str, List[str]]]], flag: bool): + kb = ctx.knowledgemgr.get_experience_kb() + try: + if not kb: + raise HTTPException(404, detail="No active experience type knowledge base") + kb.add_multiple_experiences(experiences=experiences, flag=flag) + return {"status": "success", "detail": "Experiences added successfully"} + except Exception as e: + raise HTTPException(status_code=500, detail=f"Add Failure:{str(e)}") + + +@kb_app.post("/v1/experiences/files") +def add_experiences_from_file(req: DataIn): + kb = ctx.knowledgemgr.get_experience_kb() + try: + kb.add_experiences_from_file(req.local_path) + return {"status": "success"} + except Exception as e: + raise HTTPException(status_code=400, detail=str(e)) + + +@kb_app.post(path="/v1/view_sub_questions") +async def view_sub_questions(que: ExperienceIn): + active_pl = ctx.get_pipeline_mgr().get_active_pipeline() + CONFIG_DIR + search_config_path = os.path.join(CONFIG_DIR, "search_config.yaml") + search_dir = os.path.join(CONFIG_DIR, "experience_dir/experience.json") + top1_issue, sub_questions_result = await query_search( + user_input=que.question, search_config_path=search_config_path, search_dir=search_dir, pl=active_pl + ) + return sub_questions_result + + +@kb_app.get("/v1/kbadmin/kbs_list") +def get_kbs_list(): + active_pl = ctx.get_pipeline_mgr().get_active_pipeline() + try: + if not active_pl or active_pl.indexer.comp_subtype != "kbadmin_indexer": + return [] + CONNECTION_ARGS = {"uri": active_pl.indexer.vector_url} + kbs_list = get_kbs_info(CONNECTION_ARGS) + kb_names = [name for name in kbs_list.keys()] + return kb_names + except Exception as e: + raise HTTPException(status_code=400, detail=str(e)) + + # Update knowledge base data async def update_knowledge_base_handler(file_path=None, knowledge_name: str = "default_kb", add_file: bool = False): if ctx.get_pipeline_mgr().get_active_pipeline() is None: raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Please activate pipeline") pl = ctx.get_pipeline_mgr().get_active_pipeline() + docs_name = knowledge_name + pl.name + str(pl.indexer.d) if add_file and file_path: - return await add_data(file_path) + return await add_data(file_path, docs_name) else: try: ctx.get_node_mgr().del_nodes_by_np_idx(pl.node_parser.idx) @@ -214,7 +351,7 @@ async def update_knowledge_base_handler(file_path=None, knowledge_name: str = "d if file_path: for file in file_path: request = DataIn(local_path=file) - await add_data(request) + await add_data(request, docs_name) except MilvusException as e: raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)) return "Done" @@ -233,16 +370,16 @@ async def remove_file_handler(file_path=None, knowledge_name: str = "default_kb" except MilvusException as e: raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)) pl.update_indexer_to_retriever() + docs_name = knowledge_name + pl.name + str(pl.indexer.d) if file_path: for file in file_path: request = DataIn(local_path=file) - await add_data(request) + await add_data(request, docs_name) return "Done" # Restore knowledge base configuration async def load_knowledge_from_file(): - CONFIG_DIR = "/home/user/ui_cache/configs" KNOWLEDGEBASE_FILE = os.path.join(CONFIG_DIR, "knowledgebase.json") active_pl = ctx.get_pipeline_mgr().get_active_pipeline() if os.path.exists(KNOWLEDGEBASE_FILE): @@ -253,28 +390,31 @@ async def load_knowledge_from_file(): for Knowledgebase_data in all_data: pipeline_req = KnowledgeBaseCreateIn(**Knowledgebase_data) kb = 
ctx.knowledgemgr.create_knowledge_base(pipeline_req) - if Knowledgebase_data["file_map"]: - if active_pl.indexer.comp_subtype != "milvus_vector" and Knowledgebase_data["active"]: - for file_path in Knowledgebase_data["file_map"].values(): - await update_knowledge_base_handler( - DataIn(local_path=file_path), Knowledgebase_data["name"], add_file=True - ) - kb.add_file_path(file_path) - elif Knowledgebase_data["active"]: - active_pl.indexer.reinitialize_indexer(Knowledgebase_data["name"]) - active_pl.update_indexer_to_retriever() - for file_path in Knowledgebase_data["file_map"].values(): - kb.add_file_path(file_path) - else: - for file_path in Knowledgebase_data["file_map"].values(): - kb.add_file_path(file_path) + if kb.comp_type == "knowledge" and kb.comp_subtype == "origin_kb": + if Knowledgebase_data["file_map"]: + if active_pl.indexer.comp_subtype != "milvus_vector" and Knowledgebase_data["active"]: + for file_path in Knowledgebase_data["file_map"].values(): + await update_knowledge_base_handler( + DataIn(local_path=file_path), Knowledgebase_data["name"], add_file=True + ) + kb.add_file_path(file_path) + elif Knowledgebase_data["active"]: + active_pl.indexer.reinitialize_indexer(Knowledgebase_data["name"]) + active_pl.update_indexer_to_retriever() + for file_path in Knowledgebase_data["file_map"].values(): + kb.add_file_path(file_path) + else: + for file_path in Knowledgebase_data["file_map"].values(): + kb.add_file_path(file_path) + elif kb.comp_subtype == "kbadmin_kb": + if Knowledgebase_data["active"]: + active_pl.retriever.config_kbadmin_milvus(kb.name) except Exception as e: print(f"Error load Knowledge base: {e}") # Configuration of knowledge base for persistence async def save_knowledge_to_file(): - CONFIG_DIR = "/home/user/ui_cache/configs" KNOWLEDGEBASE_FILE = os.path.join(CONFIG_DIR, "knowledgebase.json") if not os.path.exists(CONFIG_DIR): os.makedirs(CONFIG_DIR, exist_ok=True) @@ -282,7 +422,15 @@ async def save_knowledge_to_file(): kb_base = ctx.knowledgemgr.get_all_knowledge_bases() knowledgebases_data = [] for kb in kb_base: - kb_json = {"name": kb.name, "description": kb.description, "active": kb.active, "file_map": kb.file_map} + kb_json = { + "name": kb.name, + "description": kb.description, + "active": kb.active, + "file_map": kb.file_map, + "comp_type": kb.comp_type, + "comp_subtype": kb.comp_subtype, + "experience_active": kb.experience_active, + } knowledgebases_data.append(kb_json) json_str = json.dumps(knowledgebases_data, indent=2, ensure_ascii=False) with open(KNOWLEDGEBASE_FILE, "w", encoding="utf-8") as f: @@ -291,7 +439,7 @@ async def save_knowledge_to_file(): print(f"Error saving Knowledge base: {e}") -all_pipeline_milvus_maps = {} +all_pipeline_milvus_maps = {"change_pl": []} current_pipeline_kb_map = {} @@ -299,29 +447,87 @@ async def refresh_milvus_map(milvus_name): current_pipeline_kb_map.clear() knowledge_bases_list = await get_all_knowledge_bases() for kb in knowledge_bases_list: + if kb.comp_type == "experience": + continue current_pipeline_kb_map[kb.name] = kb.file_map all_pipeline_milvus_maps[milvus_name] = copy.deepcopy(current_pipeline_kb_map) + milvus_maps_path = os.path.join(CONFIG_DIR, "milvus_maps.json") + with open(milvus_maps_path, "w", encoding="utf-8") as f: + json.dump(all_pipeline_milvus_maps, f, ensure_ascii=False, indent=2) -async def Synchronizing_vector_data(old_active_pl, new_active_pl): +def read_milvus_maps(): + milvus_maps_path = os.path.join(CONFIG_DIR, "milvus_maps.json") + global all_pipeline_milvus_maps try: + with 
open(milvus_maps_path, "r", encoding="utf-8") as f: + all_pipeline_milvus_maps = json.load(f) + except Exception as e: + all_pipeline_milvus_maps = {"change_pl": []} + return all_pipeline_milvus_maps + + +def save_change_pl(pl_name): + if pl_name not in all_pipeline_milvus_maps["change_pl"]: + return all_pipeline_milvus_maps["change_pl"].append(pl_name) + + +async def Synchronizing_vector_data(old_active_pl, new_active_pl, pl_change): + try: + if pl_change: + save_change_pl(new_active_pl.name) active_kb = ctx.knowledgemgr.get_active_knowledge_base() - active_pl = ctx.get_pipeline_mgr().get_active_pipeline() + # Determine whether it is kbadmin type + if old_active_pl: + if ( + old_active_pl.retriever.comp_subtype == "kbadmin_retriever" + and new_active_pl.retriever.comp_subtype == "kbadmin_retriever" + ): + if active_kb: + if active_kb.comp_subtype == "kbadmin_kb": + new_active_pl.retriever.config_kbadmin_milvus(active_kb.name) + return True + elif old_active_pl.retriever.comp_subtype == "kbadmin_retriever": + return True + milvus_name = ( old_active_pl.name + str(old_active_pl.indexer.model_extra["d"]) if old_active_pl else "default_kb" ) - if not active_kb: - return True - if not active_pl: + if not new_active_pl.status.active: if old_active_pl: if old_active_pl.indexer.comp_subtype == "milvus_vector": await refresh_milvus_map(milvus_name) return True - + if not active_kb: + return True + if new_active_pl.retriever.comp_subtype == "kbadmin_retriever": + if active_kb: + if active_kb.comp_subtype == "kbadmin_kb": + new_active_pl.retriever.config_kbadmin_milvus(active_kb.name) + return True + # Perform milvus data synchronization if new_active_pl.indexer.comp_subtype == "milvus_vector": + # Pipeline component state changed + if new_active_pl.name in all_pipeline_milvus_maps["change_pl"]: + kb_list = await get_all_knowledge_bases() + for kb in kb_list: + if kb.comp_type == "knowledge" and kb.comp_subtype == "origin_kb": + new_active_pl.indexer.clear_milvus_collection(kb.name) + new_active_pl.indexer.reinitialize_indexer(kb.name) + new_active_pl.update_indexer_to_retriever() + add_list = kb.get_file_paths() + docs_name = kb.name + new_active_pl.name + str(new_active_pl.indexer.d) + ctx.get_file_mgr().del_kb_file(docs_name) + for file in add_list: + await add_data(DataIn(local_path=file), docs_name) + all_pipeline_milvus_maps["change_pl"].remove(new_active_pl.name) + return True + # Pipeline component state not changed new_milvus_map = {} kb_list = await get_all_knowledge_bases() for kb in kb_list: + if kb.comp_type == "experience": + continue new_milvus_map[kb.name] = kb.file_map added_files, deleted_files = compare_mappings( new_milvus_map, @@ -330,21 +536,22 @@ async def Synchronizing_vector_data(old_active_pl, new_active_pl): # Synchronization of deleted files for kb_name, file_paths in deleted_files.items(): if file_paths: - new_active_pl.indexer.clear_milvus_collection(kb_name) if kb_name not in new_milvus_map.keys(): + new_active_pl.indexer.clear_milvus_collection(kb_name) continue kb = await get_knowledge_base(kb_name) new_active_pl.indexer.reinitialize_indexer(kb_name) - file_paths = kb.get_file_paths() - if file_paths: - for file in file_paths: - await add_data(DataIn(local_path=file)) + for file_path in file_paths.values(): + docs_name = kb.name + new_active_pl.name + str(new_active_pl.indexer.d) + docs_list = ctx.get_file_mgr().del_file(docs_name, file_path) + new_active_pl.indexer.delete(docs_list) # Synchronization of added files for kb_name, file_paths in added_files.items(): if 
file_paths: for file_path in file_paths.values(): new_active_pl.indexer.reinitialize_indexer(kb_name) - await add_data(DataIn(local_path=file_path)) + docs_name = kb_name + new_active_pl.name + str(new_active_pl.indexer.d) + await add_data(DataIn(local_path=file_path), docs_name) new_active_pl.indexer.reinitialize_indexer(active_kb.name) new_active_pl.update_indexer_to_retriever() @@ -354,7 +561,8 @@ async def Synchronizing_vector_data(old_active_pl, new_active_pl): new_active_pl.update_indexer_to_retriever() add_list = active_kb.get_file_paths() for file in add_list: - await add_data(DataIn(local_path=file)) + docs_name = active_kb.name + new_active_pl.name + str(new_active_pl.indexer.d) + await add_data(DataIn(local_path=file), docs_name) if old_active_pl: if old_active_pl.indexer.comp_subtype == "milvus_vector": await refresh_milvus_map(milvus_name) diff --git a/EdgeCraftRAG/edgecraftrag/api/v1/model.py b/EdgeCraftRAG/edgecraftrag/api/v1/model.py index bbc0d9806b..bce669280b 100644 --- a/EdgeCraftRAG/edgecraftrag/api/v1/model.py +++ b/EdgeCraftRAG/edgecraftrag/api/v1/model.py @@ -123,7 +123,10 @@ def get_available_weights(model_path): def get_available_models(model_type): avail_models = [] - if model_type == "LLM": + if model_type == "vLLM": + LLM_MODEL = os.getenv("LLM_MODEL", "Qwen/Qwen3-8B") + avail_models.append(LLM_MODEL) + elif model_type == "LLM": items = os.listdir(CONTAINER_MODEL_PATH) for item in items: if item == "BAAI": @@ -134,6 +137,8 @@ def get_available_models(model_type): avail_models.append(item + "/" + sub_path) else: avail_models.append(item) + elif model_type == "kbadmin_embedding_model": + return ["BAAI/bge-large-zh-v1.5"] else: for item in os.listdir(CONTAINER_MODEL_PATH + "BAAI"): if (model_type == "reranker" and "rerank" in item) or (model_type == "embedding" and "rerank" not in item): diff --git a/EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py b/EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py index e1cd5b8345..dfa2ec25e6 100755 --- a/EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py +++ b/EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py @@ -4,6 +4,7 @@ import asyncio import json import os +import re import weakref from concurrent.futures import ThreadPoolExecutor @@ -12,15 +13,21 @@ from edgecraftrag.base import IndexerType, InferenceType, ModelType, NodeParserType, PostProcessorType, RetrieverType from edgecraftrag.components.benchmark import Benchmark from edgecraftrag.components.generator import QnAGenerator -from edgecraftrag.components.indexer import VectorIndexer +from edgecraftrag.components.indexer import KBADMINIndexer, VectorIndexer from edgecraftrag.components.node_parser import ( HierarchyNodeParser, + KBADMINParser, SimpleNodeParser, SWindowNodeParser, UnstructedNodeParser, ) from edgecraftrag.components.postprocessor import MetadataReplaceProcessor, RerankProcessor -from edgecraftrag.components.retriever import AutoMergeRetriever, SimpleBM25Retriever, VectorSimRetriever +from edgecraftrag.components.retriever import ( + AutoMergeRetriever, + KBadminRetriever, + SimpleBM25Retriever, + VectorSimRetriever, +) from edgecraftrag.context import ctx from fastapi import FastAPI, File, HTTPException, UploadFile, status from pymilvus import connections @@ -51,16 +58,30 @@ async def get_pipeline_json(name): # GET Pipeline benchmark -@pipeline_app.get(path="/v1/settings/pipelines/{name}/benchmark") -async def get_pipeline_benchmark(name): - pl = ctx.get_pipeline_mgr().get_pipeline_by_name_or_id(name) +@pipeline_app.get(path="/v1/settings/pipeline/benchmark") +async def 
get_pipeline_benchmark(): + pl = ctx.get_pipeline_mgr().get_active_pipeline() if pl and pl.benchmark: return pl.benchmark +# GET Pipeline benchmark +@pipeline_app.get(path="/v1/settings/pipelines/{name}/benchmarks") +async def get_pipeline_benchmarks(name): + pl = ctx.get_pipeline_mgr().get_pipeline_by_name_or_id(name) + if pl and pl.benchmark: + return pl.benchmark.benchmark_data_list + + # POST Pipeline @pipeline_app.post(path="/v1/settings/pipelines") async def add_pipeline(request: PipelineCreateIn): + pattern = re.compile(r"^[a-zA-Z0-9_]+$") + if not pattern.fullmatch(request.name): + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Pipeline name must consist of letters, numbers, and underscores.", + ) return load_pipeline(request) @@ -126,9 +147,11 @@ def update_pipeline_handler(pl, req): active_kb = ctx.knowledgemgr.get_active_knowledge_base() active_pipeline = ctx.get_pipeline_mgr().get_active_pipeline() kb_name = active_kb.name if active_kb else "default_kb" + pl_change = False if req.node_parser is not None: np = req.node_parser + pl_change = ctx.get_node_parser_mgr().search_parser_change(pl, req) found_parser = ctx.get_node_parser_mgr().search_parser(np) if found_parser is not None: pl.node_parser = found_parser @@ -153,12 +176,10 @@ def update_pipeline_handler(pl, req): pl.node_parser = SWindowNodeParser.from_defaults(window_size=np.window_size) case NodeParserType.UNSTRUCTURED: pl.node_parser = UnstructedNodeParser(chunk_size=np.chunk_size, chunk_overlap=np.chunk_overlap) + case NodeParserType.KBADMINPARSER: + pl.node_parser = KBADMINParser() ctx.get_node_parser_mgr().add(pl.node_parser) - all_docs = ctx.get_file_mgr().get_all_docs() - nodelist = pl.node_parser.run(docs=all_docs) - if nodelist is not None and len(nodelist) > 0: - ctx.get_node_mgr().add_nodes(pl.node_parser.idx, nodelist) pl._node_changed = True if req.indexer is not None: @@ -168,17 +189,24 @@ def update_pipeline_handler(pl, req): pl.indexer = found_indexer else: embed_model = None - if ind.embedding_model: - embed_model = ctx.get_model_mgr().search_model(ind.embedding_model) - if embed_model is None: - ind.embedding_model.model_type = ModelType.EMBEDDING - embed_model = ctx.get_model_mgr().load_model(ind.embedding_model) - ctx.get_model_mgr().add(embed_model) match ind.indexer_type: case IndexerType.DEFAULT_VECTOR | IndexerType.FAISS_VECTOR | IndexerType.MILVUS_VECTOR: + if ind.embedding_model: + embed_model = ctx.get_model_mgr().search_model(ind.embedding_model) + if embed_model is None: + ind.embedding_model.model_type = ModelType.EMBEDDING + embed_model = ctx.get_model_mgr().load_model(ind.embedding_model) + ctx.get_model_mgr().add(embed_model) # TODO: **RISK** if considering 2 pipelines with different # nodes, but same indexer, what will happen? 
- pl.indexer = VectorIndexer(embed_model, ind.indexer_type, ind.vector_uri, kb_name) + pl.indexer = VectorIndexer(embed_model, ind.indexer_type, ind.vector_url, kb_name) + case IndexerType.KBADMIN_INDEXER: + kbadmin_embedding_url = ind.embedding_url + KBADMIN_VECTOR_URL = ind.vector_url + embed_model = ind.embedding_model.model_id + pl.indexer = KBADMINIndexer( + embed_model, ind.indexer_type, kbadmin_embedding_url, KBADMIN_VECTOR_URL + ) case _: pass ctx.get_indexer_mgr().add(pl.indexer) @@ -208,6 +236,8 @@ def update_pipeline_handler(pl, req): pl.retriever = SimpleBM25Retriever(pl.indexer, similarity_top_k=retr.retrieve_topk) else: return Exception("No indexer") + case RetrieverType.KBADMIN_RETRIEVER: + pl.retriever = KBadminRetriever(pl.indexer, similarity_top_k=retr.retrieve_topk) case _: pass # Index is updated to retriever @@ -272,7 +302,7 @@ def run_async_task(): loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) try: - loop.run_until_complete(Synchronizing_vector_data(active_pipeline, pl)) + loop.run_until_complete(Synchronizing_vector_data(active_pipeline, pl, pl_change)) except Exception as e: raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Synchronization error: {e}") finally: @@ -292,8 +322,8 @@ def load_pipeline_from_file(): with open(PIPELINE_FILE, "r", encoding="utf-8") as f: all_pipelines = f.read() try: - all_da = json.loads(all_pipelines) - for pipeline_data in all_da: + all_data = json.loads(all_pipelines) + for pipeline_data in all_data: one_pipelinejson = json.loads(pipeline_data) pipeline_req = PipelineCreateIn(**one_pipelinejson) load_pipeline(pipeline_req) @@ -323,18 +353,18 @@ def save_pipeline_to_file(): # Detecting if milvus is connected @pipeline_app.post(path="/v1/check/milvus") async def check_milvus(request: MilvusConnectRequest): - vector_uri = request.vector_uri + vector_url = request.vector_url try: - if vector_uri.startswith("http://"): - host_port = vector_uri.replace("http://", "") - elif vector_uri.startswith("https://"): - host_port = vector_uri.replace("https://", "") + if vector_url.startswith("http://"): + host_port = vector_url.replace("http://", "") + elif vector_url.startswith("https://"): + host_port = vector_url.replace("https://", "") else: - host_port = vector_uri + host_port = vector_url host, port = host_port.split(":", 1) - connections.connect(alias="default", host=host, port=port) + connections.connect(alias="knowledge_default", host=host, port=port) - if connections.has_connection("default"): + if connections.has_connection("knowledge_default"): return {"status": "200", "message": "Milvus connection successful."} else: return {"status": "404", "message": "Milvus connection failed."} diff --git a/EdgeCraftRAG/edgecraftrag/api/v1/prompt.py b/EdgeCraftRAG/edgecraftrag/api/v1/prompt.py index 86639a40a7..0de6a283a2 100644 --- a/EdgeCraftRAG/edgecraftrag/api/v1/prompt.py +++ b/EdgeCraftRAG/edgecraftrag/api/v1/prompt.py @@ -41,6 +41,8 @@ async def get_prompt(): try: generator = ctx.get_pipeline_mgr().get_active_pipeline().generator if generator: + if generator.prompt_content is not None: + return generator.prompt_content return generator.prompt except Exception as e: raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)) diff --git a/EdgeCraftRAG/edgecraftrag/api_schema.py b/EdgeCraftRAG/edgecraftrag/api_schema.py index d7ae1c8478..2bdf8dbd4e 100644 --- a/EdgeCraftRAG/edgecraftrag/api_schema.py +++ b/EdgeCraftRAG/edgecraftrag/api_schema.py @@ -25,7 +25,8 @@ class 
NodeParserIn(BaseModel): class IndexerIn(BaseModel): indexer_type: str embedding_model: Optional[ModelIn] = None - vector_uri: Optional[str] = None + embedding_url: Optional[str] = None + vector_url: Optional[str] = None class RetrieverIn(BaseModel): @@ -80,7 +81,15 @@ class KnowledgeBaseCreateIn(BaseModel): name: str description: Optional[str] = None active: Optional[bool] = None + comp_type: Optional[str] = "knowledge" + comp_subtype: Optional[str] = "origin_kb" + experience_active: Optional[bool] = None + + +class ExperienceIn(BaseModel): + question: str + content: list[str] = None class MilvusConnectRequest(BaseModel): - vector_uri: str + vector_url: str diff --git a/EdgeCraftRAG/edgecraftrag/base.py b/EdgeCraftRAG/edgecraftrag/base.py index db1dc414b8..3306afc2ed 100644 --- a/EdgeCraftRAG/edgecraftrag/base.py +++ b/EdgeCraftRAG/edgecraftrag/base.py @@ -19,7 +19,9 @@ class CompType(str, Enum): RETRIEVER = "retriever" POSTPROCESSOR = "postprocessor" GENERATOR = "generator" + QUERYSEARCH = "querysearch" FILE = "file" + CHUNK_NUM = "chunk_num" class ModelType(str, Enum): @@ -44,6 +46,7 @@ class NodeParserType(str, Enum): HIERARCHY = "hierarchical" SENTENCEWINDOW = "sentencewindow" UNSTRUCTURED = "unstructured" + KBADMINPARSER = "kbadmin_parser" class IndexerType(str, Enum): @@ -51,6 +54,7 @@ class IndexerType(str, Enum): FAISS_VECTOR = "faiss_vector" DEFAULT_VECTOR = "vector" MILVUS_VECTOR = "milvus_vector" + KBADMIN_INDEXER = "kbadmin_indexer" class RetrieverType(str, Enum): @@ -58,6 +62,7 @@ class RetrieverType(str, Enum): VECTORSIMILARITY = "vectorsimilarity" AUTOMERGE = "auto_merge" BM25 = "bm25" + KBADMIN_RETRIEVER = "kbadmin_retriever" class PostProcessorType(str, Enum): @@ -113,9 +118,19 @@ class BaseMgr: def __init__(self): self.components = {} - def add(self, comp: BaseComponent): + def add(self, comp: BaseComponent, name: str = None): + if name: + self.components[name] = comp + return True self.components[comp.idx] = comp + def append(self, comp: BaseComponent, name: str = None): + key = name if name else comp.idx + if key not in self.components: + self.components[key] = [] + self.components[key].append(comp) + return True + def get(self, idx: str) -> BaseComponent: if idx in self.components: return self.components[idx] diff --git a/EdgeCraftRAG/edgecraftrag/components/benchmark.py b/EdgeCraftRAG/edgecraftrag/components/benchmark.py index fc3801b5d3..3bf2a7e602 100644 --- a/EdgeCraftRAG/edgecraftrag/components/benchmark.py +++ b/EdgeCraftRAG/edgecraftrag/components/benchmark.py @@ -49,7 +49,14 @@ def cal_input_token_size(self, input_text_list): return input_token_size def init_benchmark_data(self): - pipeline_comp = [CompType.RETRIEVER, CompType.POSTPROCESSOR, CompType.GENERATOR] + pipeline_comp = [ + CompType.NODEPARSER, + CompType.CHUNK_NUM, + CompType.RETRIEVER, + CompType.POSTPROCESSOR, + CompType.QUERYSEARCH, + CompType.GENERATOR, + ] if self.is_enabled(): with self._idx_lock: self.last_idx += 1 @@ -58,6 +65,8 @@ def init_benchmark_data(self): data["idx"] = idx for comp in pipeline_comp: data[comp] = "" + data[CompType.NODEPARSER] = 0 + data[CompType.CHUNK_NUM] = 0 return idx, data def update_benchmark_data(self, idx, comp_type, start, end): diff --git a/EdgeCraftRAG/edgecraftrag/components/generator.py b/EdgeCraftRAG/edgecraftrag/components/generator.py index cb170fcd10..0f746b89cb 100755 --- a/EdgeCraftRAG/edgecraftrag/components/generator.py +++ b/EdgeCraftRAG/edgecraftrag/components/generator.py @@ -84,10 +84,39 @@ def extract_unstructured_eles(retrieved_nodes=[], 
text_gen_context=""): return unstructured_str +def build_stream_response(status=None, content=None, error=None): + response = {"status": status, "contentType": "text"} + if content is not None: + response["content"] = content + if error is not None: + response["error"] = error + return response + + async def local_stream_generator(lock, llm, prompt_str, unstructured_str): async with lock: response = llm.stream_complete(prompt_str) collected_data = [] + try: + for r in response: + collected_data.append(r.delta) + yield r.delta + await asyncio.sleep(0) + if unstructured_str: + collected_data.append(unstructured_str) + yield unstructured_str + res = "".join(collected_data) + save_history(res) + except Exception as e: + start_idx = str(e).find("message") + len("message") + result_error = str(e)[start_idx:] + yield f"code:0000{result_error}" + + +async def stream_generator(llm, prompt_str, unstructured_str): + response = llm.stream_complete(prompt_str) + collected_data = [] + try: for r in response: collected_data.append(r.delta) yield r.delta @@ -97,20 +126,10 @@ async def local_stream_generator(lock, llm, prompt_str, unstructured_str): yield unstructured_str res = "".join(collected_data) save_history(res) - - -async def stream_generator(llm, prompt_str, unstructured_str): - response = llm.stream_complete(prompt_str) - collected_data = [] - for r in response: - collected_data.append(r.delta) - yield r.delta - await asyncio.sleep(0) - if unstructured_str: - collected_data.append(unstructured_str) - yield unstructured_str - res = "".join(collected_data) - save_history(res) + except Exception as e: + start_idx = str(e).find("message") + len("message") + result_error = str(e)[start_idx:] + yield f"code:0000{result_error}" class QnAGenerator(BaseComponent): @@ -130,13 +149,20 @@ def __init__(self, llm_model, prompt_template_file, inference_type, vllm_endpoin self.llm = llm_model if isinstance(llm_model, str): self.model_id = llm_model + self.model_path = llm_model else: - self.model_id = llm_model().model_id + llm_instance = llm_model() + if llm_instance.model_path is None or llm_instance.model_path == "": + self.model_id = llm_instance.model_id + self.model_path = os.path.join("/home/user/models/", os.getenv("LLM_MODEL", "Qwen/Qwen3-8B")) + else: + self.model_id = llm_instance.model_id + self.model_path = llm_instance.model_path if self.inference_type == InferenceType.LOCAL: self.lock = asyncio.Lock() self.prompt_content = prompt_content self.prompt_template_file = prompt_template_file - self.prompt = self.init_prompt(self.model_id, self.prompt_content, self.prompt_template_file) + self.prompt = self.init_prompt(self.model_path, self.prompt_content, self.prompt_template_file) self.llm = llm_model if isinstance(llm_model, str): @@ -151,20 +177,13 @@ def __init__(self, llm_model, prompt_template_file, inference_type, vllm_endpoin vllm_endpoint = os.getenv("vLLM_ENDPOINT", "http://localhost:8086") self.vllm_endpoint = vllm_endpoint - def init_prompt(self, model_id, prompt_content=None, prompt_template_file=None, enable_think=False): - # using the prompt template enhancement strategy(only tested on Qwen2-7B-Instruction) if template_enhance_on is true - template_enhance_on = True if "Qwen2" in self.model_id else False + def init_prompt(self, model_path, prompt_content=None, prompt_template_file=None, enable_think=False): if prompt_content: - self.set_prompt(prompt_content) - return get_prompt_template(model_id, prompt_content, prompt_template_file, enable_think) + return get_prompt_template(model_path, 
prompt_content, prompt_template_file, enable_think) elif prompt_template_file is None: print("There is no template file, using the default template.") - prompt_template = get_prompt_template(model_id, prompt_content, prompt_template_file, enable_think) - return ( - DocumentedContextRagPromptTemplate.from_template(prompt_template) - if template_enhance_on - else prompt_template - ) + prompt_template = get_prompt_template(model_path, prompt_content, prompt_template_file, enable_think) + return prompt_template else: safe_root = "/templates" prompt_template_file = os.path.normpath(os.path.join(safe_root, prompt_template_file)) @@ -172,25 +191,19 @@ def init_prompt(self, model_id, prompt_content=None, prompt_template_file=None, raise ValueError("Invalid template path") if not os.path.exists(prompt_template_file): raise ValueError("Template file not exists") - if template_enhance_on: - return DocumentedContextRagPromptTemplate.from_file(prompt_template_file) - else: - return get_prompt_template(model_id, prompt_content, prompt_template_file, enable_think) + return get_prompt_template(model_path, prompt_content, prompt_template_file, enable_think) def set_prompt(self, prompt): if "{context}" not in prompt: prompt += "\n<|im_start|>{context}<|im_end|>" if "{chat_history}" not in prompt: prompt += "\n<|im_start|>{chat_history}" - self.prompt = prompt + self.prompt_content = prompt + self.prompt = self.init_prompt(self.model_path, self.prompt_content, self.prompt_template_file) def reset_prompt(self): - prompt_template = get_prompt_template(self.model_id) - self.prompt = ( - DocumentedContextRagPromptTemplate.from_template(prompt_template) - if self.template_enhance_on - else prompt_template - ) + self.prompt_content = None + self.prompt = self.init_prompt(self.model_path, self.prompt_content, self.prompt_template_file) def clean_string(self, string): ret = string @@ -206,20 +219,21 @@ def query_transform(self, chat_request, retrieved_nodes, sub_questions=None): :return: Generated text_gen_context and prompt_str.""" text_gen_context = "" for n in retrieved_nodes: - origin_text = n.node.get_text() + origin_text = n.node.text text_gen_context += self.clean_string(origin_text.strip()) query = chat_request.messages chat_history = concat_history(chat_request.messages) # Modify model think status if chat_request.chat_template_kwargs: - if self.enable_think != chat_request.chat_template_kwargs["enable_thinking"]: - self.prompt = self.init_prompt( - self.model_id, - self.prompt_content, - self.prompt_template_file, - chat_request.chat_template_kwargs["enable_thinking"], - ) - self.enable_think = chat_request.chat_template_kwargs["enable_thinking"] + if "enable_thinking" in chat_request.chat_template_kwargs: + if self.enable_think != chat_request.chat_template_kwargs["enable_thinking"]: + self.prompt = self.init_prompt( + self.model_path, + self.prompt_content, + self.prompt_template_file, + chat_request.chat_template_kwargs["enable_thinking"], + ) + self.enable_think = chat_request.chat_template_kwargs["enable_thinking"] if sub_questions: final_query = f"{query}\n\n### Sub-questions ###\nThe following list is how you should consider the answer, you MUST follow these steps when responding:\n\n{sub_questions}" else: diff --git a/EdgeCraftRAG/edgecraftrag/components/indexer.py b/EdgeCraftRAG/edgecraftrag/components/indexer.py index 842122964f..bd79bb3042 100644 --- a/EdgeCraftRAG/edgecraftrag/components/indexer.py +++ b/EdgeCraftRAG/edgecraftrag/components/indexer.py @@ -13,8 +13,7 @@ class 
VectorIndexer(BaseComponent, VectorStoreIndex): - - def __init__(self, embed_model, vector_type, milvus_uri="http://localhost:19530", kb_name="default_kb"): + def __init__(self, embed_model, vector_type, vector_url="http://localhost:19530", kb_name="default_kb"): BaseComponent.__init__( self, comp_type=CompType.INDEXER, @@ -26,10 +25,10 @@ def __init__(self, embed_model, vector_type, milvus_uri="http://localhost:19530" from llama_index.core import Settings Settings.embed_model = None - self.milvus_uri = milvus_uri - self._initialize_indexer(embed_model, vector_type, milvus_uri, kb_name) + self.vector_url = vector_url + self._initialize_indexer(embed_model, vector_type, vector_url, kb_name) - def _initialize_indexer(self, embed_model, vector_type, milvus_uri, kb_name): + def _initialize_indexer(self, embed_model, vector_type, vector_url, kb_name): # get active name pl = ctx.get_pipeline_mgr().get_active_pipeline() plname = pl.name if pl else "" @@ -46,7 +45,7 @@ def _initialize_indexer(self, embed_model, vector_type, milvus_uri, kb_name): VectorStoreIndex.__init__(self, embed_model=embed_model, nodes=[], storage_context=faiss_store) case IndexerType.MILVUS_VECTOR: milvus_vector_store = MilvusVectorStore( - uri=milvus_uri, + uri=vector_url, dim=self.d, collection_name=kb_name + plname + str(self.d), overwrite=False, @@ -55,14 +54,14 @@ def _initialize_indexer(self, embed_model, vector_type, milvus_uri, kb_name): VectorStoreIndex.__init__(self, embed_model=embed_model, nodes=[], storage_context=milvus_store) def reinitialize_indexer(self, kb_name="default_kb"): - self._initialize_indexer(self.model, self.comp_subtype, self.milvus_uri, kb_name) + self._initialize_indexer(self.model, self.comp_subtype, self.vector_url, kb_name) def clear_milvus_collection(self, kb_name="default_kb"): # get active name pl = ctx.get_pipeline_mgr().get_active_pipeline() plname = pl.name if pl else "" milvus_vector_store = MilvusVectorStore( - uri=self.milvus_uri, + uri=self.vector_url, collection_name=kb_name + plname + str(self.d), overwrite=False, ) @@ -75,3 +74,42 @@ def run(self, **kwargs) -> Any: def ser_model(self): set = {"idx": self.idx, "indexer_type": self.comp_subtype, "model": self.model} return set + + +class KBADMINIndexer(BaseComponent): + # Handled in the kbadmin project + def __init__(self, embed_model, vector_type, kbadmin_embedding_url, vector_url="http://localhost:29530"): + BaseComponent.__init__( + self, + comp_type=CompType.INDEXER, + comp_subtype=IndexerType.KBADMIN_INDEXER, + ) + self.embed_model = embed_model + self.kbadmin_embedding_url = kbadmin_embedding_url + self.vector_url = vector_url + + def insert_nodes(self, nodes): + return None + + def _index_struct(self, nodes): + return None + + def run(self, **kwargs) -> Any: + return None + + def reinitialize_indexer(self, kb_name="default_kb"): + return None + + def clear_milvus_collection(self, **kwargs): + return None + + @model_serializer + def ser_model(self): + set = { + "idx": self.idx, + "indexer_type": self.comp_subtype, + "model": {"model_id": self.embed_model}, + "kbadmin_embedding_url": self.kbadmin_embedding_url, + "vector_url": self.vector_url, + } + return set diff --git a/EdgeCraftRAG/edgecraftrag/components/knowledge_base.py b/EdgeCraftRAG/edgecraftrag/components/knowledge_base.py index 259c4a463f..45ea309fad 100644 --- a/EdgeCraftRAG/edgecraftrag/components/knowledge_base.py +++ b/EdgeCraftRAG/edgecraftrag/components/knowledge_base.py @@ -1,8 +1,9 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: 
Apache-2.0 +import json import os -from typing import Any, List, Optional +from typing import Any, Dict, List, Optional, Union from edgecraftrag.base import BaseComponent from pydantic import model_serializer @@ -12,8 +13,12 @@ class Knowledge(BaseComponent): file_paths: Optional[List[str]] = [] file_map: Optional[List[str]] = {} description: Optional[str] = "None" - comp_type: str = "knowledge" + comp_type: Optional[str] = "knowledge" + comp_subtype: Optional[str] = "origin_kb" + experience_active: Optional[bool] = False if comp_type == "knowledge" else True active: bool + if comp_type == "experience": + comp_subtype = None def _update_file_names(self) -> None: self.file_map = {os.path.basename(path): path for path in self.file_paths if path is not None} @@ -35,6 +40,105 @@ def remove_file_path(self, file_path: str) -> bool: def get_file_paths(self) -> List[str]: return self.file_paths + def ensure_file_exists(self): + dir_path = os.path.dirname(self.file_paths[0]) + os.makedirs(dir_path, exist_ok=True) + if not os.path.exists(self.file_paths[0]): + with open(self.file_paths[0], "w", encoding="utf-8") as f: + json.dump([], f, ensure_ascii=False, indent=4) + + def get_all_experience(self) -> List[Dict]: + experinence_file = "/home/user/ui_cache/configs/experience_dir/experience.json" + if experinence_file not in self.file_paths: + self.file_paths.append(experinence_file) + if not os.path.isfile(self.file_paths[0]): + self.ensure_file_exists() + with open(self.file_paths[0], "r", encoding="utf-8") as f: + return json.load(f) + + def get_experience_by_question(self, question: str) -> Optional[Dict]: + for item in self.get_all_experience(): + if item.get("question") == question: + return item + return None + + def add_multiple_experiences( + self, experiences: List[Dict[str, Union[str, List[str]]]], flag: bool = True + ) -> List[Dict]: + all_experiences = self.get_all_experience() + result = [] + for exp in experiences: + question = exp.get("question") + if not question: + raise ValueError("Must exist when uploading question") + content = exp.get("content", []) + found = False + for item in all_experiences: + if item["question"] == question: + if flag: + item["content"].extend([c for c in content if c not in item["content"]]) + else: + item["content"] = content + result.append(item) + found = True + break + if not found: + new_item = {"question": question, "content": content} + all_experiences.append(new_item) + result.append(new_item) + with open(self.file_paths[0], "w", encoding="utf-8") as f: + json.dump(all_experiences, f, ensure_ascii=False, indent=4) + return result + + def delete_experience(self, question: str) -> bool: + items = self.get_all_experience() + remaining_items = [item for item in items if item.get("question") != question] + if len(remaining_items) == len(items): + return False + with open(self.file_paths[0], "w", encoding="utf-8") as f: + json.dump(remaining_items, f, ensure_ascii=False, indent=4) + return True + + def clear_experiences(self) -> bool: + all_experiences = self.get_all_experience() + with open(self.file_paths[0], "w", encoding="utf-8") as f: + json.dump([], f, ensure_ascii=False, indent=4) + return True + + def update_experience(self, question: str, content: List[str]) -> Optional[Dict]: + items = self.get_all_experience() + for i, item in enumerate(items): + if item.get("question") == question: + updated_item = {"question": question, "content": content} + items[i] = updated_item + with open(self.file_paths[0], "w", encoding="utf-8") as f: + json.dump(items, 
f, ensure_ascii=False, indent=4) + return updated_item + return None + + def add_experiences_from_file(self, file_path: str, flag: bool = False) -> List[Dict]: + if not file_path.endswith(".json"): + raise ValueError("File upload type error") + try: + with open(file_path, "r", encoding="utf-8") as f: + experiences = json.load(f) + if not isinstance(experiences, list): + raise ValueError("The contents of the file must be a list") + return self.add_multiple_experiences(experiences=experiences, flag=flag) + except json.JSONDecodeError as e: + raise ValueError("File parsing failure") + except Exception as e: + raise RuntimeError("File Error") + + def calculate_totals(self): + if self.comp_type == "knowledge": + total = len(self.file_paths) + elif self.comp_type == "experience": + total = len(self.get_all_experience()) + else: + total = None + return total + def run(self, **kwargs) -> Any: pass @@ -44,8 +148,11 @@ def ser_model(self): "idx": self.idx, "name": self.name, "comp_type": self.comp_type, + "comp_subtype": self.comp_subtype, "file_map": self.file_map, "description": self.description, "active": self.active, + "experience_active": self.experience_active, + "total": self.calculate_totals(), } return set diff --git a/EdgeCraftRAG/edgecraftrag/components/node_parser.py b/EdgeCraftRAG/edgecraftrag/components/node_parser.py index 0f386bc61f..0bd49b91b4 100644 --- a/EdgeCraftRAG/edgecraftrag/components/node_parser.py +++ b/EdgeCraftRAG/edgecraftrag/components/node_parser.py @@ -168,3 +168,25 @@ def ser_model(self): "chunk_overlap": self.chunk_overlap, } return set + + +class KBADMINParser(BaseComponent): + # Handled in the kbadmin project + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.comp_type = CompType.NODEPARSER + self.comp_subtype = NodeParserType.KBADMINPARSER + + def run(self, **kwargs) -> Any: + return None + + def insert_nodes(self): + return None + + @model_serializer + def ser_model(self): + set = { + "idx": self.idx, + "parser_type": self.comp_subtype, + } + return set diff --git a/EdgeCraftRAG/edgecraftrag/components/pipeline.py b/EdgeCraftRAG/edgecraftrag/components/pipeline.py index 41780ef88f..29205a3819 100644 --- a/EdgeCraftRAG/edgecraftrag/components/pipeline.py +++ b/EdgeCraftRAG/edgecraftrag/components/pipeline.py @@ -44,7 +44,7 @@ def __init__( if self.name == "" or self.name is None: self.name = self.idx self.enable_benchmark = os.getenv("ENABLE_BENCHMARK", "False").lower() == "true" - self.run_pipeline_cb = run_generator_ben if self.enable_benchmark else run_generator + self.run_pipeline_cb = run_generator self.run_retriever_cb = run_retrieve self.run_data_prepare_cb = run_simple_doc @@ -97,12 +97,10 @@ def check_active(self, nodelist, kb_name): # TODO: update doc changes # TODO: more operations needed, add, del, modify def update_nodes(self, nodes): - print(f"Updating {len(nodes)} nodes ...") if self.indexer is not None: self.indexer.insert_nodes(nodes) def update_indexer_to_retriever(self): - print("Updating indexer to retriever ...") if self.indexer is not None and self.retriever is not None: old_retriever = self.retriever retriever_type = old_retriever.comp_subtype @@ -122,7 +120,6 @@ def update_indexer_to_retriever(self): # Implement abstract run function # callback dispatcher def run(self, **kwargs) -> Any: - print(kwargs) if "cbtype" in kwargs: if kwargs["cbtype"] == CallbackType.DATAPREP: if "docs" in kwargs: @@ -183,9 +180,18 @@ def model_existed(self, model_id: str) -> bool: # Test callback to retrieve nodes from query def 
run_retrieve(pl: Pipeline, chat_request: ChatCompletionRequest) -> Any: + benchmark_data = {} query = chat_request.messages + top_k = None if chat_request.k == ChatCompletionRequest.model_fields["k"].default else chat_request.k contexts = {} - retri_res = pl.retriever.run(query=query) + start = 0 + if pl.enable_benchmark: + _, benchmark_data = pl.benchmark.init_benchmark_data() + start = time.perf_counter() + retri_res = pl.retriever.run(query=query, top_k=top_k) + if pl.enable_benchmark: + benchmark_data[CompType.RETRIEVER] = time.perf_counter() - start + pl.benchmark.insert_benchmark_data(benchmark_data) contexts[CompType.RETRIEVER] = retri_res query_bundle = QueryBundle(query) if pl.postprocessor: @@ -201,10 +207,18 @@ def run_retrieve(pl: Pipeline, chat_request: ChatCompletionRequest) -> Any: def run_simple_doc(pl: Pipeline, docs: List[Document]) -> Any: + start = 0 + benchmark_data = {} + if pl.enable_benchmark: + _, benchmark_data = pl.benchmark.init_benchmark_data() + start = time.perf_counter() n = pl.node_parser.run(docs=docs) if pl.indexer is not None: pl.indexer.insert_nodes(n) - print(pl.indexer._index_struct) + if pl.enable_benchmark: + benchmark_data[CompType.NODEPARSER] += time.perf_counter() - start + benchmark_data[CompType.CHUNK_NUM] += len(n) + pl.benchmark.insert_benchmark_data(benchmark_data) return n @@ -225,114 +239,93 @@ async def timing_wrapper(): return ret -def run_generator_ben(pl: Pipeline, chat_request: ChatCompletionRequest) -> Any: - benchmark_index, benchmark_data = pl.benchmark.init_benchmark_data() +def run_generator(pl: Pipeline, chat_request: ChatCompletionRequest) -> Any: + if pl.enable_benchmark: + benchmark_index, benchmark_data = pl.benchmark.init_benchmark_data() contexts = {} - start = time.perf_counter() + retri_res = [] + active_kb = chat_request.user if chat_request.user else None + enable_rag_retrieval = ( + chat_request.chat_template_kwargs.get("enable_rag_retrieval", True) + if chat_request.chat_template_kwargs + else True + ) + if not active_kb: + enable_rag_retrieval = False + elif pl.retriever.comp_subtype == "kbadmin_retriever" and active_kb.comp_subtype == "origin_kb": + enable_rag_retrieval = False + elif pl.retriever.comp_subtype != "kbadmin_retriever" and active_kb.comp_subtype == "kbadmin_kb": + enable_rag_retrieval = False query = chat_request.messages - if pl.generator.inference_type == InferenceType.VLLM: - UI_DIRECTORY = os.getenv("TMPFILE_PATH", "/home/user/ui_cache") - search_config_path = os.path.join(UI_DIRECTORY, "configs/search_config.yaml") - search_dir = os.path.join(UI_DIRECTORY, "configs/search_dir") - - def run_async_query_search(): - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - try: - return loop.run_until_complete(query_search(query, search_config_path, search_dir, pl)) - finally: - loop.close() - - with ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(run_async_query_search) - top1_issue, sub_questionss_result = future.result() - if sub_questionss_result: - query = query + sub_questionss_result - - retri_res = pl.retriever.run(query=query) - query_bundle = QueryBundle(query) - benchmark_data[CompType.RETRIEVER] = time.perf_counter() - start - contexts[CompType.RETRIEVER] = retri_res - - start = time.perf_counter() - if pl.postprocessor: - for processor in pl.postprocessor: - if ( - isinstance(processor, RerankProcessor) - and chat_request.top_n != ChatCompletionRequest.model_fields["top_n"].default - ): - processor.top_n = chat_request.top_n - retri_res = 
processor.run(retri_res=retri_res, query_bundle=query_bundle) - contexts[CompType.POSTPROCESSOR] = retri_res - benchmark_data[CompType.POSTPROCESSOR] = time.perf_counter() - start + sub_questionss_result = None + experience_status = True if chat_request.tool_choice == "auto" else False + if enable_rag_retrieval: + start = 0 + if pl.enable_benchmark: + start = time.perf_counter() + if pl.generator.inference_type == InferenceType.VLLM and experience_status: + UI_DIRECTORY = "/home/user/ui_cache" + search_config_path = os.path.join(UI_DIRECTORY, "configs/search_config.yaml") + search_dir = os.path.join(UI_DIRECTORY, "configs/experience_dir/experience.json") + + def run_async_query_search(): + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + return loop.run_until_complete(query_search(query, search_config_path, search_dir, pl)) + finally: + loop.close() + + with ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(run_async_query_search) + top1_issue, sub_questionss_result = future.result() + if sub_questionss_result: + query = query + sub_questionss_result + if pl.enable_benchmark: + benchmark_data[CompType.QUERYSEARCH] = time.perf_counter() - start + start = time.perf_counter() + top_k = None if chat_request.k == ChatCompletionRequest.model_fields["k"].default else chat_request.k + retri_res = pl.retriever.run(query=query, top_k=top_k) + if pl.enable_benchmark: + benchmark_data[CompType.RETRIEVER] = time.perf_counter() - start + contexts[CompType.RETRIEVER] = retri_res + query_bundle = QueryBundle(query) + if pl.enable_benchmark: + start = time.perf_counter() + if pl.postprocessor: + for processor in pl.postprocessor: + if ( + isinstance(processor, RerankProcessor) + and chat_request.top_n != ChatCompletionRequest.model_fields["top_n"].default + ): + processor.top_n = chat_request.top_n + retri_res = processor.run(retri_res=retri_res, query_bundle=query_bundle) + contexts[CompType.POSTPROCESSOR] = retri_res + if pl.enable_benchmark: + benchmark_data[CompType.POSTPROCESSOR] = time.perf_counter() - start if pl.generator is None: raise ValueError("No Generator Specified") - text_gen_context, prompt_str = pl.generator.query_transform(chat_request, retri_res) - input_token_size = pl.benchmark.cal_input_token_size(prompt_str) - - np_type = pl.node_parser.comp_subtype - start = time.perf_counter() - if pl.generator.inference_type == InferenceType.LOCAL: - ret = pl.generator.run(chat_request, retri_res, np_type) - elif pl.generator.inference_type == InferenceType.VLLM: - ret = pl.generator.run_vllm(chat_request, retri_res, np_type, sub_questions=sub_questionss_result) - else: - raise ValueError("LLM inference_type not supported") - end = time.perf_counter() + if pl.enable_benchmark: + _, prompt_str = pl.generator.query_transform(chat_request, retri_res) + input_token_size = pl.benchmark.cal_input_token_size(prompt_str) - if isinstance(ret, StreamingResponse): - ret = benchmark_response(ret, pl.benchmark, benchmark_index, benchmark_data, input_token_size, start) - else: - benchmark_data[CompType.GENERATOR] = end - start - pl.benchmark.insert_llm_data(benchmark_index, input_token_size) - pl.benchmark.insert_benchmark_data(benchmark_data) - return ret, contexts - - -def run_generator(pl: Pipeline, chat_request: ChatCompletionRequest) -> Any: - query = chat_request.messages - contexts = {} - if pl.generator.inference_type == InferenceType.VLLM: - UI_DIRECTORY = os.getenv("TMPFILE_PATH", "/home/user/ui_cache") - search_config_path = os.path.join(UI_DIRECTORY, 
"configs/search_config.yaml") - search_dir = os.path.join(UI_DIRECTORY, "configs/search_dir") - - def run_async_query_search(): - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - try: - return loop.run_until_complete(query_search(query, search_config_path, search_dir, pl)) - finally: - loop.close() - - with ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(run_async_query_search) - top1_issue, sub_questionss_result = future.result() - if sub_questionss_result: - query = query + sub_questionss_result - retri_res = pl.retriever.run(query=query) - contexts[CompType.RETRIEVER] = retri_res - query_bundle = QueryBundle(query) - - if pl.postprocessor: - for processor in pl.postprocessor: - if ( - isinstance(processor, RerankProcessor) - and chat_request.top_n != ChatCompletionRequest.model_fields["top_n"].default - ): - processor.top_n = chat_request.top_n - retri_res = processor.run(retri_res=retri_res, query_bundle=query_bundle) - contexts[CompType.POSTPROCESSOR] = retri_res - - if pl.generator is None: - raise ValueError("No Generator Specified") np_type = pl.node_parser.comp_subtype + if pl.enable_benchmark: + start = time.perf_counter() if pl.generator.inference_type == InferenceType.LOCAL: ret = pl.generator.run(chat_request, retri_res, np_type) elif pl.generator.inference_type == InferenceType.VLLM: ret = pl.generator.run_vllm(chat_request, retri_res, np_type, sub_questions=sub_questionss_result) else: raise ValueError("LLM inference_type not supported") + if pl.enable_benchmark: + end = time.perf_counter() + if isinstance(ret, StreamingResponse): + ret = benchmark_response(ret, pl.benchmark, benchmark_index, benchmark_data, input_token_size, start) + else: + benchmark_data[CompType.GENERATOR] = end - start + pl.benchmark.insert_llm_data(benchmark_index, input_token_size) + pl.benchmark.insert_benchmark_data(benchmark_data) return ret, contexts diff --git a/EdgeCraftRAG/edgecraftrag/components/postprocessor.py b/EdgeCraftRAG/edgecraftrag/components/postprocessor.py index bb59cc3d21..cbd387f59e 100644 --- a/EdgeCraftRAG/edgecraftrag/components/postprocessor.py +++ b/EdgeCraftRAG/edgecraftrag/components/postprocessor.py @@ -60,5 +60,5 @@ def run(self, **kwargs) -> Any: @model_serializer def ser_model(self): - set = {"idx": self.idx, "processor_type": self.comp_subtype, "model": None, "top_n": None} + set = {"idx": self.idx, "processor_type": self.comp_subtype, "top_n": None} return set diff --git a/EdgeCraftRAG/edgecraftrag/components/query_preprocess.py b/EdgeCraftRAG/edgecraftrag/components/query_preprocess.py index 124014a038..320f2c32aa 100644 --- a/EdgeCraftRAG/edgecraftrag/components/query_preprocess.py +++ b/EdgeCraftRAG/edgecraftrag/components/query_preprocess.py @@ -85,6 +85,7 @@ def __init__( output_template="", json_key="relevance", json_levels=["Low", "High"], + scores_weight=None, temperature=1.0, API_BASE=None, **kwargs, @@ -107,6 +108,19 @@ def __init__( self.json_levels = json_levels self.API_BASE = API_BASE + # dynamically set scores_weight, use default if not provided + if scores_weight is None: + # generate default weights based on json_levels count + if len(json_levels) == 2: + self.scores_weight = [0.0, 1.0] # Low, High + elif len(json_levels) == 3: + self.scores_weight = [0.0, 0.5, 1.0] # Low, Medium, High + else: + # for other counts, generate evenly spaced weights + self.scores_weight = [i / (len(json_levels) - 1) for i in range(len(json_levels))] + else: + self.scores_weight = scores_weight + async def invoke_vllm(self, 
input_texts): headers = {"Content-Type": "application/json"} payload = { @@ -152,18 +166,22 @@ async def _calculate_logits_score(self, user_input, issue): def _calculate_token_score_vllm(self, outputs, output_index=1, transform="exp"): generated_scores = outputs[output_index] - three_scores = [ - generated_scores.get("Low", -9999.0), - generated_scores.get("Medium", -9999.0), - generated_scores.get("High", -9999.0), - ] - level_scores = [score / self.temperature for score in three_scores] + + # dynamically get scores for all levels + level_scores = [] + for level in self.json_levels: + level_scores.append(generated_scores.get(level, -9999.0)) + + # apply temperature scaling + level_scores = [score / self.temperature for score in level_scores] level_scores_np = numpy.array(level_scores) level_scores_np = numpy.where(level_scores_np < -1000, -1000, level_scores_np) level_scores_np_exp = numpy.exp(level_scores_np - numpy.max(level_scores_np)) scores_probs = level_scores_np_exp / level_scores_np_exp.sum() - scores_weight = numpy.array([0.0, 0.5, 1.0]) # Low=0, Medium=0.5, High=1 + + # using dynamic scores_weight + scores_weight = numpy.array(self.scores_weight) final_score = numpy.dot(scores_probs, scores_weight) return final_score @@ -172,40 +190,43 @@ async def compute_score(self, input_pair): return await self._calculate_logits_score(*input_pair) -def read_json_files(directory: str) -> dict: +def read_json_files(file_path: str) -> dict: result = {} - for filename in os.listdir(directory): - if filename.endswith(".json"): - file_path = os.path.join(directory, filename) - if os.path.isfile(file_path): - try: - with open(file_path, "r", encoding="utf-8") as file: - data = json.load(file) - result.update(data) - except Exception: - continue + if os.path.isfile(file_path): + with open(file_path, "r", encoding="utf-8") as f: + result = json.load(f) return result async def query_search(user_input, search_config_path, search_dir, pl): - top1_issue = None - sub_questionss_result = None - if not os.path.exists(search_dir): - return top1_issue, sub_questionss_result + sub_questions_result = None model_id = pl.generator.model_id vllm_endpoint = pl.generator.vllm_endpoint - cfg = OmegaConf.load(search_config_path) - cfg.query_matcher.model_id = model_id - cfg.query_matcher.API_BASE = os.path.join(vllm_endpoint, "v1/completions") - query_matcher = LogitsEstimatorJSON(**cfg.query_matcher) maintenance_data = read_json_files(search_dir) - issues = list(maintenance_data.keys()) + issues = [] + for i in range(len(maintenance_data)): + issues.append(maintenance_data[i]["question"]) if not issues: - return top1_issue, sub_questionss_result - + return top1_issue, sub_questions_result + + cfg = {} + if not os.path.exists(search_config_path): + cfg["query_matcher"] = { + "instructions": "You're a knowledgeable assistant. Your task is to judge if two queries ask for the same information about the same primary subject. Output only 'Yes' or 'No'. Yes = same subject entity AND same information need, with only wording or stylistic differences. No = different subject entity, different spec or numeric constraint, different attribute/metric, or scope changed by adding/removing a restricting condition. Entity changes MUST lead to No.", + "input_template": "Query 1: {}\nQuery 2: {}\n", + "output_template": "\nAre these queries equivalent? 
Answer 'Yes' or 'No':", + "json_key": "similarity", + "json_levels": ["No", "Yes"], + "temperature": 0.1, + } + else: + cfg = OmegaConf.load(search_config_path) + cfg["query_matcher"]["model_id"] = model_id + cfg["query_matcher"]["API_BASE"] = os.path.join(vllm_endpoint, "v1/completions") + query_matcher = LogitsEstimatorJSON(**cfg["query_matcher"]) semaphore = asyncio.Semaphore(200) async def limited_compute_score(query_matcher, user_input, issue): @@ -219,9 +240,9 @@ async def limited_compute_score(query_matcher, user_input, issue): # Maximum less than 0.6, we don't use query search. if match_scores[0][1] < 0.6: - return top1_issue, sub_questionss_result + return top1_issue, sub_questions_result top1_issue = match_scores[0][0] - for key, value in maintenance_data.items(): - if key == top1_issue: - sub_questionss_result = value - return top1_issue, sub_questionss_result + for i in range(len(maintenance_data)): + if maintenance_data[i]["question"] == top1_issue: + sub_questions_result = "\n".join(maintenance_data[i]["content"]) + return top1_issue, sub_questions_result diff --git a/EdgeCraftRAG/edgecraftrag/components/retriever.py b/EdgeCraftRAG/edgecraftrag/components/retriever.py index fa8553346a..cdd3fe0bc2 100644 --- a/EdgeCraftRAG/edgecraftrag/components/retriever.py +++ b/EdgeCraftRAG/edgecraftrag/components/retriever.py @@ -1,14 +1,19 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -from typing import Any, List, cast +import warnings +from typing import Any, List, Optional, cast +import requests from edgecraftrag.base import BaseComponent, CompType, RetrieverType +from langchain_milvus import Milvus +from langchain_openai import OpenAIEmbeddings from llama_index.core.indices.vector_store.retrievers import VectorIndexRetriever from llama_index.core.retrievers import AutoMergingRetriever -from llama_index.core.schema import BaseNode +from llama_index.core.schema import BaseNode, Document, NodeWithScore from llama_index.retrievers.bm25 import BM25Retriever from pydantic import model_serializer +from pymilvus import Collection, MilvusException, connections, utility class VectorSimRetriever(BaseComponent, VectorIndexRetriever): @@ -39,6 +44,8 @@ def __init__(self, indexer, **kwargs): def run(self, **kwargs) -> Any: for k, v in kwargs.items(): if k == "query": + top_k = kwargs["top_k"] if kwargs["top_k"] else self.topk + self.similarity_top_k = top_k return self.retrieve(v) return None @@ -75,8 +82,9 @@ def __init__(self, indexer, **kwargs): def run(self, **kwargs) -> Any: for k, v in kwargs.items(): if k == "query": + top_k = kwargs["top_k"] if kwargs["top_k"] else self.topk # vector_retriever needs to be updated - self._vector_retriever = self._index.as_retriever(similarity_top_k=self.topk) + self._vector_retriever = self._index.as_retriever(similarity_top_k=top_k) return self.retrieve(v) return None @@ -108,8 +116,9 @@ def __init__(self, indexer, **kwargs): def run(self, **kwargs) -> Any: for k, v in kwargs.items(): if k == "query": + top_k = kwargs["top_k"] if kwargs["top_k"] else self.topk nodes = cast(List[BaseNode], list(self._docstore.docs.values())) - similarity_top_k = min(len(nodes), self.topk) + similarity_top_k = min(len(nodes), top_k) bm25_retr = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=similarity_top_k) return bm25_retr.retrieve(v) @@ -123,3 +132,139 @@ def ser_model(self): "retrieve_topk": self.topk, } return set + + +class KBadminRetriever(BaseComponent): + def __init__(self, indexer, **kwargs): + BaseComponent.__init__( + 
self, + comp_type=CompType.RETRIEVER, + comp_subtype=RetrieverType.KBADMIN_RETRIEVER, + ) + self.vector_db = None + self.collection_name = None + self.topk = kwargs.get("similarity_top_k", 30) + self.KBADMIN_MILVUS_URL = indexer.vector_url + self.CONNECTION_ARGS = {"uri": indexer.vector_url} + self.vector_field = "q_1024_vec" + self.text_field = "content_with_weight" + self.embedding_model_name = indexer.embed_model + self.embedding_url = indexer.kbadmin_embedding_url + "/v3" + self.embedding = OpenAIEmbeddings( + model=self.embedding_model_name, + api_key="unused", + base_url=self.embedding_url, + tiktoken_enabled=False, + embedding_ctx_length=510, + ) + + def config_kbadmin_milvus(self, knowledge_name): + collection_name = knowledge_name + if not kbs_rev_maps: + get_kbs_info(self.CONNECTION_ARGS) + collection_name = kbs_rev_maps[collection_name] + self.vector_db = Milvus( + self.embedding, + connection_args=self.CONNECTION_ARGS, + collection_name=collection_name, + vector_field=self.vector_field, + text_field=self.text_field, + enable_dynamic_field=True, + index_params={"index_type": "FLAT", "metric_type": "IP", "params": {}}, + ) + + def similarity_search_with_embedding(self, query: str, k) -> list[tuple[Document, float]]: + url = self.embedding_url + "/embeddings" + embedding_info = {"model": self.embedding_model_name, "input": query} + # Get embedding result from embedding service + response = requests.post(url, headers={"Content-Type": "application/json"}, json=embedding_info) + embedding_json = response.json() + embedding = embedding_json["data"][0]["embedding"] + docs_and_scores = self.vector_db.similarity_search_with_score_by_vector(embedding=embedding, k=k) + relevance_score_fn = self.vector_db._select_relevance_score_fn() + return [(doc, relevance_score_fn(score)) for doc, score in docs_and_scores] + + def run(self, **kwargs) -> Any: + query = kwargs["query"] + top_k = kwargs["top_k"] if kwargs["top_k"] else self.topk + # langchain retrieval + docs_and_similarities = self.similarity_search_with_embedding(query=query, k=top_k) + node_with_scores: List[NodeWithScore] = [] + for doc, similarity in docs_and_similarities: + score: Optional[float] = None + if similarity is not None: + score = similarity + # convert langchain store format into llamaindex + node = Document.from_langchain_format(doc) + node_with_scores.append(NodeWithScore(node=node, score=score)) + return node_with_scores + + @model_serializer + def ser_model(self): + set = {"idx": self.idx, "retriever_type": self.comp_subtype, "CONNECTION_ARGS": self.CONNECTION_ARGS} + return set + + +# global kbs maps. 
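The KBadminRetriever above delegates query embedding to an OpenAI-compatible endpoint and then searches a kbadmin-managed Milvus collection by vector, converting raw distances with the Milvus relevance function. A minimal, self-contained sketch of that flow follows, assuming a reachable embedding service, the kbadmin Milvus default URL, and an example collection name; the endpoint URLs, model id, and collection are assumptions for illustration, not values required by the patch.

import requests
from langchain_milvus import Milvus
from langchain_openai import OpenAIEmbeddings

EMBEDDING_URL = "http://kbadmin-embedding:8080/v3"   # assumed kbadmin embedding endpoint
MILVUS_URL = "http://localhost:29530"                # kbadmin Milvus default in indexer.py
MODEL = "BAAI/bge-large-zh-v1.5"                     # assumed embedding model id

vector_db = Milvus(
    OpenAIEmbeddings(model=MODEL, api_key="unused", base_url=EMBEDDING_URL,
                     tiktoken_enabled=False, embedding_ctx_length=510),
    connection_args={"uri": MILVUS_URL},
    collection_name="example_collection",            # assumed collection name
    vector_field="q_1024_vec",
    text_field="content_with_weight",
    enable_dynamic_field=True,
    index_params={"index_type": "FLAT", "metric_type": "IP", "params": {}},
)

def search(query: str, k: int = 5):
    # Embed the query via the OpenAI-compatible /embeddings route, then search by vector.
    resp = requests.post(f"{EMBEDDING_URL}/embeddings",
                         headers={"Content-Type": "application/json"},
                         json={"model": MODEL, "input": query})
    vector = resp.json()["data"][0]["embedding"]
    hits = vector_db.similarity_search_with_score_by_vector(embedding=vector, k=k)
    to_score = vector_db._select_relevance_score_fn()
    return [(doc.page_content, to_score(score)) for doc, score in hits]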
+global kbs_rev_maps +kbs_rev_maps = {} + + +def get_kbs_info(CONNECTION_ARGS): + alias = "default" + try: + connections.connect("default", **CONNECTION_ARGS) + collections = utility.list_collections() + all_kb_infos = {} + new_infos = {} + for kb in collections: + collection = Collection(kb) + collection.load() + try: + if any(field.name == "kb_id" for field in collection.schema.fields): + docs = collection.query( + expr="pk != 0", + output_fields=["kb_name", "kb_id", "docnm_kwd"], + timeout=10, + ) + else: + docs = collection.query( + expr="pk != 0", + output_fields=["filename"], + timeout=10, + ) + collection.release() + except MilvusException as e: + continue + this_kbinfo = {} + for doc in docs: + try: + if "kb_name" in doc: + if not this_kbinfo: + this_kbinfo["name"] = doc["kb_name"] + this_kbinfo["uuid"] = doc["kb_id"] + this_kbinfo["files"] = set([doc["docnm_kwd"]]) + else: + this_kbinfo["files"].add(doc["docnm_kwd"]) + else: + if not this_kbinfo: + this_kbinfo["name"] = kb + this_kbinfo["uuid"] = "" + this_kbinfo["files"] = set([doc["filename"]]) + else: + this_kbinfo["files"].add(doc["filename"]) + except KeyError: + this_kbinfo = None + break + if this_kbinfo: + unique_files = list(this_kbinfo["files"]) + this_kbinfo["files"] = unique_files + new_infos[kb] = this_kbinfo + all_kb_infos.update(new_infos) + kbs_rev_maps.clear() + for kb_id in all_kb_infos: + kbs_rev_maps[all_kb_infos[kb_id]["name"]] = kb_id + return kbs_rev_maps + finally: + if connections.has_connection(alias): + connections.disconnect(alias) diff --git a/EdgeCraftRAG/edgecraftrag/controllers/compmgr.py b/EdgeCraftRAG/edgecraftrag/controllers/compmgr.py index b8dd82ab7b..c956ee316d 100644 --- a/EdgeCraftRAG/edgecraftrag/controllers/compmgr.py +++ b/EdgeCraftRAG/edgecraftrag/controllers/compmgr.py @@ -26,6 +26,34 @@ def search_parser(self, npin: NodeParserIn) -> BaseComponent: return v return None + def search_parser_change(self, pl, req): + pl_change = False + try: + if pl.node_parser.comp_subtype != req.node_parser.parser_type: + return True + if pl.node_parser.comp_subtype == req.node_parser.parser_type: + if pl.node_parser.comp_subtype == NodeParserType.SIMPLE: + if ( + pl.node_parser.chunk_size != req.node_parser.chunk_size + or pl.node_parser.chunk_overlap != req.node_parser.chunk_overlap + ): + pl_change = True + elif pl.node_parser.comp_subtype == NodeParserType.SENTENCEWINDOW: + if pl.node_parser.window_size != req.node_parser.window_size: + pl_change = True + elif pl.node_parser.comp_subtype == NodeParserType.HIERARCHY: + if pl.node_parser.chunk_sizes != req.node_parser.chunk_sizes: + pl_change = True + elif pl.node_parser.comp_subtype == NodeParserType.UNSTRUCTURED: + if ( + pl.node_parser.chunk_size != req.node_parser.chunk_size + or pl.node_parser.chunk_overlap != req.node_parser.chunk_overlap + ): + pl_change = True + except: + return False + return pl_change + class IndexerMgr(BaseMgr): @@ -43,6 +71,7 @@ def search_indexer(self, indin: IndexerIn) -> BaseComponent: (v.model.model_id_or_path == indin.embedding_model.model_id) or (v.model.model_id_or_path == indin.embedding_model.model_path) ) + and v.model.device == indin.embedding_model.device ): return v return None diff --git a/EdgeCraftRAG/edgecraftrag/controllers/filemgr.py b/EdgeCraftRAG/edgecraftrag/controllers/filemgr.py index 0278f1f6ac..6f29e931e4 100644 --- a/EdgeCraftRAG/edgecraftrag/controllers/filemgr.py +++ b/EdgeCraftRAG/edgecraftrag/controllers/filemgr.py @@ -20,7 +20,7 @@ def add_text(self, text: str): self.add(file) return 
file.documents - def add_files(self, docs: Any): + def add_files(self, docs: Any, docs_name: str = "default"): if not isinstance(docs, list): docs = [docs] @@ -41,38 +41,31 @@ def add_files(self, docs: Any): for file_path in files: file = File(file_path=file_path) - self.add(file) + self.append(file, docs_name) input_docs.extend(file.documents) - return input_docs - def get_file_by_name_or_id(self, name: str): - for _, file in self.components.items(): - if file.name == name or file.idx == name: - return file + def get_file_by_name(self, docs_name: str = "default", file_path: str = None): + for name, files in self.components.items(): + if docs_name == name: + for file in files: + if file_path == file.documents[0].metadata["file_path"]: + return file.documents return None - def get_files(self): - return [file for _, file in self.components.items()] + def get_kb_files_by_name(self, docs_name: str = "default"): + file_docs = [] + for name, files in self.components.items(): + if name == docs_name: + return files + return file_docs def get_all_docs(self) -> List[Document]: - all_docs = [] - for _, file in self.components.items(): - all_docs.extend(file.documents) + all_docs = {} + for doc_name, files in self.components.items(): + all_docs[doc_name] = files return all_docs - def get_docs_by_file(self, name) -> List[Document]: - file = self.get_file_by_name_or_id(name) - return file.documents if file else [] - - def del_file(self, name): - file = self.get_file_by_name_or_id(name) - if file: - self.remove(file.idx) - return True - else: - return False - def update_file(self, name): file = self.get_file_by_name_or_id(name) if file: @@ -81,3 +74,20 @@ def update_file(self, name): return True else: return False + + def del_kb_file(self, docs_name: str = "default"): + files = self.get_kb_files_by_name(docs_name) + if files: + self.remove(docs_name) + + def del_file(self, docs_name: str = "default", file_path: str = None): + files = self.get_file_by_name(docs_name, file_path) + docs_list = [] + for docs_file in files: + docs_list.append(docs_file.id_) + files = self.get_kb_files_by_name(docs_name) + for docs_file in files: + if file_path == docs_file.documents[0].metadata["file_path"]: + self.components[docs_name].remove(docs_file) + return docs_list + return None diff --git a/EdgeCraftRAG/edgecraftrag/controllers/knowledge_basemgr.py b/EdgeCraftRAG/edgecraftrag/controllers/knowledge_basemgr.py index dc69943eb2..d6dbba3ead 100644 --- a/EdgeCraftRAG/edgecraftrag/controllers/knowledge_basemgr.py +++ b/EdgeCraftRAG/edgecraftrag/controllers/knowledge_basemgr.py @@ -13,6 +13,7 @@ class KnowledgeManager(BaseMgr): def __init__(self): super().__init__() self.active_knowledge_idx: Optional[str] = None + self.active_experience_idx: Optional[str] = None def get_knowledge_base_by_name_or_id(self, name: str): for _, kb in self.components.items(): @@ -26,8 +27,17 @@ def get_active_knowledge_base(self) -> Optional[Knowledge]: else: return None + def get_active_experience(self): + if self.active_experience_idx: + return self.get_knowledge_base_by_name_or_id(self.active_experience_idx) + else: + return None + def active_knowledge(self, knowledge: KnowledgeBaseCreateIn): kb = self.get_knowledge_base_by_name_or_id(knowledge.name) + if kb.comp_type != "knowledge": + raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="Experience type cannot be active") + kb = self.get_knowledge_base_by_name_or_id(knowledge.name) self.active_knowledge_idx = kb.idx if knowledge.active else None for idx, comp in 
self.components.items(): @@ -35,16 +45,44 @@ def active_knowledge(self, knowledge: KnowledgeBaseCreateIn): comp.active = idx == self.active_knowledge_idx return kb + def active_experience(self, knowledge: KnowledgeBaseCreateIn): + kb = self.get_knowledge_base_by_name_or_id(knowledge.name) + if kb.comp_type != "experience": + raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="Knowledge type cannot be active") + self.active_experience_idx = kb.idx if knowledge.experience_active else None + if kb.experience_active != knowledge.experience_active: + for idx, comp in self.components.items(): + if isinstance(comp, Knowledge): + comp.experience_active = idx == self.active_experience_idx + return kb + def create_knowledge_base(self, knowledge: KnowledgeBaseCreateIn) -> Knowledge: for _, kb in self.components.items(): if kb.name == knowledge.name: raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="The knowledge base already exists.") + if knowledge.comp_type == "experience": + for idx, kb in self.components.items(): + if kb.comp_type == "experience": + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, detail="Only one experience class can be created." + ) + if knowledge.comp_type == "experience": + knowledge.active = False if knowledge.active is None: knowledge.active = False - kb = Knowledge(name=knowledge.name, description=knowledge.description, active=knowledge.active) + kb = Knowledge( + name=knowledge.name, + description=knowledge.description, + active=knowledge.active, + comp_type=knowledge.comp_type, + comp_subtype=knowledge.comp_subtype, + experience_active=knowledge.experience_active, + ) self.add(kb) if knowledge.active: self.active_knowledge(knowledge) + if knowledge.experience_active: + self.active_experience(knowledge) return kb def delete_knowledge_base(self, name: str): @@ -54,12 +92,16 @@ def delete_knowledge_base(self, name: str): def update_knowledge_base(self, knowledge) -> Knowledge: kb = self.get_knowledge_base_by_name_or_id(knowledge.name) - - if knowledge.description is not None: - kb.description = knowledge.description - - if knowledge.active is not None and kb.active != knowledge.active: - kb = self.active_knowledge(knowledge) + if kb.comp_type == "knowledge": + if knowledge.description is not None: + kb.description = knowledge.description + if knowledge.active is not None and kb.active != knowledge.active: + kb = self.active_knowledge(knowledge) + if kb.comp_type == "experience": + if knowledge.description is not None: + kb.description = knowledge.description + if knowledge.experience_active is not None and kb.experience_active != knowledge.experience_active: + kb = self.active_experience(knowledge) return "Knowledge base update successfully" def get_all_knowledge_bases(self) -> List[Dict[str, Any]]: @@ -67,3 +109,8 @@ def get_all_knowledge_bases(self) -> List[Dict[str, Any]]: for idx, kb in self.components.items(): kb_list.append(kb) return kb_list + + def get_experience_kb(self): + for idx, kb in self.components.items(): + if kb.comp_type == "experience": + return kb diff --git a/EdgeCraftRAG/edgecraftrag/controllers/pipelinemgr.py b/EdgeCraftRAG/edgecraftrag/controllers/pipelinemgr.py index 81524a3754..b22f0c66df 100644 --- a/EdgeCraftRAG/edgecraftrag/controllers/pipelinemgr.py +++ b/EdgeCraftRAG/edgecraftrag/controllers/pipelinemgr.py @@ -66,8 +66,8 @@ def activate_pipeline(self, name: str, active: bool, nm: NodeMgr, kb_name: None) return nodelist = None - if pl.node_changed: - nodelist = nm.get_nodes(pl.node_parser.idx) + # if 
pl.node_changed: + # nodelist = nm.get_nodes(pl.node_parser.idx) pl.check_active(nodelist, kb_name) prevactive = self._active_pipeline if prevactive: diff --git a/EdgeCraftRAG/edgecraftrag/requirements.txt b/EdgeCraftRAG/edgecraftrag/requirements.txt old mode 100644 new mode 100755 index 8dc53e6c83..289ba3ef4d --- a/EdgeCraftRAG/edgecraftrag/requirements.txt +++ b/EdgeCraftRAG/edgecraftrag/requirements.txt @@ -3,11 +3,13 @@ EbookLib>=0.18 faiss-cpu>=1.8.0.post1 html2text>=2025.4.15 langchain-core==0.3.60 -llama-index==0.12.41 -llama-index-core==0.12.41 +langchain-milvus +langchain-openai +llama-index==0.12.36 +llama-index-core==0.12.37 llama-index-embeddings-openvino==0.5.2 -llama-index-llms-openai==0.4.0 -llama-index-llms-openai-like==0.4.0 +llama-index-llms-openai==0.3.44 +llama-index-llms-openai-like==0.3.4 llama-index-llms-openvino==0.4.0 llama-index-postprocessor-openvino-rerank==0.4.1 llama-index-readers-file==0.4.7 @@ -20,6 +22,6 @@ pillow>=10.4.0 py-cpuinfo>=9.0.0 pymilvus==2.5.10 python-docx==1.1.2 -unstructured==0.16.11 +unstructured unstructured[pdf] werkzeug==3.1.3 diff --git a/EdgeCraftRAG/edgecraftrag/utils.py b/EdgeCraftRAG/edgecraftrag/utils.py index 18a43e5879..1eef20f8c2 100755 --- a/EdgeCraftRAG/edgecraftrag/utils.py +++ b/EdgeCraftRAG/edgecraftrag/utils.py @@ -44,15 +44,14 @@ def iter_elements(cls, paragraph: Paragraph, opts: DocxPartitionerOptions) -> It yield Image(text="IMAGE", metadata=element_metadata) -def get_prompt_template(model_id, prompt_content=None, template_path=None, enable_think=False): +def get_prompt_template(model_path, prompt_content=None, template_path=None, enable_think=False): if prompt_content is not None: template = prompt_content elif template_path is not None: template = Path(template_path).read_text(encoding=None) else: template = DEFAULT_TEMPLATE - tokenizer = AutoTokenizer.from_pretrained(model_id) - model_id = model_id.split("/")[-1] + tokenizer = AutoTokenizer.from_pretrained(model_path) messages = [{"role": "system", "content": template}, {"role": "user", "content": "\n{input}\n"}] prompt_template = tokenizer.apply_chat_template( messages, @@ -90,10 +89,6 @@ def compare_mappings(new_dict, old_dict): deleted = {name: old_files[name] for name in set(old_files) - set(new_files)} if deleted: deleted_files[key] = deleted - - for key in list(added_files.keys()): - if key in deleted_files: - del added_files[key] return added_files, deleted_files @@ -126,7 +121,7 @@ def concat_history(message: str) -> str: max_token = 6000 active_pl = ctx.get_pipeline_mgr().get_active_pipeline() if active_pl.generator.inference_type == InferenceType.VLLM: - vllm_max_len = int(os.getenv("MAX_MODEL_LEN", "5000")) + vllm_max_len = int(os.getenv("MAX_MODEL_LEN", "10240")) if vllm_max_len > 5000: max_token = vllm_max_len - 1024 diff --git a/EdgeCraftRAG/nginx/nginx-conf-generator.sh b/EdgeCraftRAG/nginx/nginx-conf-generator.sh index bd8e5b194c..f12799f583 100644 --- a/EdgeCraftRAG/nginx/nginx-conf-generator.sh +++ b/EdgeCraftRAG/nginx/nginx-conf-generator.sh @@ -25,7 +25,7 @@ EOL # Generate the server lines for ((i=0; i> $2 + echo " server ${HOST_IP}:${!PORT_VAR:-8$((i+1))00};" >> $2 done # Close the upstream block and the http block diff --git a/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh b/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh index 192e18b67c..700dd92990 100755 --- a/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh +++ b/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh @@ -35,7 +35,7 @@ TENSOR_PARALLEL_SIZE=1 SELECTED_XPU_0=0 
vLLM_ENDPOINT="http://${HOST_IP}:${NGINX_PORT}" LLM_MODEL="Qwen/Qwen3-8B" -LLM_MODEL_PATH="${HOME}/qwen/" +LLM_MODEL_PATH="${MODEL_PATH}/${LLM_MODEL}" NGINX_CONFIG_PATH="$WORKPATH/nginx/nginx.conf" VLLM_IMAGE_TAG="0.8.3-b20" DP_NUM=1 diff --git a/EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh b/EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh new file mode 100755 index 0000000000..df97274a89 --- /dev/null +++ b/EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh @@ -0,0 +1,173 @@ +#!/bin/bash +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +set -e +source ./common.sh + +IMAGE_REPO=${IMAGE_REPO:-"opea"} +IMAGE_TAG=${IMAGE_TAG:-"latest"} +echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" +echo "TAG=IMAGE_TAG=${IMAGE_TAG}" +export REGISTRY=${IMAGE_REPO} +export TAG=${IMAGE_TAG} + +WORKPATH=$(dirname "$PWD") +LOG_PATH="$WORKPATH/tests" + +ip_address=$(hostname -I | awk '{print $1}') +HOST_IP=$ip_address + +COMPOSE_FILE="compose_vllm_b60.yaml" +EC_RAG_SERVICE_PORT=16010 + +MODEL_PATH="${HOME}/models" +# MODEL_PATH="$WORKPATH/models" +DOC_PATH="$WORKPATH/tests" +UI_UPLOAD_PATH="$WORKPATH/tests" + +HF_ENDPOINT=https://hf-mirror.com +VLLM_SERVICE_PORT_B60=8086 +TP=1 +vLLM_ENDPOINT="http://${HOST_IP}:${VLLM_SERVICE_PORT_B60}" +LLM_MODEL="Qwen/Qwen3-8B" +VLLM_IMAGE_TAG="1.0" +DP=1 + +function build_docker_images() { + opea_branch=${opea_branch:-"main"} + cd $WORKPATH/docker_image_build + git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git + pushd GenAIComps + echo "GenAIComps test commit is $(git rev-parse HEAD)" + docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . + popd && sleep 1s + + echo "Pull intel/llm-scaler-vllm image" + docker pull intel/llm-scaler-vllm:${VLLM_IMAGE_TAG} + + echo "Build all the images with --no-cache, check docker_image_build.log for details..." + docker compose -f build.yaml build --no-cache > ${LOG_PATH}/docker_image_build.log + + docker images && sleep 1s +} + +function start_services() { + cd $WORKPATH/docker_compose/intel/gpu/arc + source set_env.sh + # Start Docker Containers + docker compose -f $COMPOSE_FILE up -d > ${LOG_PATH}/start_services_with_compose.log + echo "ipex-serving-xpu is booting, please wait." + sleep 30s + n=0 + until [[ "$n" -ge 100 ]]; do + docker logs ipex-serving-xpu-container > ${LOG_PATH}/ipex-serving-xpu-container.log 2>&1 + if grep -q "Starting vLLM API server on http://0.0.0.0:" ${LOG_PATH}/ipex-serving-xpu-container.log; then + break + fi + sleep 6s + n=$((n+1)) + done +} + +function validate_services() { + local URL="$1" + local EXPECTED_RESULT="$2" + local SERVICE_NAME="$3" + local DOCKER_NAME="$4" + local INPUT_DATA="$5" + + echo "[ $SERVICE_NAME ] Validating $SERVICE_NAME service..." + local RESPONSE=$(curl -s -w "%{http_code}" -o ${LOG_PATH}/${SERVICE_NAME}.log -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") + while [ ! -f ${LOG_PATH}/${SERVICE_NAME}.log ]; do + sleep 1 + done + local HTTP_STATUS="${RESPONSE: -3}" + local CONTENT=$(cat ${LOG_PATH}/${SERVICE_NAME}.log) + + if [ "$HTTP_STATUS" -eq 200 ]; then + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." + else + echo "[ $SERVICE_NAME ] HTTP status is not 200. 
Received status was $HTTP_STATUS" + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log + exit 1 + fi + sleep 1s +} + +function validate_rag() { + cd $WORKPATH/tests + + # setup pipeline + validate_services \ + "${HOST_IP}:${EC_RAG_SERVICE_PORT}/v1/settings/pipelines" \ + "active" \ + "pipeline" \ + "edgecraftrag-server" \ + '@configs/test_pipeline_ipex_vllm.json' + + # add data + validate_services \ + "${HOST_IP}:${EC_RAG_SERVICE_PORT}/v1/data" \ + "Done" \ + "data" \ + "edgecraftrag-server" \ + '@configs/test_data.json' + + # query + validate_services \ + "${HOST_IP}:${EC_RAG_SERVICE_PORT}/v1/chatqna" \ + "1234567890" \ + "query" \ + "ipex-serving-xpu-container" \ + '{"messages":"What is the test id?","max_tokens":5}' +} + +function validate_megaservice() { + # Curl the Mega Service + validate_services \ + "${HOST_IP}:16011/v1/chatqna" \ + "1234567890" \ + "query" \ + "ipex-serving-xpu-container" \ + '{"messages":"What is the test id?","max_tokens":5}' +} + +function stop_docker() { + cd $WORKPATH/docker_compose/intel/gpu/arc + export MODEL_PATH="${HOME}/models" + docker compose -f $COMPOSE_FILE down +} + + +function main() { + mkdir -p $LOG_PATH + + echo "::group::stop_docker" + stop_docker + echo "::endgroup::" + + echo "::group::build_docker_images" + if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi + echo "::endgroup::" + + echo "::group::start_services" + start_services + echo "::endgroup::" + + echo "::group::validate_rag" + validate_rag + echo "::endgroup::" + + echo "::group::validate_megaservice" + validate_megaservice + echo "::endgroup::" + + echo "::group::stop_docker" + stop_docker + echo y | docker system prune + echo "::endgroup::" + +} + +main diff --git a/EdgeCraftRAG/tools/quick_start.sh b/EdgeCraftRAG/tools/quick_start.sh index fdefa797f0..27525d1f97 100755 --- a/EdgeCraftRAG/tools/quick_start.sh +++ b/EdgeCraftRAG/tools/quick_start.sh @@ -5,6 +5,8 @@ set -e WORKPATH=$(dirname "$(pwd)") +ip_address=$(hostname -I | awk '{print $1}') +HOST_IP=$ip_address get_user_input() { local var_name=$1 @@ -32,7 +34,7 @@ function start_vllm_services() { MILVUS_ENABLED=$(get_enable_function "MILVUS DB(Enter 1 for enable)" "0") CHAT_HISTORY_ROUND=$(get_user_input "chat history round" "0") LLM_MODEL=$(get_user_input "your LLM model" "Qwen/Qwen3-8B") - MODEL_PATH=$(get_user_input "your model path" "${HOME}/models") + MODEL_PATH=$(get_user_input "your model path" "${PWD}/models") read -p "Have you prepare models in ${MODEL_PATH}:(yes/no) [yes]" user_input user_input=${user_input:-"yes"} @@ -63,14 +65,20 @@ function start_vllm_services() { # vllm ENV export NGINX_PORT=8086 export vLLM_ENDPOINT="http://${HOST_IP}:${NGINX_PORT}" - TENSOR_PARALLEL_SIZE=$(get_user_input "your tp size" 1) - read -p "selected GPU [$(seq -s, 0 $((TENSOR_PARALLEL_SIZE - 1)))] " SELECTED_XPU_0; SELECTED_XPU_0=${SELECTED_XPU_0:-$(seq -s, 0 $((TENSOR_PARALLEL_SIZE - 1)))} - DP_NUM=$(get_user_input "DP number(how many containers to run vLLM)" 1) - for (( x=0; x ipex-llm-serving-xpu-container.log 2>&1 + if grep -q "Starting vLLM API server on http://0.0.0.0:" ipex-llm-serving-xpu-container.log; then + break + fi + sleep 6s + n=$((n+1)) + done + rm -rf ipex-llm-serving-xpu-container.log + echo "service launched, please visit UI at ${HOST_IP}:8082" +} + + +function quick_start_ov_services() { + COMPOSE_FILE="compose.yaml" + echo "stop former service..." 
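
Stepping back to the validate_services() helper introduced in test_compose_vllm_on_arc_b60.sh above: it relies on curl appending the HTTP status code to the saved response and then slicing off the last three characters. A standalone sketch of the same pattern, with a placeholder URL and payload that are not part of the patch:

    # Illustrative sketch of the curl status-check pattern used by validate_services().
    URL="http://localhost:16010/v1/settings/pipelines"      # placeholder endpoint
    RESPONSE=$(curl -s -w "%{http_code}" -o /tmp/body.log \
      -X POST -d '{"example": true}' -H 'Content-Type: application/json' "$URL")
    HTTP_STATUS="${RESPONSE: -3}"   # last three characters hold the status code
    if [ "$HTTP_STATUS" -eq 200 ]; then
        echo "OK: $(cat /tmp/body.log)"
    else
        echo "Request failed with status $HTTP_STATUS" >&2
    fi
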
+ docker compose -f $WORKPATH/docker_compose/intel/gpu/arc/$COMPOSE_FILE down + + ip_address=$(hostname -I | awk '{print $1}') + export HOST_IP=${HOST_IP:-"${ip_address}"} + export DOC_PATH=${DOC_PATH:-"$WORKPATH/tests"} + export TMPFILE_PATH=${TMPFILE_PATH:-"$WORKPATH/tests"} + export MILVUS_ENABLED=${MILVUS_ENABLED:-1} + export CHAT_HISTORY_ROUND=${CHAT_HISTORY_ROUND:-"0"} + export LLM_MODEL=${LLM_MODEL:-"Qwen/Qwen3-8B"} + export MODEL_PATH=${MODEL_PATH:-"${HOME}/models"} + export VIDEOGROUPID=$(getent group video | cut -d: -f3) + export RENDERGROUPID=$(getent group render | cut -d: -f3) + + check_baai_folder + export HF_CACHE=${HF_CACHE:-"${HOME}/.cache"} + if [ ! -d "${HF_CACHE}" ]; then + mkdir -p "${HF_CACHE}" + echo "Created directory: ${HF_CACHE}" + fi + + sudo chown 1000:1000 "${MODEL_PATH}" "${DOC_PATH}" "${TMPFILE_PATH}" + sudo chown -R 1000:1000 "${HF_CACHE}" + export HF_ENDPOINT=${HF_ENDPOINT:-"https://hf-mirror.com"} + export no_proxy="localhost, 127.0.0.1, 192.168.1.1, ${HOST_IP}" + export CCL_DG2_USM=${CCL_DG2_USM:-0} + + echo "Starting service..." + docker compose -f "$WORKPATH/docker_compose/intel/gpu/arc/$COMPOSE_FILE" up -d +} + + +function start_vLLM_B60_services() { + COMPOSE_FILE="compose_vllm_b60.yaml" + echo "stop former service..." + docker compose -f $WORKPATH/docker_compose/intel/gpu/arc/$COMPOSE_FILE down + + ip_address=$(hostname -I | awk '{print $1}') + HOST_IP=$(get_user_input "host ip" "${ip_address}") + DOC_PATH=$(get_user_input "DOC_PATH" "$WORKPATH/tests") + TMPFILE_PATH=$(get_user_input "TMPFILE_PATH" "$WORKPATH/tests") + MILVUS_ENABLED=$(get_enable_function "MILVUS DB(Enter 1 for enable)" "0") + CHAT_HISTORY_ROUND=$(get_user_input "chat history round" "0") + LLM_MODEL=$(get_user_input "your LLM model" "Qwen/Qwen3-72B") + MODEL_PATH=$(get_user_input "your model path" "${PWD}/models") + read -p "Have you prepare models in ${MODEL_PATH}:(yes/no) [yes]" user_input + user_input=${user_input:-"yes"} + + if [ "$user_input" == "yes" ]; then + # 模型文件路径请参考以下形式存放, llm为huggingface + # Indexer: ${MODEL_PATH}/BAAI/bge-small-en-v1.5 + # Reranker: ${MODEL_PATH}/BAAI/bge-reranker-large + # llm :${MODEL_PATH}/${LLM_MODEL} (从huggingface或modelscope下载的原始模型,而不是经过OpenVINO转换的模型!) + echo "you skipped model downloading, please make sure you have prepared all models under ${MODEL_PATH}" + else + echo "you have not prepare models, starting to download models into ${MODEL_PATH}..." + mkdir -p $MODEL_PATH + pip install --upgrade --upgrade-strategy eager "optimum[openvino]" + optimum-cli export openvino -m BAAI/bge-small-en-v1.5 ${MODEL_PATH}/BAAI/bge-small-en-v1.5 --task sentence-similarity + optimum-cli export openvino -m BAAI/bge-reranker-large ${MODEL_PATH}/BAAI/bge-reranker-large --task text-classification + pip install -U huggingface_hub + huggingface-cli download $LLM_MODEL --local-dir "${MODEL_PATH}/${LLM_MODEL}" + fi + echo "give permission to related path..." + sudo chown 1000:1000 ${MODEL_PATH} ${DOC_PATH} ${TMPFILE_PATH} + # vllm ENV + export VLLM_SERVICE_PORT_B60=8086 + export vLLM_ENDPOINT="http://${HOST_IP}:${VLLM_SERVICE_PORT_B60}" + read -p "DP number(how many containers to run B60_vLLM) [4] , press Enter to confirm, or type a new value:" DP; DP=${DP:-4} + read -p "Tensor parallel size(your tp size [1]), press Enter to confirm, or type a new value:" TP; TP=${TP:-1} + DTYPE=$(get_user_input "DTYPE (vLLM data type, e.g. 
float16/bfloat16)" "float16") + ZE_AFFINITY_MASK=$(get_user_input "ZE_AFFINITY_MASK (GPU affinity mask, multi-GPU use 0,1,2...)" "0,1,2,3") + ENFORCE_EAGER=$(get_user_input "ENFORCE_EAGER (enable eager execution, 1=enable/0=disable)" "1") + TRUST_REMOTE_CODE=$(get_user_input "TRUST_REMOTE_CODE (trust remote code for custom models, 1=enable/0=disable)" "1") + DISABLE_SLIDING_WINDOW=$(get_user_input "DISABLE_SLIDING_WINDOW (disable sliding window attention, 1=disable/0=enable)" "1") + GPU_MEMORY_UTIL=$(get_user_input "GPU_MEMORY_UTIL (GPU memory utilization, range 0.1-1.0)" "0.8") + NO_ENABLE_PREFIX_CACHING=$(get_user_input "NO_ENABLE_PREFIX_CACHING (disable prefix caching, 1=disable/0=enable)" "1") + MAX_NUM_BATCHED_TOKENS=$(get_user_input "MAX_NUM_BATCHED_TOKENS (max number of batched tokens)" "8192") + DISABLE_LOG_REQUESTS=$(get_user_input "DISABLE_LOG_REQUESTS (disable request logs, 1=disable/0=enable)" "1") + MAX_MODEL_LEN=$(get_user_input "MAX_MODEL_LEN (max model context length, e.g. 49152/10240)" "49152") + BLOCK_SIZE=$(get_user_input "BLOCK_SIZE (vLLM block size)" "64") + QUANTIZATION=$(get_user_input "QUANTIZATION (model quantization method, e.g. fp8/int4)" "fp8") + # export ENV + export MODEL_PATH=${MODEL_PATH} + export DOC_PATH=${DOC_PATH} + export TMPFILE_PATH=${TMPFILE_PATH} + export LLM_MODEL=${LLM_MODEL} + export no_proxy="localhost, 127.0.0.1, 192.168.1.1, ${HOST_IP}" + export MILVUS_ENABLED=${MILVUS_ENABLED} + export CHAT_HISTORY_ROUND=${CHAT_HISTORY_ROUND} + export SELECTED_XPU_0=${SELECTED_XPU_0} + export VIDEOGROUPID=$(getent group video | cut -d: -f3) + export RENDERGROUPID=$(getent group render | cut -d: -f3) + # export vllm ENV + export DP=${DP} + export TP=${TP} + export DTYPE=${DTYPE} + export ZE_AFFINITY_MASK=${ZE_AFFINITY_MASK} + export ENFORCE_EAGER=${ENFORCE_EAGER} + export TRUST_REMOTE_CODE=${TRUST_REMOTE_CODE} + export DISABLE_SLIDING_WINDOW=${DISABLE_SLIDING_WINDOW} + export GPU_MEMORY_UTIL=${GPU_MEMORY_UTIL} + export NO_ENABLE_PREFIX_CACHING=${NO_ENABLE_PREFIX_CACHING} + export MAX_NUM_BATCHED_TOKENS=${MAX_NUM_BATCHED_TOKENS} + export DISABLE_LOG_REQUESTS=${DISABLE_LOG_REQUESTS} + export MAX_MODEL_LEN=${MAX_MODEL_LEN} + export BLOCK_SIZE=${BLOCK_SIZE} + export QUANTIZATION=${QUANTIZATION} + + # Start Docker Containers + docker compose -f $WORKPATH/docker_compose/intel/gpu/arc/$COMPOSE_FILE up -d + echo "ipex-llm-serving-xpu is booting, please wait..." 
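
The wait loop that follows immediately below is the same log-polling readiness check used by the other launch paths in this script. A generic sketch of the pattern, where the container name and log marker come from the patch and the function wrapper is purely illustrative:

    # Illustrative helper mirroring the readiness loops used throughout quick_start.sh.
    wait_for_vllm() {
        local container="${1:-ipex-llm-serving-xpu-container-0}"
        local marker="Starting vLLM API server on http://0.0.0.0:"
        local n=0
        until [[ "$n" -ge 100 ]]; do
            if docker logs "$container" 2>&1 | grep -q "$marker"; then
                return 0
            fi
            sleep 6s
            n=$((n+1))
        done
        echo "Timed out waiting for ${container}" >&2
        return 1
    }
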
+ n=0 + until [[ "$n" -ge 100 ]]; do + docker logs ipex-llm-serving-xpu-container-0 > ipex-llm-serving-xpu-container.log 2>&1 + if grep -q "Starting vLLM API server on http://0.0.0.0:" ipex-llm-serving-xpu-container.log; then + break + fi + sleep 6s + n=$((n+1)) + done + rm -rf ipex-llm-serving-xpu-container.log + echo "service launched, please visit UI at ${HOST_IP}:8082" +} + + +function quick_start_vllm_B60_services() { + WORKPATH=$(dirname "$PWD") + COMPOSE_FILE="compose_vllm_b60.yaml" + EC_RAG_SERVICE_PORT=16010 + docker compose -f $WORKPATH/docker_compose/intel/gpu/arc/$COMPOSE_FILE down + + export HOST_IP=${HOST_IP:-"${ip_address}"} + export MODEL_PATH=${MODEL_PATH:-"${PWD}/models"} + export DOC_PATH=${DOC_PATH:-"$WORKPATH/tests"} + export TMPFILE_PATH=${TMPFILE_PATH:-"$WORKPATH/tests"} + export MILVUS_ENABLED=${MILVUS_ENABLED:-1} + export CHAT_HISTORY_ROUND=${CHAT_HISTORY_ROUND:-2} + export LLM_MODEL=${LLM_MODEL:-Qwen/Qwen3-72B} + export VIDEOGROUPID=$(getent group video | cut -d: -f3) + export RENDERGROUPID=$(getent group render | cut -d: -f3) + # export vllm ENV + export DP=${DP:-4} + export TP=${TP:-1} + export DTYPE=${DTYPE:-float16} + export ZE_AFFINITY_MASK=${ZE_AFFINITY_MASK:-0,1,2,3} + export ENFORCE_EAGER=${ENFORCE_EAGER:-1} + export TRUST_REMOTE_CODE=${TRUST_REMOTE_CODE:-1} + export DISABLE_SLIDING_WINDOW=${DISABLE_SLIDING_WINDOW:-1} + export GPU_MEMORY_UTIL=${GPU_MEMORY_UTIL:-0.8} + export NO_ENABLE_PREFIX_CACHING=${NO_ENABLE_PREFIX_CACHING:-1} + export MAX_NUM_BATCHED_TOKENS=${MAX_NUM_BATCHED_TOKENS:-8192} + export DISABLE_LOG_REQUESTS=${disable_LOG_REQUESTS:-1} + export MAX_MODEL_LEN=${MAX_MODEL_LEN:-49152} + export BLOCK_SIZE=${BLOCK_SIZE:-64} + export QUANTIZATION=${QUANTIZATION:-fp8} + + + check_baai_folder + export no_proxy="localhost, 127.0.0.1, 192.168.1.1, ${HOST_IP}" + sudo chown -R 1000:1000 ${MODEL_PATH} ${DOC_PATH} ${TMPFILE_PATH} + docker compose -f $WORKPATH/docker_compose/intel/gpu/arc/$COMPOSE_FILE up -d + echo "ipex-llm-serving-xpu is booting, please wait..." + n=0 + until [[ "$n" -ge 100 ]]; do + docker logs ipex-llm-serving-xpu-container-0 > ipex-llm-serving-xpu-container.log 2>&1 + if grep -q "Starting vLLM API server on http://0.0.0.0:" ipex-llm-serving-xpu-container.log; then + break + fi + sleep 6s + n=$((n+1)) + done + rm -rf ipex-llm-serving-xpu-container.log + echo "service launched, please visit UI at ${HOST_IP}:8082" +} + + +function main { + if [[ $- == *i* ]]; then + read -p "Do you want to start vLLM or local OpenVINO services? 
(vLLM_A770/vLLM_B60/ov) [vLLM_A770]: " user_input + user_input=${user_input:-"vLLM_A770"} + if [[ "$user_input" == "vLLM_A770" ]]; then + start_vllm_services + elif [[ "$user_input" == "vLLM_B60" ]]; then + start_vLLM_B60_services + else + start_services + fi else - start_services + export SERVICE_TYPE=${SERVICE_TYPE:-"vLLM_A770"} + if [[ "$SERVICE_TYPE" == "vLLM_A770" || "$SERVICE_TYPE" == "vLLM" ]]; then + quick_start_vllm_services + elif [[ "$SERVICE_TYPE" == "vLLM_B60" || "$SERVICE_TYPE" == "vLLM_b60" ]]; then + quick_start_vllm_B60_services + else + quick_start_ov_services + fi fi } diff --git a/EdgeCraftRAG/ui/vue/.env.development b/EdgeCraftRAG/ui/vue/.env.development index d7ef344a8a..ea6834f8a0 100644 --- a/EdgeCraftRAG/ui/vue/.env.development +++ b/EdgeCraftRAG/ui/vue/.env.development @@ -2,5 +2,5 @@ ENV = development # Local Api -VITE_API_URL = http://10.67.106.238:16010/ -VITE_CHATBOT_URL = http://10.67.106.238:16011/ +VITE_API_URL = / +VITE_CHATBOT_URL = / diff --git a/EdgeCraftRAG/ui/vue/components.d.ts b/EdgeCraftRAG/ui/vue/components.d.ts index 7959bda79a..599bc31cb8 100644 --- a/EdgeCraftRAG/ui/vue/components.d.ts +++ b/EdgeCraftRAG/ui/vue/components.d.ts @@ -45,6 +45,7 @@ declare module 'vue' { ASelectOption: typeof import('ant-design-vue/es')['SelectOption'] ASlider: typeof import('ant-design-vue/es')['Slider'] ASpace: typeof import('ant-design-vue/es')['Space'] + ASpin: typeof import('ant-design-vue/es')['Spin'] ASteps: typeof import('ant-design-vue/es')['Steps'] ATable: typeof import('ant-design-vue/es')['Table'] ATag: typeof import('ant-design-vue/es')['Tag'] @@ -52,6 +53,7 @@ declare module 'vue' { ATooltip: typeof import('ant-design-vue/es')['Tooltip'] AUploadDragger: typeof import('ant-design-vue/es')['UploadDragger'] FormTooltip: typeof import('./src/components/FormTooltip.vue')['default'] + PartialLoading: typeof import('./src/components/PartialLoading.vue')['default'] RouterLink: typeof import('vue-router')['RouterLink'] RouterView: typeof import('vue-router')['RouterView'] SvgIcon: typeof import('./src/components/SvgIcon.vue')['default'] diff --git a/EdgeCraftRAG/ui/vue/index.html b/EdgeCraftRAG/ui/vue/index.html index df137679ef..c871332d3c 100644 --- a/EdgeCraftRAG/ui/vue/index.html +++ b/EdgeCraftRAG/ui/vue/index.html @@ -9,6 +9,7 @@ + Edge Craft RAG based Q&A Chatbot diff --git a/EdgeCraftRAG/ui/vue/nginx.conf b/EdgeCraftRAG/ui/vue/nginx.conf index 6d9a233bf8..8b6701e78a 100644 --- a/EdgeCraftRAG/ui/vue/nginx.conf +++ b/EdgeCraftRAG/ui/vue/nginx.conf @@ -28,7 +28,7 @@ http { proxy_pass http://edgecraftrag-server:16010; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_http_version 1.1; - proxy_read_timeout 180s; + proxy_read_timeout 600s; proxy_set_header Connection ""; } diff --git a/EdgeCraftRAG/ui/vue/package.json b/EdgeCraftRAG/ui/vue/package.json index 516e870406..d56e123754 100644 --- a/EdgeCraftRAG/ui/vue/package.json +++ b/EdgeCraftRAG/ui/vue/package.json @@ -9,7 +9,6 @@ "preview": "vite preview" }, "dependencies": { - "@vueuse/i18n": "^4.0.0-beta.12", "ant-design-vue": "^4.0.0-rc.6", "axios": "^1.7.9", "clipboard": "^2.0.11", diff --git a/EdgeCraftRAG/ui/vue/src/api/chatbot/index.ts b/EdgeCraftRAG/ui/vue/src/api/chatbot/index.ts index f7946ad72d..e8981e0f9a 100644 --- a/EdgeCraftRAG/ui/vue/src/api/chatbot/index.ts +++ b/EdgeCraftRAG/ui/vue/src/api/chatbot/index.ts @@ -21,9 +21,16 @@ export const requestChatbotConfig = (data: Object) => { }); }; -export const getBenchmark = (name: String) => { +export const getBenchmark = () => { 
return request({ - url: `/v1/settings/pipelines/${name}/benchmark`, + url: `/v1/settings/pipeline/benchmark`, method: "get", }); }; + +export const requestStopChat = () => { + return request({ + url: `/v1/chatqna/stop`, + method: "post", + }); +}; diff --git a/EdgeCraftRAG/ui/vue/src/api/knowledgeBase/index.ts b/EdgeCraftRAG/ui/vue/src/api/knowledgeBase/index.ts index bb7bc9a494..fe300d6b33 100644 --- a/EdgeCraftRAG/ui/vue/src/api/knowledgeBase/index.ts +++ b/EdgeCraftRAG/ui/vue/src/api/knowledgeBase/index.ts @@ -10,7 +10,7 @@ export const getKnowledgeBaseList = () => { }); }; -export const getKnowledgeBaseDetialByName = (kbName: String) => { +export const getKnowledgeBaseDetailByName = (kbName: String) => { return request({ url: `/v1/knowledge/${kbName}`, method: "get", @@ -54,15 +54,14 @@ export const requestKnowledgeBaseRelation = (kbName: String, data: Object) => { url: `/v1/knowledge/${kbName}/files`, method: "post", data, - showLoading: true, showSuccessMsg: true, successMsg: "request.knowledge.uploadSucc", }); }; -export const requestFileDelete = (kbName: String, data: Object) => { +export const requestFileDelete = (name: String, data: Object) => { return request({ - url: `/v1/knowledge/${kbName}/files`, + url: `/v1/knowledge/${name}/files`, method: "delete", data, showLoading: true, @@ -71,4 +70,86 @@ export const requestFileDelete = (kbName: String, data: Object) => { }); }; +export const getExperienceList = () => { + return request({ + url: "/v1/experiences", + method: "get", + }); +}; + +export const requestExperienceCreate = (data: EmptyArrayType) => { + return request({ + url: "/v1/multiple_experiences/check", + method: "post", + data, + showLoading: true, + }); +}; +export const requestExperienceConfirm = (flag: Boolean, data: EmptyArrayType) => { + return request({ + url: `/v1/multiple_experiences/confirm?flag=${flag}`, + method: "post", + data, + showLoading: true, + showSuccessMsg: true, + successMsg: "request.experience.createSucc", + }); +}; +export const getExperienceDetailByName = (data: Object) => { + return request({ + url: `/v1/experience`, + method: "post", + data, + }); +}; + +export const requestExperienceUpdate = (data: Object) => { + return request({ + url: `/v1/experiences`, + method: "patch", + data, + showLoading: true, + showSuccessMsg: true, + successMsg: "request.experience.updateSucc", + }); +}; + +export const requestExperienceDelete = (data: Object) => { + return request({ + url: `/v1/experiences`, + method: "delete", + data, + showLoading: true, + showSuccessMsg: true, + successMsg: "request.experience.deleteSucc", + }); +}; + +export const requestExperienceRelation = (data: Object) => { + return request({ + url: "/v1/experiences/files", + method: "post", + data, + showLoading: true, + showSuccessMsg: true, + successMsg: "experience.importSuccTip", + }); +}; + +export const getkbadminList = () => { + return request({ + url: "/v1/kbadmin/kbs_list", + method: "get", + }); +}; + +export const requestUploadFileUrl = (kbName: String, data: Object) => { + return request({ + url: `v1/data/file/${kbName}`, + method: "post", + data, + type: "files", + }); +}; + export const uploadFileUrl = `${import.meta.env.VITE_API_URL}v1/data/file/`; diff --git a/EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts b/EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts index fd06d1d3d8..335908b6c9 100644 --- a/EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts +++ b/EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts @@ -14,7 +14,6 @@ export const getPipelineList = () => { return request({ url: 
"/v1/settings/pipelines", method: "get", - showLoading: true, }); }; diff --git a/EdgeCraftRAG/ui/vue/src/api/request.ts b/EdgeCraftRAG/ui/vue/src/api/request.ts index 91805dbab5..44f6cf2051 100644 --- a/EdgeCraftRAG/ui/vue/src/api/request.ts +++ b/EdgeCraftRAG/ui/vue/src/api/request.ts @@ -7,8 +7,6 @@ import axios, { AxiosInstance } from "axios"; import qs from "qs"; import i18n from "@/i18n"; -const antNotification = serviceManager.getService("antNotification"); - const service: AxiosInstance = axios.create({ baseURL: import.meta.env.VITE_API_URL, timeout: 600000, @@ -39,6 +37,8 @@ service.interceptors.response.use( if (NextLoading) NextLoading.done(); const res = response.data; if (config.showSuccessMsg) { + const antNotification = serviceManager.getService("antNotification"); + if (antNotification) antNotification("success", i18n.global.t("common.success"), i18n.global.t(config.successMsg)); } @@ -55,6 +55,7 @@ service.interceptors.response.use( } else { errorMessage = error.message; } + const antNotification = serviceManager.getService("antNotification"); if (antNotification) antNotification("error", i18n.global.t("common.error"), errorMessage); return Promise.reject(error); diff --git a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.css b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.css index dedd20b357..5163bc195e 100644 --- a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.css +++ b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.css @@ -1,9 +1,9 @@ @font-face { font-family: "iconfont"; /* Project id 4784207 */ src: - url("iconfont.woff2?t=1754038546130") format("woff2"), - url("iconfont.woff?t=1754038546130") format("woff"), - url("iconfont.ttf?t=1754038546130") format("truetype"); + url("iconfont.woff2?t=1757469597873") format("woff2"), + url("iconfont.woff?t=1757469597873") format("woff"), + url("iconfont.ttf?t=1757469597873") format("truetype"); } .iconfont { @@ -14,6 +14,14 @@ -moz-osx-font-smoothing: grayscale; } +.icon-kb:before { + content: "\e639"; +} + +.icon-experience:before { + content: "\e68e"; +} + .icon-deep-think:before { content: "\e772"; } diff --git a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.js b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.js index f6731b5c1a..5e96151e2e 100644 --- a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.js +++ b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.js @@ -2,12 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 (window._iconfont_svg_string_4784207 = - ''), - ((h) => { + ''), + ((c) => { var l = (a = (a = document.getElementsByTagName("script"))[a.length - 1]).getAttribute("data-injectcss"), a = a.getAttribute("data-disable-injectsvg"); if (!a) { - var c, + var h, t, i, o, @@ -15,8 +15,8 @@ m = function (l, a) { a.parentNode.insertBefore(l, a); }; - if (l && !h.__iconfont__svg__cssinject__) { - h.__iconfont__svg__cssinject__ = !0; + if (l && !c.__iconfont__svg__cssinject__) { + c.__iconfont__svg__cssinject__ = !0; try { document.write( "", @@ -25,10 +25,10 @@ console && console.log(l); } } - (c = function () { + (h = function () { var l, a = document.createElement("div"); - (a.innerHTML = h._iconfont_svg_string_4784207), + (a.innerHTML = c._iconfont_svg_string_4784207), (a = a.getElementsByTagName("svg")[0]) && (a.setAttribute("aria-hidden", "true"), (a.style.position = "absolute"), @@ -40,14 +40,14 @@ }), document.addEventListener ? ~["complete", "loaded", "interactive"].indexOf(document.readyState) - ? setTimeout(c, 0) + ? 
setTimeout(h, 0) : ((t = function () { - document.removeEventListener("DOMContentLoaded", t, !1), c(); + document.removeEventListener("DOMContentLoaded", t, !1), h(); }), document.addEventListener("DOMContentLoaded", t, !1)) : document.attachEvent && - ((i = c), - (o = h.document), + ((i = h), + (o = c.document), (v = !1), s(), (o.onreadystatechange = function () { diff --git a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.json b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.json index a8fea13f43..db90f79659 100644 --- a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.json +++ b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.json @@ -5,6 +5,20 @@ "css_prefix_text": "icon-", "description": "", "glyphs": [ + { + "icon_id": "687788", + "name": "知识库", + "font_class": "kb", + "unicode": "e639", + "unicode_decimal": 58937 + }, + { + "icon_id": "5299955", + "name": "experience", + "font_class": "experience", + "unicode": "e68e", + "unicode_decimal": 59022 + }, { "icon_id": "44419262", "name": "deep-think", diff --git a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.ttf b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.ttf index d49803ef9cbebf7c4fb04aabd4ce75212446d270..6f8585b8d57c230b0b38fec8f5941cedaf3b1cca 100644 GIT binary patch delta 1942 zcmbu8TWnNC7{~u}u6w_9Z>5)Ix9#pCETvLiDFhy8deEd@5nhr49OPq3MCwF>R{Q;*&0bqu|QvXjsH#e8p znEE2kZBRs%9&xUt5M`4Ew_+>YWz(CR=3`KzQ0GT!>K%aE1daVpySwoqbkuQhmD_n8 zZ{e-Hov-B^c_-h>cko`mn-B0Ip5ur3YlTJXLks?EwXyJlI$RiMTAtBA%QlMt+j6+K z0Uj(s2zIPME1Ho)5;4@H4iPMbh5(E$l&lR-v>*cujfkQF_hT8BkW_70j3B%)5k?dI zScx>^Bug#YQHA?ZgXLICqPS5F7ZN1bBKSyKqi{pJQ@E^}d6&>y5e5)XJHV+E(8Wkd z30)D~E}_eU`z3U3@G1!<03MJ~D&Rp0B?T_d5R@KxSVD<{NAlu_7Gl(ss|1uixOgE! zI)K+mND=T_326f^)(BDwyhTEKfs2i- z9g~oMPMXfZRy{pM|3mu@~*Jm+_*Zft|L{#l<9gPpof=d z606yYWGay~bq@=M6KWqkmTIXZtJqyapX-%8f+&;H@ z#W1d@G?=(pES_H+Ybxf8iFjIbdp)#)sq>r0Q}I}_xGUWaaelCipa_8NzWH$9PQ>*~4Qv)!^P2R94`!m8TZ>I&2iZg4HI x8Je%#=duO6eR}-dE!(v;>nKdLWdp2(Jpg6zQ;z@PN{yG#H8 delta 1363 zcmbu8OKeP09LB$U@62=_opw5%QuIMv?@>e1SqPdSA&rPdBOamARz2DdCPE|~B1Eh( zCBa5KB55p85~M4MNE(sG!ZXo^=6~CUjUZy7zT?lr!pgn*opaAQ_s*RE_kCw5eJOP+ zk&*5tPXYT0;Jveb_raEp7el)M|7T#<+ScaArufn8f;+sIta{lM`RL8z> zdMKTE4=lXFi9M%#E0-o=@m+H5b5F;X{Ka1o@^Atex;^jopMGp?EaS19BOLo-n^|7w zyJWV^8pwL>cdH`5#;}8Mem%w62LUGnkM)*suE#QXQ7jNg0#YIgsgXKaE%nkMn`Nss zNsH`|v~*?{I4g?&wNjbA>+H-}{58GX@jrJ8f8RhZ#-k89n2&jgV=AgphDn%+B9yZ3 z0(jsnip0kw!=I*O6NY)TTsILcB< z8K)tS8v3%9_gi+*31xDPruJ3S@~Q=doq?O8XbQIn2`Jnsq(I@8AyI{!hnO|OG9WR9 z1wo1wmIfK8ut>=GjA{5m8GEM3z{(*L6lwtZ4b^~{H9}n=C5lOgBot}|sZppJq)wrJ zkktxIA@vF})qG%-4OADhS)tC5tqK)~G%3^`(xT7>$PR^mK++1GfpjYL2-2m{EeMa5 zfxbZ=0U6WKLdaudc^7*%|@rI^t2YR3<2gLTgmdz0PgR5@MF15euX(!0j{ z&=>KgeBX06<@EcP_@4!;11AEbL4UA1crN%o)E(*%eaoGgyFcs?uMA(!LtcB{P=0Iv zyGUs?vNCd~AYRZL-4H!fSW$SP@N29yb}2SmM2C82gkHt1WY$imEc6^p4p_|EPrwd8 ACjbBd diff --git a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.woff b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.woff index e9b153a7f53cdb04c2fd5285bc717d763ccfdf5e..9e3764205e5172452a77bd26b78a2118b8a01979 100644 GIT binary patch delta 9211 zcmVpm=l}o!F#rGnHZ1I1H)v>OVE_Ok8~^|S9smFUBnRIE zyJ&56cmMz*JOBUy8vpOVAOTb=0E%v5ZDjxeBJcnJ0YCr%0%s7TJg{(Yb94XzBrpH~0jdB10)yn?!(NjJ z0gHdXx-l^^iFa4s+|B*AiMeeTTd}r^V4;Go*a>!8ih>rlegn&ev?BNc6t#ceUD3ub zV9x8Y3hBfmGyImBVRwd|bDkHt0W`@wDd=#J1|8A(P*)o+t~YV};(DRsOy|G&so#`> zXo{97MLC9|62mbPqcIlaF%gq79WyZ(3$cG3Yf+8u*opl(h|@Sr#Z*pHX*R8-^|X;T zQ#Bpc=4-39-P&HgP+vM;JlQ%whs(RSU-|Bb|GN9NZ4G)m-e<48&;7N-J^JXTg)a>8 zftS4CF%Kxw%OfS%Nwc1Ri#wG0&NrU%kf(g&BX4QsD{tuHJ~!!Mkam73*C$GLfC_&d 
zyr!SeyjQMwDbmVwCH{&w{h{V-d|Us^)|x*We_x(eayw0lsHdiEh!#^yM7xo%b(oSP zI!zf8U8XdNZd0yAk11iI*OWC;G^I}Tner$4jr`pKQzwX$<++Zsk)JJu?);nI!26}+D1&6dPjduni@ztIZnc7QK zO+6;Ijr{(0Ox-5-O)V!5jQlQ7P0c6HOfx_#nx=tNw!99eDbs|IW-YIgX~i@(q;<=E zNE@a}B5j&xic~dC7wOP6XQU(3#8I2Kybjk^O;bp1*EEmR_AEECUNFrl^(BAH>+|uV zX>K{$GTQzCJNP)+c$}4b3v?XCm1tMjO!rLBbocc1Z+@CztEV*@%}AQBEZLH5gJnxH z7>wmFwv0buFu?`_fxsez1K8#hz>tOA1$&c_1aC-42xQqu@FU=mgycAUvzt5~W(gYt zhp-83*bUQux4LI!o3Q8XlX`!uZ{4a}-Boq})s%;#jvvN{@j@z0X;hIqjiO|)R&JQX zsG76P9P1U3qM1B0y%wu`&4$GigznYqZXfnkbP~DfbI=2fz@V?NJ|AmO2YGuPKFG#^SDuu;0p5RMpFbG)_g$C~@f6E&A$c$M_yb`z$t1OK zz~{kxWRdf-hZ^-Y>n~z%xNu@cqqb)KzpP(duiMuyIbYT=u2g@5a^n0Y!Kjb%gw}ZQt54OV6dRz@%DUR_fqg<-uSVtQ$_glbY1 zwtfU5-O^#(8wT63yt?GASPsxI^%~%5L^ED=nJ4J;*|&Qcuh+}ixBK7^?eu!>S3aD2 z{=*0ESpJ}W@F0JU4%!#oHn#h$x6tHScQ;@BVDy6z@C3rI&|Ju#@{69AF@it*vPblz zhunH|`nEs*@wP4h{JW8@Pd&92nGaq$eN#Sv)3@dq9)G-W3v4sR!rb1+58@DTbtlzF z4O5fUCTcUx$;b+H2U3z~2$hHFd8k^V^-Q^5%^8`jK=6OwOqNl!ii5)~oLjR_shKbm zA|uf!Ml#UYUZpc3#*1-s4~dD+-=nMai1ly6bYxVWXGJHc!52*0@_-vO5`@_Jn36Wxg)0b1uf@`+rm6yHbs z5~6+N<*0v_9N75pahCE?672gZMVZwcnRvP0m<3dYfmO-D)apaXS+RPfQr0k2K+puz z0Zm30n7>|5A&ovJ$7Hy6DdeV%CX{AENhFk4xpHbIRpwq*5?}VKyJUWbmv^a>c+8*h z9}_q5ghuAw+qDPOL_$3PWSyOf1S%`XctzolS>k^&jync#PQ!aYiJx)w)m(VzYzfsz zX!rRFi&zF_vJQ_9)38e#d4B-`13UXk(5RdwbpfGoNr@2hFJ{q}a7F3~hwVE*E25~~ zSzTF8)W>ot7&VNj{Xs`pR|ooaA|Fa*5~1qKKS&)};ZhE54u=cjO4#0iTEPBkWn~rp zDmQ;tPj;3K)RnNaiF`hR_)sDQm;J};$_iO?L<+o@{vj2EN|w#$s`ZMg^&+@C?++pd zxCVXid+&90cf(T=d!3RWRGX^$XLT^IpgE{}Fpm&(ojTYJ*g-&d52`JGhq3R$8ptu> z_kL;~HBK#u=ae-8!6q^%7GW4UOW-Dj3>&NVE3lun{;CR{paP9hA*%&z1xpGm zr@LHH&Cvy3Mlx?7@&|){ddeR}N;F(HnhzP}vN4rDoF*3!QEg?`% zG_nmdlGi~6!tG8{PJ8$0Ht=m3m16TiD_u{F=g#r}akyAQ5z?y$kIy(S* zy0EyrMbz4l6hmco93jDe-3RfPpUgLOBu7O)V!FBnUZO)ww18et~^y66=m zkEAE4mfSF?Qn1EgN(n{ILYw&2m#z}l+&tlnr+OxendIC%?p?fS$gqkNJx0nue$&R$ z^%W1(Gtm<+Zo6b#F@gXL;8=fb!?1kz=Cfr?H^vqm%zy4#G~hX55vE8>KC1z4*O@GXBh=;+-TNNW#R z+hJ;qT1Ks=&Z91(E~l;~dfnAKVks}Z;AWWZ`SS$~0PoB{yXpTOvCjQ;5u0^k^p zu?OReu#aJU%sx-h?+^O!WY&TzG}vuX~?1C}*leT$6m!B=30JYu02`N=a7g91`8 z7#d|oRTS0B{3sj=ha41Q0KFP(oyTigLAHJJQ_Sb6g1q0Xi?sdTHc?%IrKoPyd-pP^%ox z-U05YrnuI)Ws85wOM{4cLBNQ)c0Ajb!vh+T2Dg^XYM3klY`!yH$M*%91L>q|^dAIy zOps+^OpYsK+)q8eXr2~@0%y9hvfej5gAw&CqGchI7;b5ZD zrP6;sg+5M3{C8_KC&%T~;AH-(%Bn$j+Q&1PxxhEDe&BxsGV}#7hCX&$P|&POg2Est zzXY990Qo3_?*`rp(RB^bg*9-qS`N8FG`|i7%K-C^08H9*miH6dvI1@O2I!h$RI`Q= zTG7{g=GoIMi{&kMEZ=m_*?s-_JjZg3i03hW5AWGkoX@bHz=!xiv9M(F;mPqru~-3~ z?qJq1vV(K?0MO~7vA4(TW9N5X1br|@`KgFwxhdIjjr9;hhGel)Msi&ja7Mul z*Il>9a^OY=%ro0J$ z@RCbV;N!~*N&3H1h2|Y7AO_&tPwt2ml(Pp<;_u-uDg}CK)ZsXt!~%xyupOHr?AE9d z6Kj#E02yor11}kl1FgfN>sq7Y8YJLRD~DI+8mN|gM!cL4OTp&zq}&?%j)O!Ge_woc zljMKjy_c6H{%<7zWix(hpI`9#1pnvm3gE6iALo8t#KwFG`tI31XVX)D*P=W^#%{1h3}>7tB%X-j)oJ zT7#@H0lcmcOax9qnG+R-aimCv$XBh@AS!=sn+D7^-?eHri&m^!q+qM@&REQhq0D!} zu~?WCeemmy$C~#DM13L|U+4gey|;f+e?mJ=OPs^{{i2_vBlVoUBUi8I&=(_x=ucg` zpF$BgqmQ7dNe2F!B76tvPc7(8Mt{E%%?IwdBarXr1kNAwQ(jn;1Ne2k(eW>8)Es~C z6Dj0)4{0JZAtGgIA~WcO_?&P+*qWglQUxYkk-SE&qQeirzF_3y9rnjNE*@DxAmkyC zoxWtly86ICecguF+Q6dG7hV{B!G3T5{+IUeM>E%-O5s4-DFhDi-qx+hsUt@a^~}vT z-`tK}9)LMax8^YU|1$$5WmH40IKO|4QG=~#oiL8fst(Qwt^UvFV_%3mUwZc0U!l%P zzb+`Lz~mxxPBgFj|HpG{-jDVyShe7y1$UIeMzjX;{RyW!Tsur+*H!@ZI`2QgaR-0=F`3)5 zhm+A=&7(wJ(|u5W*FF&Ii8*l@ns$uR0Vh5KB$|dI)^nIp!T@s2zjrT;alnoJ>=|gC zArwwQp8#hZvRIScDUe9&_{;bZ=y!?=Qas3KjLK0KRiUb&Y39Rh&!E;&YpL^LZ!7&} zn1Iorg$YO}SI}(G0`i(Uxq^T6e&XX>{UkO$rQ&7#8Cbjgewn?BOUJz2 zRa{CC5$jL$XozQE5c}B>)wUg5Ax@7$6f*=?O>H|`ZT>R|zdClqj;*&y zu*QIE^5=|>_Uf5bC6#}wCT$^EO`>_{Wg;jR$z&q-TYC0_^W{bTtLI65bBg88Z2UIt 
zzl1dHNL7-}{m?yGNzvPTMm>5aqt79Qz3RdX6Bp0FG8fkh*}ja~v8ffuJb|CVi=610 zLK;~|cP&NG`y|L@3A=!WV?dJtGv$apl2qX^B0t$Ne6HI?cgTO-DmE565=(n<==K1I zvz}B;lOGY%!XuGN5bBr<+^&R0?MNubt_oIG9wuT!3Ot8X{|OX$(o;-mp*w>q>1TWoX+ISU46qsP zX(NW_+g}s}feXx-Ue-jBDq0+kL=5GHMx>)-GfVgwRJ1}w{NKUT-;4!NskpEUr>7U!BFi#_<-v|RKnb`$XSDz#0*=(? z9C=8!WTGOwIaAjQqzgU~Cqz9w-zPJ37>rXsjve#)k(cu=#F$~S7_+0S`3kA9XCd5t#bQMVa`E(_htQTQ?!W(vWjJ`G`HG|?Jw$&$C+YU(_uqg2Y`nJ%`zeuCnAP-;T((oh{tMS@_}jYmOL^pjQP%gsaKXf!+n9{dUyXg)*i z)kA0|99_|TWk!ldB>lh8XEPsOX$89U~7=Rr-H#oH3p! z!*zAdg4V}(_4nWFuVv8&HJ8+xAj2_iP?VHdR<#crRsX$rS1K;=x_)(p>VjR(IYBV*ydg{hb%QU|*wfa2A@$I(u{ln6j{JtD>is_uWdusJF ztEaxunIoR?^&8i$xN)3b)oOo^$?mfc;A8OY0>mVat*Ap#1+k{9WMBti`AMeMacvm3 zL|emjqpW$M7fGBGOHr%i<)fpcdX@I%f1Xy=!7caQ_Y;rjC-+^xMvdr652IZ1w>SOr zI-0)jmp5(tmi=Dd!{3xn$6{A|=of)bRntGud+4e@xP=e_DLpKDn7n@$70+>K2<^mI zJ^3HX{7tb~I(_xG^KF?wfS<>c?z_vDZsi!3ShQhKob?90IJ`cIo&@)Z5ndc#KMQ(> zRTXe68ueZXfFBwe8PO`Vr|TyvMVYhdOZOb{c#hn2=_)0nbznxh?B3lkUQ5&0zPNki zAr#DGes?Mri(ToVp9X(AMM?jdyt+1L6UINf=bj&7G*9$+^IAk$?$8k0@q?bOr^5X1 zSS*#g@|#_bk9HmY7w|2^V4+WgRcB$x$veV3kwj4mWswEjU;B%6O2f$&nxB@0rguFy z6i8_Zl<8C;6*$V#dK`^BE6!aoG-u=Hw>NJb7#bSbxcRe}Y&w4`S_wP2=8VanVzFoP zjPEU->?xH1pohOV6hIJ8(P%{gaYq9wxw-FIe(wD%KHH{-Vde}C92F8>PZx_5v!qME zcg94q*xr*LgYIlVEU^r9r{PSYmnQG)9Pc*jwMy&M&rw8fRz2wklJ38Hl2NxZpxWGX zt|UZ@cuh{1JJo+eP?p$0j4#G|BFUVjX;O2kq^eRb8R>}?`6%n<6@QOzsWFS7=|`b_ z-hMJHZV@~E>qR-daHy{@Ug9N@4Fk=vKi8uk{V9)5Bnv6&{NcxE)RY)gQ`Ykm@r!TO7!3tf% zR|SFGu0g!mlft8V=uBb>Pbn@;s)7`WNRin>P?KrBd5)Td!M?2~?I-MCFrxTX7zb`J zR4CQsYIvy-3`D^-D~Ct>%=DVSQRxk;!>Swl5Lb|{1t_&+hh|_UB+I6pkIG*dmGKgZf8T2!^%Lap1FvwsKx+J zm*YEk7NX7HWxC8HorrbY-km$q+4jTe>~DPI8*M&c4ScRr5H6Z6f4w@4D`2b2NsQfu zR9VBT#>#f2GRDQa7WPaW9q(D#75Cq{>wCN4+BmX$^+?~;s^PBj@vh-jQ|Kpa2zpoB z2YG*xJ_1%V=A5;$SU{Eusw`*KOmg~)OjZSX!qXl^_lYuWa|~x(5ZnyM3+K(>||1j6Vy0h3a?~8HjY}k<&eI7GzJ# zs1*s+s6@b;KqIivtsIVF=EylbNx_kiGr)gV)k>hp8cxV#k*`x&y+J?Pvovl$hQ4?t zZJ`&M@6vD4)s^-l^q4*V>tFBNhmO}4R##PZHNLc`va(7e>v0R6Zk>L8E_XfLkoApu zMvDHA|7HI;GS-Fc|NZgIclws~((%*Qop$cRhr9aEDW~T~QbzBxKCx@;Z4=Em7hHd1 zn%68a=Ogo=<1@h;-hmNi09Q7sG4j=k**<)2xl2Zdux2|3c8?Mbpg#^t3%nDrtT;x09aD(Kyh7-V8eI5RYbh zM_WiJ=uvwbLERYNv1mkA&)waf}H=O)F8HId-;OZ~|mFQvTqZ|f5Ugh;p75HJEYUYcD z=hyP{d_X3d+D9PLOTc}p)Y27?FC*z@Ck}=WwJcC($P_wTTYfc`wgSN z(=E|nJ`xXXx4$cJhdJTf$$ZDeY(>qaBu})J6D0-@lWbUmN|Vg5lSzLf)}0(GgSSFX zl=UQSLaYTrhld*3KHsJ$Mn^hxgXhoh`Rg?kU)Z43rporALx*Oj5Vh!v)AC&l7cEc>Ko)E_YtzXTg@ogKyosO13D}P^Xf}V~Z?F4lg_BZ{+)M}T z3h~<;gag6N65Gd?P8JP#Q`U5uB-e7i$N^PMKb^WJ#w#9R9Xa zl0#TpKo_E!OtgRb@&XBmRr}@Wq*Fi?^_eSm)G>shbD#1=ca}Aa!hrf2Lv#YPP?5wBG zIdb^LJgdc=0uNeSXQctl)o1vVI@U2ADYZgpQSV%$;r% zIFz~U8+^<_&y712q&VvFvIC>OAmZ3LWmag#?GHeFwUIg#{Gm zZ<1my`b>WjkRLx*LVtUrdkKA}RGN1B@7dEL`vm;h-dgTAX1#9yCRf}0nA;3=e>%~_ z_LoW}XCG(q<6xsO=;HwB5mWNwBJv_^O0Y~>?b`O}9V z+HczG2k6}vFMJAz$h^_PS;`7LFX0k)MB)9Blb2M83(<8TpZAsz6^%XvWcY zdp~{c+NiQ;?PF{ADABd>cXFlq8l9!<_I@ONTB}=u5weay_yhbyypYl)JNVdet&>G`f1*7V?45mTgx9xOHfFcxdakt2g_CDu*P$-~PQ9FBpc_51^aV z_5nxfbeszM?5_lZ>^B*W`KIUp0q6Qid3c;-U}Rum0OD!f1^46mZN4&avoL_blV?4$ zF#7-h|Me`4%#A=U2LlsG6aZ8}43?9TARB)Q0HG2Fp#T7QoMT~NU|_+98F7e{jQ{_q zjyaT>4*28RD& zJ_7(8CIR2GVv7r~p3sARB}-RVa(0v* ztjhjxXE?$I=!+U%{gOB)x&-j9`_=fNJfiwKfe~wnmincv@O;%E+h6ovq+Za+U6=4@5t;;dGbg^MZ*p~n44ma7HwmPzjjfFvuEtsb;m>3G5 zMB27W#XfSQe=QYhBIje_$%-`v?jH%E^0}9 z>4aBoQpTq7Po!~!I;8U7+L+l=1TC4iiniqEqXtdqA>`|E@mK%aHOIXRkxw7t Re3A6(bIP*6{lmIk003HgyNUn+ delta 8800 zcmV-mBA?ydOT<{EwC1Wl&Sz|SW&i*K zlmGx6%m4r?P+OY|YiMO*WB>plU;qFBF#rGnHY|guA82T0VE_OilmGw#9smFUBnRIE zxoB;4cmMz(v;Y7A8UO$Q;NSxP{%mh!VE_Oi&;S4casU7Tax!rzR&8N;Z~y=yL;wH) 
z9{>OVAOTV;0E%v5ZDjxeA!Gmm0YCr%0%s7TJg{(Yb94XzBBTHS0iOT>0&(PYFyoU5 z0gHeCy5^!W#=C1Y_xpVlli0>qtR; zU%))C$10=~i_Gv_W@p*Co%0NE4Je1Vp)oewD8&{H&9S$1zQ0WK`ToYz@A&?mZ}Fds zP+3hHP*sB()`&(mrg2SZQq!8zoEEgInznzmtv&7QM5n2es%a|CrscGj*3(9+rNd&r zSSfaj-Fjnv>1gqI^Xv>Rj@*8!BOmWo9u2fdTGwgfJzx0DBL;ZL2j20DL0;0q?MSJM$K2s7uXzyH*~buVyr4prH<5qp zEqWr^C*0&Yt#KpQujmQ=PfJVwX!v#UZ78?V@-He|#bi;;TYj$=D;Cp6v12iJ6uXw+t@TEWS){&XF_j!GTFfWMn?~y&!(e|asQ>@~ zc$}3w33wdUdGqG&&g{oqf(SkWvLw1MRijps!H`y0~Cd{lIGS_MUolT zEh46yHj1b!Rcdu(7$MEn>Ly#)*}Cc0BzM`gfx4kp0Z${Eaihy!evikx!_BzeZpOO9 z1Al0{+ikt}@#IS%KXB*DhpYn!Xmr53;P%m-E8am9EADB$^3ljgAK`y-gkPh%fHmnA zU9VyUfB03G=tU1Z^~TigAAWfImVf&7@YZLa-HMEdu9~_zm%I5}^YTwUmA{pOKNjZp zTl^3XP%70<^-x391ht9U40F;m0^Nq>1R6l4A$l&V6lpD8s#UUjIwKIgJDp)X z6X(>-(`p8cgvdzr$&r6_G`d@EkBhNFjNAiay!|&QC%3i9*6Vzj7c^9}URT=M6qF-n z6Ul!@t?}L6FbL4g&4COVi==w1jW$BJ%0^7w($*R0)mWI{+?H#D%My7lrt7BA$k;dtwvLWA^ow{d5KqSgmDRtK+A_jr9NHWV z=7Z&+wRfSb2fXjdSu(G;L)*O)n@1~!pqEN}O znQWz2Hq>qecl-T8!~j>P@B83`w$4s?Dq?StbNxy~QU0Lx=VUYob@t~FVs23SI|17d z=+1tn$?q`sy;ub~Cj8z@&85bumGGRBDj?WI=Hwy_A$tkzttHZ24#CDMy)x`)wYQ=` zC#XOpRLFm7!CJwR!pdn5S5&feo)4iAZyogd{a$*~>ql}VSkfC0>!p%DnL3mr7aB>W ztV1dEYkeS-8CW!+!#$eKRt*DFp;*xGZ9GiK(OSQRLVnApr6={0byW)f?j;3XX%a3P z$cz|OLQJMf+jHHAy;O!;1T(Cuuz0|Y0&G9anAv}BR76843A3!#YjxnydPq}Y76cC+ zSm0o?(y;Ep)k6sS)HD_Qn1K6I59cp&Wpm+q>(-yQ$Song6vD-8t|{cDF2B!nhC4_Q zg=?=Vm{JEIrWuJLG!C_2NZb+&MmG$F&f0ud2-Ee^#r1#m z&PS%Y^4ff(Fj3%pN|A6O2vEl&tx(wT$d?wkZQZdr-qtly$hD`^>6CfSL{~VxWd6wJ zwqjpjvB_`rB>fubN`>qPs1+i+p^|MH9U-DAVBp2T!!g6C8OFiVXbCR5(}-CI0Tn~5 zVuX1GSgVilt=Mnt-6%+F7g*aNYLtIkPOYWRr!Jzdpspv+s5wfQRhtm2!oHpcn5`!B zk1+Mq0O0zQ7_EiTJ{v;-9HlXKVSEwxFpP)U$^rYLQ<`m{_qoVkRL!E(lp znwaQJBszsax{yYh!r8(0QhR&pR@tnXsH65MXvR*I@1NBd&wxy2sD-e$#6mbGt)zne zwzc9ZbzoTAo=6Txk?Jk$0wRA#*ckM69nWp-(EIfeKf;HyQFJ0^*dU-9&u}sBo}e(o z2k+tH-foVwKB4_$G~lPLPdJXw4CoIC!Jx2#aK#1!Jph`KzKrAcqIqBhdF$D|99M)v zfX>abZW?)@GCPki(tn`h)EZl}x1lm&M%x-UZ83Ri5HU9h7%|tCENFlAro#gok*OiAz?HWlSj$>jLMC-vLo3y3G&U{M1QjTtdc_e zB#D{f_`Q7_>Y;cD+V>jqNs$iv2!kyMPRR0gR!IF7$s3s#%Ux806$3*mU zT4OB7vFJ$;$9aEFf$?ZQ*!wO)1%67Z;*#jCS* zRLve1ui%4{zwr_&w+0@wk?4`X6<^;Zd3WySC5itt$$NkKj91#@6+9lnd-dHu+)>Ro zUd-0ea4-@?4U(bzo=4kz)z%*?LDN3%mjakl%(#ObKm6wW;Y+5iL4XLLQ*uV!{x1OMm96{9KTW-0f6}vnDbC_z*VdDQY10;iO*Ug(H4mn!0BTQTLc}I5Ng@AlU9vr4dVL~ zPPMsqh{UeV0O(ELyPxCsd!r$4*DfxE?rwh^CF+{)f$}@n{%BX!j?2)LZIt%g@fje| z6co{}Lxd6r2t~cScf%O_o!HNsfz}y9VHfmCaK|#61VCqQl zgEA*WA7K3qG}S{S+G8<8Qx?@>-hO}1hNBzKx!vqI1w{YGa}NrxP>O$$cXK|O=XkgG zO>Zj7aR2H@GZ%cizYSWive$5_sGGZnO9~=ly(u0I@C*!My%3;Uwqq;A=~0Mc2EeK* zEk~=xe>&k;+io}gC};qc0F%j`kd}-m~62vTp)ato_8W zp{bqLOCXjWM@PSb{{kmLccrLqJDXT%>!yehEGDB8e+;?VI$2t>fQqV>5nC5&4O%K> zkuhnsS6Nf(|B_|-OZ~tC6TekA2N{^$wE~4SqH}yT`Y|9&e(iZI6yHILMoQ7n=+(f+&i1u0&I9-tUH8 zqd{a@hOj)?Q5%0K0@r8FCO|~MmfEZ>56PxXltWI=)bRr8yhp@wQ47xVgqS%D#z_yy zj(WVv&3P7J%rF^@*%4Oq%ntyU?+T<0wmsCs-8;KM@WJFoOhYa9gj9FnM&tjezB?XJ#36I~UhkSyt5Gbl{^5TBa5-(@{I#nB~ zrR;*5UEj?u8A6&ZK~C1ZjLIepz7xIgh!$wv6wtH)+H&Q-eOE5W{v(aoBn@c+`umb* zU9oT9zS)0xZwK~LA{8QWtCA+_2F$-^CkyK)OD{*^#wS+8??+$vr+ui;=li$YPuzCF za^&)*3C?f*z?boDzwN~Bt^DI<^p}bC^*R|aJM(Be6!?5=b09Ci5lH3(e}}FL)Xz2ZNDFa0Wd1RSwWNOzhReXeJn0)p%`2ibNy;Bme#?X(q)8^aV6Q1{)5r ziq0RLj7S#Tt%oCaB4SlUkHFPD57>$S15Z;v*q1PsruwOKs43!IoO~(VN^oLWYIiFO zjvasF`o~5$WTsiY-pn=wuhZZ@GWKDKX0T}!J#s(ri|>9;6t55!rA<{k(D$uA^oo*D z;QGzhbJy+3nr3#-x^sVY61-5cb}8E~N_D8oG#?dn6~i1Z=!sNR%B3#0`on06M&Bx^ z0$kR$c3MP1XMgn=d4QtOe`b&I5*ePvTsj0P-H?^CF@#V?2 z=i82Nr@7~EmPJFq4Mpu@Dr@YVTzh!!M!&6~bueUNkUH>Xn3=(R5T6`)g;)Q@v6 zx}x=OAw)n*4~Z@&r$)rHZ5l#5{-A#=_k4)IIT}r+uKjkdCG-37OL)S0_mHWXS%xJR 
zZ3q-+tqv~^uTP>U0ol1*cyW0B4ConFk-@E~*SaA9et39zSS{18jvppvdCsP<-h0I5 zI&$x2Yvi!nh8g+t`*yx^JxyQ#%Fc}kkw1s|oylZ0dXYRT~7=Q2H zd%uU#T+!vusbOKIO+#qM4!Am=4e~pq(PZ+fZ+6%|+70-h!M6;8g+msAlb(@9^_ca)>G7#e;-9GpKeXXEB~H*f44 z80g!$`3skBIx3oR%fIf7iLQS_p=;udCzefg6^j7SLr)C&5QI}STIECBQC~9D*z*EE zxNp@LTGTMioPoZhLcHU-LScNCblDSUj28;6J^2CX&N{>r%RzVQ_7u8l^1e0OyR22q z%~L;H5jk1)gcC?Q|H>&w-AsdObI!RE5G~?$SuNDAJ=;?_Sc}Zmb{veZz7YD}9 zms7Bt^7&%}#dwMd`n-$~@^{9&OGBe_T868)AnV?h9Jf-?#X>6{einZn&jUM=glKrm zKINPhP?G6Of2!_wVnToC1aDTJXX~dXM`~FN5;q%pHmwEyW>nHNi4@j&jDRt)XLMFc zgrzS@+Q}lBPbC!k9VJPpF0g*W3LV0OfrR-&PXV)7DQIQG5`_fg21JNOsxz9_Q@DZ^ud%;OTz}*nM&erj)Br5~^HM zQB_y$^WUnT$r`ZXHkzbFbvp}O)k(s%Y@|Q645rTNp>n9KOJ&w%@$No|EVge~^yBXX zl$@Scbu0%YZToh>_Z@#o!zVVF~ zpRWZzS11SQl1{U`77`VpObyC1vhLyR0~JS_&8o4)490pYca-J0e?{_kA8-tB6g z*B`=P0KY=9J&QC%x^v0to;w4wC#99L1Zq?wU{#n$M9G;}$5{T?#y;=l4 zR<}bQ6ZU^eW3@W{c-OL+^(6Yrk(7yEZoE&wLswQ?i_w$T*e`yuXAe42T~Jw5(UjP- zuJY;%jm)P^w8&g^V>Wvu+>rT=xq6cR&wsFf5FYJ7);}Jfd8}u7HyvAe&cbsSJkrs7 zb}2O&PU_vud&G{>cMUYpn17vNTsPmChl~TZ&jf#Kcqc}b4qREMMyb{IDTH(Q+H{xn zG-1tF4D1|7AegOlCef12TG{=ny4mu!n!}-^)^F3@-D!G;l-6(UoQ_42@+g{W?5C%! z5mHItrY>vi;_IIbzA)9z4-96GoLP3vMQwZwzh*0!ESGzwSnhNnJ>_;FdDf+|- z`#gVe2IgshagjCaPE02frfI1ghBfU_8Z01~`IdfCHI`tUY`)UcShc#H-zTGxFAy9Z zCZHlcgvh=#_^Ar7wJX36a}^_3$S1}U`9jW62Gxq97-7ClGUQH|3hC1E=`NZ;x>$RY zlPk-pXT_RSTnVBNPHE>W%^{5D5acOKlQ4foyE>?rRki)IyTDU^nhK_tt{v9{$=LYB z>{+o%FnqlT$T>SB`FY4L)ynGik|k>2_o#Yj6bRO>sHdh3iZ-2%v|udK0B zbAj@xZksr_Ch0AfGRYC_5K7P}ieAbTiy5l|TaBJ{W5&-~hdGF-(I5v=d^k}`z-52^ z%D$H(x?a+Cla*9l|VXV%S6hrm^`nOJL`BcK*g&ya%>dkYiJ_cM09QACELm+fN)hxmU}gI*C= zR|$j5=JM!I)OcLAZsg}K8x&Tl@gwnk94?e|?!DQ$ymce#g8rn{#U{1hQe&jcdNPV| z6T#jjkPc{qz*npBOh-osde}EbA%n#cuf}3svDjU?4BRrg=55S*)}~V_@^RbucpJZf zzo2?v6kCAN+&?JOGbW=PdSB)OLDMmks)J8xOT zWxZ^orC@OnNUnfV3=X8r>49KT3Am&+>F9fAfREgAZZr*GR^p?#oEJ?$st51(imKYh z`~Q;XyHr)=?h4UMmo9Dh27}+0i=hCP=F|B|Ivr`eI$y$pP*L6#P1ApRCUFxCj&vRF zbSFw#Ki9f=7%3W6MK9l_AtPn|*V1NQ^9X(%cLMk2Acmw2^RxWpn&TiN=X)dP3z~YC zc$HwMpt%amlf{31ti7YBxO~O4E6(ih>FGXm#Z><#g|~P;di-}wzj*g`OO{-B_ZOF5 z&kC*t>-EsAkBMPd;8=fgdT^>mU{hu@Z}CwbeSgfRAjMIao9!F%_z}m>DX~H`Zhrvc ztBur|;16x0K1W>y$XT^eowYtK!}7o3sXz$|;ehO?927T-UJz_|1TtIGw4T+Y~Us>mb ztPSV@I$$lKt@F{vpIGuIwEX*zKepbsl#kH+O9F1Gg4*RPMryVgIsewQ3s4}Ygt8gI}Ux@PS~(r2|g z6&N97`-9)b&*KG@Mj0f(HyfAb(aC77j2+juZqzEKk)_eK+qRGoe70=67Qn3oLqh{w zw_Us0<5xH&dA-(e+<5*Fw0;EL?6!|crxTRlV|~r%XTLnjsLVHA{{z^l>F0QyV_;-p zU;yH!UpD8*^V@u7;AUX}fhU^`C&K9e|NqysFfum+xf~2kAW;BvI}Iz786q2h0sx^9 z2A==`c${NlU|?WD#*Aouvhn}_)G>!L^8vaj1l|Au000000B!*O0a^jL0pJ=ye0C=2ZU}Rum zuwbZT5MTfSCLrblLI#HaU_Ju?8}I?$v+p8L0e`|U?E5yu4hF(HJo6>4tx0_9+Bvo} zZTWhVCWj~VU|-47)s>taC6g7&|Lq({xPlZ5WMIg#M1c|&uHqQiaDwZ&ft$F6+qi?f zxQF|AfQNX5$9RILc!uXV#Tj1UC0^k*-rz0X;XOWp<0C%dGirRnSA4^F{J>9~<5yBB zqJNIZR@-$coiR>|sGVhfXPi)VN=nBxtaX=l-fg-8@2#u`EGYzTogy|nY0yoZk5$?> zb*|jjjt;Z5{oRU~%Hde9t=k$>4P{{;B5m?9x^l5*M%ae`X{VcPN;@6d%*H}5#}>>} z=uHfnA4J-w7>a%5MqA2g*Et_EPgZOwQ-5SAQXVLdd382Tvyg^_ucYhtd}d5XCky>* zEB0xKt}CP75FgltRn-A|ieAWAhM`ure2kUt@M`FQi-s)u!_t;BbUy6^`81GK`P7HUcCAfoKbM00bZfgl7kZ0~-`EMMi^-17ZX6D56r7%ll@Zl)+`nF6jdfxVf;^(BE;B4msx{S zEyee@H|-~jIiNtIJhL!^#Bb1D(1ak~&G&BpH`8`)23SkTRY9IgWz{>vy$(G0tW3`S zCA)u-LTv?!R0ZgDNyzsj__)`EBfc1*t`hq5QX2&mZbdZa!Dy^B;iF?YA;16AS+TW4Ey;_<=dmyMEC&!OhBB38`!3R<6ksk1iZ)%7GHsg| zzZ2iQi>hI~`!*Mr92XJxaQryB@oNH*B-7v|NL{ee9kJ}JyWnw zah>;EXf1Qqc1^h?m8exJsuy#~mkQF4(q4SQem{9&Gf7fK#=7dx6E~0fWe7anpIIMW zWdC?e-x1EID3D<}Wvy&lK1oN(&eLU?M$u2olmBt+Twyo_T!Ufjwj8*i6U!u>nMJOA z(=D(h8lxk+ht3W44c!|W9{N~m>pfms%gR-yy=*Nz$_Fp!Zh8IU%R8^e{Qdh?5BEQ6 zXfAF?*86|c8Ygb|#eF5yYu}*NfF2FDZCN#E*0>SF`i#kSsWh9HHd(i4*OXC{Hmq5a 
z+Oeoxo2p*ZI_z6EVf+Jc$bn7^25nkVXwk3Pj7Ia?Nh(NMGo(M59T31qD*?v<;es9l zo#4X**d{R8BCuE`aF`=JF-v%1obbj7;e%lUpHK?|Fh&rO69RM*VpND=B!mJrLWd?I zHW5Bdz#fr~UBZkh!h%sE7n4LjHi+q1BNkwZSb~&j#17GcMWP$s#5uGPeW()m&`S(s zn)rwgN^k5_3b0I>g9*yAx{Ss$)D0n4q3!@-M_nhP6?F@U4%7`I?xSuK@c>w%J{JY` zSkC}1C89T>YF#*m$0E793rJ1yoNf^`W2mbSWA-M(0uQ0Qk6?3 zjHPEM)#~s}ZaXyvW$lEe`{7)1mJ_48!xAS~1FK3aRhdc{T2Qomg06()Sctf;Y%4sb zn9|q;Ac@tM4<^xrmnGS3EUWJ;OCCv0Z$Xv(lkXG%Kq9fzh}1fLR1G*0|9R%hwF!zUtG_= z6TH0neDmm0R$aH%z~rJ%LiGVeG>WkBcpMP3Zro#pRz?t~(~Q*oot!nQilcBIiR;@b zT^?6uLoUh|P?*wLNix&v1iCh~loUTjd4n>_8R5tT_Ac!m##fOQDSwYb(Oaq_UUJ6( zVtO5;tOk=%(}cf5lM-k?3b-~RD)nN5UEyJVPC@gWko}nV5U))E#JIQoq#|0<{J^6| zz)vC&E{fQ{TM95<+^~z_WF#K32dPm#BV^*5^+Ln=}p)KO_LjH5Z z8*vScGC{(^u{hA41$8e7y_Yia0rd0`_iQ;hksKFl0-CWa6(oP#xP4xnyQSZ`pk2KC zW4$HX#+KGm9V@lS1b!d(mn5pXH#+VAZ|iDSZb$LZ*3*cq$_d#MtxLIdryKUSp1@L( zh^eQUJ&N%u9;$GMM&*i6X=-y=AZU*2UR@LF26TaV$I)>V8F8$2yLuR^|BsDme?%8- zmv<1u5b=>)VIcbtYpAeXit`hsOH8kYA=q{-k6Dkbhc3K46Z4(Mcm;_qbbm7y>fOi6 z#(i;5f1o`)W3+*yrFWpEE!A)|TQ`M8tnDjabdIT3#06-abuR-0Lw zg6`!b%1@0P|B9uia$9RMli~qN`%K>Axm#$n#u&Yyy7fmKu!QuADHX6_r(d%*rPM8e z1lO}6CEQotS4K~v@02A9hHHNk;P74u5K;7VYFYKB!X05Tj-gJg7Sj%ZGlba<#5Xz% zaI9h)xS=@kp9bp6B@qqC*1jMUGs}j;*XiAsTlS^)lMAK0>PP3qkIyH=a#U$56pQ1) z&bzUSxCJsU0QZpTB)vf@&ZgH>=Vap4)|^a_Tf$`5g-Y_Ihics4-FJNN8B}LhPD+_m zP;djR2YZ_F(Vu@4)#9Ko5!{tg0*ahkM`QA6wv9lsYR+!|9#GuJu{?y9W;V`hftF$2 zFBT2<4Dp8VmwBi2K9b=9O!xaP0Dnt+prv!uhXICfvn-KR-!iqn>i??4seh*aeWBMH z_n8`eG=ZQ{4=N1N4kL{n_W2a@fH!)>Jis7Hl&RW~^ggaD>P5AMW!)PG-`RAncR8ge zBtk9Wmh%c_`wX6Nk0p3xCM)W&mxq2Z#1*dA7EZI9enIQgAThP3VlBIcNb^PdTql)# zj$2`r6#m5PS@@A5ZVrCtdGN~WPrRFf?>Xk@;a8S=udS-Dr;cfqO>Lv~4rrslYD#c5 zXv);8Nai6uZTB(-r|GrPf~mHV2)RBF5_8v3z&93lGzXh&TSI3KuoJlh%^%Hco%v&K z?RPD8`!h@(VP3T5$+SMbZA2pG$aY$pI#fHtY+WL?8#}5y`DnFQ?U#3sawkyu=*?4g zOYJ<*gnw71zJUXO;FSNq!1#A9nnC}Awm62zU&9vH@|ir#^uOZnA|vZZ`D*_4v%B{0 zG9|(t<#|v!jOQ}>On-(B=L0SG7hf+BPHIQu+@g87q9R=)IvR}d@D0d*Qd*U0M01Q$ z@s8*=?7impjNUr9ewaIT`OlUm5vyV>VwNyJh@?rVH6s1VtfiSL9U`WQ+D$v3NgDV> z|B}a)yNw!%!h*3Zw9rntwd062&PwbGjLwL-UIN4q`7i#pZD9Oa{I;0;OU#3yfZi@^ z9I8!iuN-~?qB!J}*2Ch`E+Ay;<%1l9x1W>0*bR3?u4vqgKfbE3zsZz`F`QtFSU4Sx zF0nMxK1~(TQ!|79U^>@G+ghM(=C_`XVNqwILiQfh`2p`m%Mobjnt&0jy6$OodQGcs1NsGu-INJ%OXSp z&m>+zd@;s<1~;PfQFL5{dRL2$NOe5vWNR@?Bmx_0IBHi%73F;bTJtM-SMTN$K2*5p zBih~DnM%d_It<@Rs>DEDM&&z*=OxAYtQU>5PjfH4?UF%-4N*QYNLW9W26#EK2ti3x zmn0O0)TW#>%_wVgDHBin%z=v=@yeZZlx=kn-99!nz=)atjprbB{Dl@^>4ib=uzzwz zc3^539+COYqy0GSpFJZ#wEip}T>00PwSJ*cf>nN9K%#ZzmJ1WuWw` z5TJV|W&{v7J2Wug|vyne?DIddF=9U3E|Rg7yI0T(E<&6R|%r z8Z3L`mDC!di2rbhRTSZd7kehX>>VXWutD2Z&uTZfjk|m2`|j->{g!zfT{`J!Uq4aq zoBk<4eKjFn;e|d4gx~ z0{lESk>@q@?{#K5d#o~csJ60W2P!>ZI?|Zfc_Juaz)qEIb=Q16w4q;R5M>S-+*R*( zxot{w`-COuQ~l{_$NsX^uY|g>oCm%XYTyS~>7ga}#nQcvDydTa3ScCI!1gFH?iJ*ezyAJYgw&Lj$PwfEhYD=ds-$kv)4-!u4R82fb6EpV1_b`1lH+p+_Wxjhr+S1PL zl?glUm_b0r=uwW!vfIKARGZ6!^3q$%csWVJGhDvF<)&TE+4#A^`0>)m4BKE?b~oYG zwm%UqIV~r_$q~X_vvo;SMP+7u^>bgn?`Ut_!;^pVI~sW;w*RRPV9VWWu@Rk5vGs&q z7YUb__!(1=eRe!z#KOV$`dz|u5c9eTeTC9&4z8Eeiz62Hy};8$xLt_g;4aBo&qlxF z?$*n>q+r$(F6<`MX~bpGS=;vwXcboC7ClDL5mxv-UW9WT8630`Z4qMx?D>G+_Z!xrotAIOMvGy`@wr16 z6--yX_Le-9m_%Ze7U?0ehKNA@>Y{c1LLnq#lSu@c zWFy0XWDjW~JpkZXus{-G3sL|6tp@-3Q+?fq1o(}%K;vi#qf4iYiEJaqRbcgSGttrX z)v1g%)KgR{pgPWKs@$dGeSRgTZ^CQvt(sOpjo&5;-XJwfHBvPY?0;r>uPLsawaQJ@ zC^F}$)al+SUa6kxo@%8U24i?op@*}`nxMDMndi23R$Qt; zOBS2ZrsBo3SGh?x3DZ<+^`zkbVAQLWD2NXTh!-TDH7GbYpC~xX`A$xmlNXi~8k!T9 z_j$sE3H{xOg=leqHp?ZF1%j%j`mS(%*cMopBQ9;mk&nUcC;29WgiUU5Yz-C3v^n6uw}0U zG-khc>I}%)c(7WfWM7Tj(Pz0e0^|EObh`(&C^8p2fEyr9#u zt!ty^LQ|+okfrcHMlG{eP{&*%9q>KjcVZR41@VqI$RFJIn7=9!CV-3J0*t7%4kHQV 
z8zImJ2Jd6Y@p5Lw|D=h*p4d#UU1J@L3#Y6>G|Do90@G!ANQej_^H|EC=SJoc zV-!+FAh{8l6-pv7d>tR4^p}rkO=M@noO0;nJYpL*h~y<=Ox=qig{l2BuM;N)R@I} za-7a~!pbWZQvo7Tizl~HetHxc&$`(laGhB^tJu}2A>>>4pOm)H;v?o}_hzRf&JJ7Y zT4+8}`jmjQ+?Qn;D`jco&yGw-pnYKbBq- z%`kLB+2w_omVi2!z6GcSeJ-_|?2KlnoqF+r z$A)-JXzfh_`%q0xJ7cFGEPoqPj66VzMWcPqOK9w-^H%l&Ihu3?|G3KqqZqWmTV z`z+Egrh!LY2;0VuP534hYrQGf3hlxoABEI^S&_MJAfyYpA9N}4241yZz`K;<|Absfei}~xNDjY&EaY9C41cHidwMW? zf4xv%JEr94=5sZL0(o*p_qZ?mX^H^5t04=}ZU%E2la3dy{LoEW`8$+3`uWqoflBKQ zecPDmx6j;1?F*lI)T1j|v|j3l_CGO2GR(WgZqkR9MM=jys3!3#1Vo@A@^z*WAaGH) zBVuSM!o-8xzdb1F`Z4wqbhiYY(a(~^n>}Wu&ZA~~nB!|Jd$3|{>~Y=zzHb*Y=Jgvi zsa#>-6!d?NyWw;&$1YctyuLM(tZ7}yWGv9-$B7-wB-$xeGiETE+`8G> zhO#?{IonY-=lalM?k?nDAfAQE{tp0j(9QXkFCq5n6mL0H4jJnV0c>*A{8f{q$Yo2M z#w0keo2K;Qb22k%IyN=uhXq!yfWCdb=o7`h=d1(PS8URR=U)yT)=|nn8q1ATHbpZj zwpbp8UA)H`rgxf~9Rn#t(u=4nHq|)Y@G(nI-d>>f(fZzV@x@U~zR16n$f+;jJay~t z_Cx2Q=Dc@$p1GE~iuKm(TCpJ>Oqk*w@9Y3SPV3yhy_41+6xar}Jt=Ld|NFPT0R%$a zwd?+JNBa9mBMEo(o3~=-U+ypu+=?`M)Oc9Ylv+<`ad|=wfD3lEnYIGD-N@w_eF+2T z9kMWFxP3YF8V|$)yX%@=`#t+zo5u}%4wqNCt4S&u0@#V?*POpG=&mN;fQO7U=T2g! zFl%d>DXdAo1_fs(g*l0RS9xi^UFC>%xTtv0s2><*=0S@#Q&prO6R964%12 zVSNi0^c_jHzy^t(y+?=MQYkxw>8-B*@nhr0*|Yb_z|PYnjF(8KT#%Jtd@PpMWM!>( zSmn^&17D?(u#-&#NnsJ?hV#s%@Uny^SS9Snym>c{q*6dYUXRIbF|I9@>@JVq%F0)- zHf*S<*c}9{;fD{kwGAdBn_<9cSkGhU)vL|qEJDQevuEFSD%Klkgk;Oi=Ai75R|aAR znLBwUAf{O+Z^Dd{te~q`huNLJpH8R7@$@{dju&^B7t7V_xpDl%v3&h9=*ty*m>3MNHAZ_trsip=*>URG}Mdr4?IM2~>Qn$4g7XD~2sSVHk4f1An+2-()8zpmNb{!(oDV zZvyHsV&>!eaY*z`bR~KU+8ZJgH_RRFo05{Dy?xw1?4KzbfB&2T-+?V#`hEM?N#@RV zb9=`d;Q?9ejcKxJ=EKDvy=QN-3REKs?bT_r!dFdr5;LZqo{6LZui7d9Gf_ zlgR6YX|gq?M4x6KPufg^C)q$I2FHB-8B{6(wI-H+7i0;t!5u;H0aEMa$f#t|OL2ro z3S5jnN%NI#o-XEEObl;rYd${7*|_{ds?~a!eqTY*jZ@E8x-&J%y|=@v@-`Q=Mk4OG z6r8Q7J_vkWpB^X-PGzXT!7yGdv`6=K*?&Dj-&j#jfA62dT89o$#1l;Z9rz>aBai+K5sw7DQaiWM?pxqM-@x&&7@dS#y6D#V-OAAoTEo#N0xdwxqYQ+hb1IvyU zaCIY!HbOfE!^%XI{2Bs6P+iT3y{MSzJ+_-683%R;$AdV!^^$z2d!MC_i_eSQDlZX~H$C3bmoUhhn@D z1vlRr(Fqh$G)NB6#Y0K%e>bo?v>y1B_Cf=+WwZgP*9>?4)^V5$yg^laS_K!;+ zafLUTbUo|_k&N9DZ-iYZp?&B=NCXM#E}N(n8=|m}Fg3CuPByN8+$sBq2IcLA;VyG} zrKj`)N%p=upto4BT*KtafgA>+B4!k*6hQy|1zpM1#776u%{9mo-1s%=srrAE88a|? 
zy)jxi)ezs-q2X3w0^pqK+39J#el+gf=>?l`?}INmeJ<|kbtCV_;=GuOzHXk!xyO07 zD_G7tuY#@!3(v(zTzUuUr-E{JxfiD@l+;o%=A+(3mW zxIRD(e7GwS>c)hR;qas^Jix!1{PA?dzg6jYe`qV>9sXZ0WjvpK94ci6qx#LH4L}Sbc7kmVx!0X=FWP8JW+o)wJ5;BYZL@X|eh9 z>ehbMV)4HUXt5mvFa@pO-fsTpfob{L@>%_k5@4^_e|igIq=odEhe}JcVQa9qv~gXJ zq+JWC1rYiHDn>acFWeFuYWYcazQw}B=fLi(Kky4K$RNd^8x6P6>vQOy|)dH0-(QI;Sd0wa8{*dMy){ks-YA3P zM6()(KeMDsUzBzPr_7FNE<@fx@hu+e-hg&>mb}u~E#60{yp%=o^Al$Rd4!EKS z>$~SpyvY8tr|Ui==FcTDy3-LUx|M_9qsgA(HLFI*cq<o`|znOoK957{etLwM*FCfS>BJ!!- zt2U+)?#<`TGeeAQlJFCpR-I~kzFB)hl(=%--+ds!Qs5c2t8?7IVWhcu_5S*Pf!0xE ziK6_Ei%c3_a9JDSX5k6I&e_g;2}djJX=q3Nss_?n0g?U+66ky99P{7a8?7;=$Il~ zzbkTl&>|l~8m5Y;>ALbtZoU)4v4zisfDz&Rp|e`J!;_dC zh|dwe#V}v9q8|(T)~vfxp{2IGb?h{t0+*m8O~p|@y5|Rs1o;>>4<&(|+G#+KXc+@x zXv~vi(V>8BkD*6|Vw|BW>vuy#wmNVTR_2b=wbZ5;E@^o1`fRU7R$->0N2zJtlAphM z1uE9*EQhmQSMt`ZljI}l`f1xvCIi#ttv^d~%#b33{5aZO!q15{IFvP8@uO53qpXuP zhWO|ods1Z+VjR>q2R91jae++YZwfd;W`*sR$%^iQSWnBcTb6RxvJ^PWFsMMj2=13&`#u0>I%C{6bN zB|(n3HC*e1po6+ZJvzs9VKqukMH{GVW9MQ&_Wh2gwt^*G18~rAOJY{s)>yDV7RuJl zey-=l{KCOw4oQk84_|xVo!yt#n~F+&WX4BUp=d>c5nVWhL&(zA-46)OzRO>7O-HUP z_s1S6wopi(M;OEKyxV$6rKTVVIADL42q-cVH*5u0364pMYqZ80=IM!MQ%h`_txL1E z^E&M=ZP}RK(z9h#n?s(jzh~#~-G~B>JPCpdq=|c=A0iK-NpvY{2O%XYdtVcH1rT60 zwQ^c#f{-9VYr;!yes6LFEs0w+Lk+WbzIV$etIXT^48MPcijpX4|B zHKcw>WL1#51Y*{HU&))A73l{;?kYit7HH-Gc`2!L$(>~?4-$p_5Ci|Gvuvfi3sZNb z!yhghpt7AA$-9~v$+5gR){^Wcnq@hFP%)ILB-?jSI+TJyMVqd83b5Bi(e#97~lrbhEn`|~QdBiHnYAK}1{j-sP78l3z$ycNSATQnG5`b4n zUORziX1a{I^MS*>0cmziT-KOwunxX-0Qq?h177eipZ*bU4rIC_AJf@GZ3Y7q;KlJ} znML(og}YOrb_VK!3RnxQvFg&7atl=rt*Ml%?@C?z2{*m21#raN_*L$i&|hlM`?1Bq+OuZLBd{t@ZTplU1{6EH)As3<5pGd7vG{2v85=Z=jEu z1ZIdgKqn|3zyU=7tWdInNy(g2gplk)on#M+;W}J@xDk|Ud>@b1+*YE5VQ|SzAt|^YBa|?T8H7Ynlf}nS zBh!Au&Ja%Zii*%Dt($<XqR`TMmi}Fir!lIvT>{(o;8Ylh6+plRt6UKBN5$0X8j<$FTv< z8zXsjODzifHPU#Ui}r@4QA2d!=3)+FGm6qv7%$f2k^h zN=E1mAF`C5#2PAhz4?*=sc9Z-N&_{r2p|0xpZR-hCsp>OMk(r!SqLSyGgG|GB3X=? 
zj*}gLTC0WcTfOd{xBS1a);8DZ(IVP6`L-v%}{JB>xgVK^x;gVHF)sziIlXR@VuM6wu zEK9=?8j7yLaln`y0XygPSRwr*^6>N^m@P*vPh)b}XO0+>wqY|pVFTJeg||5HM(zN{ zUc6@oAGt}l@B7HFGj!CiIjpF&P2BEbe_65#{=~5?#+1 z1!hJTeV@6iMnUTlk6>yPpueh&b0-ZmuBw6wn~;my2(4%e>*v-m)@ra$U~?pP(i?hR z{_l}?7jAf<|2|BI*eMj<2@2EX%KPau^b%;{La+>*f)}?l!*BYtRk{LC4B7nYk}hfV zyPFWytBC5y^-H7=zrxL8<@t~}tpeH#=hFr|KIk9E;T(Dt9+RewOHgD)-&v4PHVi0K zGLl{Q2T=ME4e^;vHay=ayIk58r(ZS1)5Z!;n02c0-k)j%+OJ>zK@cb8Ph84Z$9CYv zUq@&usj}cAT-hTC&wDuU@j1>|_bDtseh()k4G4^M3^Mj!&mziQ&n@m62xGIupk)fY zf^ijL0{wQ1Ir|nkH#R1t%kgcA$3&N`q^$67pUjr5_JGe1|BCMR8o2BXn=AkpVIvFM z__-m_IyfsL4nUgC9(t}Ne~wEc#|;o7H2w=mqcA&ytr+LTnL9B03#TJ6D@L6-=SGQr zWaJzT32DrYxq%LN&Q6OGo^gv|qNjO{p|}|+(`pv&(b=YnbtEQUj)B(E7qInL<7@1p zz~rjy%mYi=D_k=+#<})yk99|u?B!~VP%*!n=ysUohgPMqSWB%Y>A`I|7Bv(e=U%PJ z5Lh;Ys?Lgt^~YsXZ53~hVsHn(`DGQY!}HsjnGYY-bspvDv-6L_ z3XiyCBkqE*)9iYV;_1|s&FVv-^uacg_!05*aU~@;B988f(j3t`S{A(?sfb^0J<#gD zTn%=JVPQPpS}z6S74^+YY|LYLxIQ&~`m0*&n_3Sn*m{9yX}Gt&hYlWcUaSH;C8#(N zJLFdqOz5wd!nWMu)oCUJJSCJ5Tk-lQ*8vNT*m2jJa&NBULq@?$g$+$m{GnwWv_CHs|(HQ53BAqh4ou&^bs zl3)~QVBRo718gHhni~uOqD*d#%?Wt<=~-qlRIkALDPCC3bt;!%I&A~IwR8xAx%5Qf z#DEKdLWuG;PRItUAh38vlvY_`hj=}~u{@Bg-lrF%z!L3Hr=zzvaW5-v6z+is(S9Te zdq6oTd9d}K#8dG9Pcg1zYVF~8d_eM)y6mep(X{!l+LBA)3@tQp2_usggtHjb=I3x) zsmB6KJ3{llwtg`U%?$j?^DYFbb<~4$@|}3UqjV%lir zLs&8}2*G93ha?nI8ePsQdXysuxT2`AA()oT_l1Cfqgn&*sZ;w=5p}Gj`p{uFkXZ(`IZbQ6?)P&vPj$r zcqT&m8R>S`ow9EkX|gB1?|WAaWmYGHz1-fXQFel2)F4%ujl6!VzTep2_ujly-iX(1 zkxoB*^hH@{=3juGI^$pvni9%TAj=3(i(%YxONmkE&f4PXC|PP%(GS|yo!Oq=8DU)r zKi2OCjC$~r{QUklmjUyi)jJh!Pv6{@>s+%2u6lNAM`736PPYvSib$)?wZU(h(;GjC ze+~;=(d+eUX#6NuY7EP1^y4q)*6Uio1lU^6L~x*E@lRjnvlAbqmFvyaWf{H!ocL@L zt;aUHKln&HA>2$ZI@Xg&_7Air7GDfD?M+V7xGZ$lyQ>V~iL5Y*ekH6ZvfXZ9C=bi1 zgL&6uQ7HB(8Ys;RNmI7#V_u?7-b9QZHA}JZj$aR|&9Qt@*=0wadKmuX#@od=G8OZuuSjz9N|0_OI70t;~q6e&nV1T5}tY@MLejyTUqi z-EZpvc9BahHni^nb{*mHB|`lS-(l8aANGb$nK{^D@5@*YV)Y3J;aVakc4Rw-VxeHRk6`;+{VH)4_Yic1B?`D#ftxioK5iviGr<%%@)~{Y z8?0WtAm5yY7QuS)p+grJE>=GC5dSBR7mDJ=|FQ@gqlUzC_M{6TPv?PU#4+M>(tpGn zA_DcbpVt2sg%FG4!W}L1L7s zB`P2|{?PDBT~fKc(OK9aG-WGQX&y=L$!=+GDuoILW4KbG|CW<2#c-Pg&w2myr~nmO z#6VMNS5}PddnMb>>#?jTT^o;%FIl^y(OJC5FGZUBIm?Z5twem1R#+Wjz%&DZg`dcGH=npnrri0G(Z zYf^tp%NANR#Nb&x9alY$gSnPREroY&DM+!6Ym;f$>3oyd6I*fP@UkZVCPQWpSG(RxEtRN7GuG_7lZ}O(^M$!;61Pz%%FyN;{ zwKKZWyXoGW=!yk4f~{eT9N~(TX$~R3o{eao9L}}3N1vL&e>jESxHAX5{(^*%T5UQ6 zdZ8e;6C!q_6CgiQNP5N2c+VQwyp#}foTDe($a+q{$R}ZLfEzZ${qRgXnU+c!d>OPuBJm04peuo<60!HZIAgb1M}@vLm-tQ?_J>pOKI;HWPj3a9CzBi%+Rs|dw#UjUqZ zkCneZyX1O^VPiBiZPZIAT&Je1qA|D0_ZX^)X=5CU74i=tMaUDBNY_Bn3kc!GT9Qcz z8bJ5#PTr*2l-wQ1v9}_vE;Xno5DK%bm6wJV6@`|{XE^FR4=s|H&KLy#k1I|0PmxGc z{L{w-$(^T2(*4IMNIjV&%ee_|4#|c}M*rDR({SsTYzsPr7rAl1m7%zuIH(vb{8zz< z+~iYRdL8?)RZtO0(s;)Ncvk4w(!l@y2wmSz1@@&h)tdA$1gtjwL6J7Sl$P zk|)odD>Gvb*a~+tDOtB?a?%(^Mnrx}!$-u30JU^y`YC``(DH z`zKKIE*k)|C*bhlh0Kb31o>&2WP*pJxdW_57%b`+N}UiLqc!y zme+%%E#HEe)9d&4T~yj|(2IsR?_;JB>PQBxrgm*%(T1tJ+W#aJN-;0v21&2C6esld zP~$}d2#7#K)Qe0bK;V+@*U*WH(0Cry{_)?^{?9Z2hyE%Bf9jWuV@<9rP!~`uTurgH zl|xvOjURH@1wI}0W5OFZX%d;-w#WaEY!}1X0FF&2FMW19r(T4}2e}?H zXc1(rGx)N}nx7iyYsd@N+s~Niuw#M3ozKZgr|H<#?9XP{B02Q&!zE7*`)>ywusmgx zVjjIeby`PR_{O-%NM%#hsgWfz4R-Aj2bkV|RaOL~2uv%cD%n)yV#AwEJ^5gv)>G^C zx1$%1TKZo0zEmax0q249;Gj1;2etB5&z3}6FY3}=u^=YbBi6wV-b?E{c(9Mw z?mwpuYP(-nzwXD6b@gBl#NEE*2X|_8bUM>;r@OyPm;K;Q^1$sdqic<;1udwhJXOmV zWB^=ns4adbq1%mIj&amv0IyNCP8jYyF1^MTNkB(kv(u>Cs8jRoNw>*GRW2%$DTWYs z+=XoyZjQUC$UPJhQ_VT4tR!Y_Ei;LgI_#*??N4H+vaiVLC_XQ&S$m6R%VhZlO`jG+ z893wQtikP%tP&=B;B6qgI@WmuxYSiVR$B(&bTHrjf|>Fx{;N5j*-YP{`|9% zp2*Bx`~35#*Usl8u?)3U?Gcd)7#rqQ+7BecTholXohVnOl~-RYH8!)^1~=#8#muxl 
z2vW&b5f3|$hp9aaq+DK8%>%xXagN#d*{I`qCu>7SV4>gvy*ckf=Y;+Pa1 zIy=Q=adgT>>7q;bM3S1!%&m5fc7sFkMni>8x&X zQjj2dG3K{LIJH!=`(1}CE1y2ywQK3p4u4IL?#Oz zM~y?DL@TLEG%J%7vGUCJYl9U|dFm^|x}L%k`A#hxt)wZ@r}DIy1xh~F^@(}jQ-(Ed z-X!G02R?8a{yy6igUUg3(`iC?2LkGEV&>yUaY*zsbR{|$?Ex8yo8(TrAJ&=^XWh7C z*zdV&AD`?oud#jmM!iNSX;!Urc7Dm5;sI&v%>~j0$Sry*_rq=w7D%_15j~qd-Dt}QZe#!yB zZw@@#;=)uTkKPHc%G+Dm8iu&*Sa@z}^-196^x#BMKr%xKP6qQLp(DBv8-E+}|H#tV z|F|)LwM?9#h+^V>dhng9&eZ=@|E1L_>wa`-mEr_}C{9Q%)E6*fTG^^7o6i>W_sS=Yzp zWcW_)mL8!?uE{unAPopwFeM(&6RU#NbXAB-RXr?zcSUt+nRvKbr3yBP6uT`S7HuCv z`K9wj>R`1{9imn)HEYZJPlT7E@YYKsdLBg>4ibHJu~34`k6x?}tp}cE!_XLQBW(;C zz8vU{m^yt%#ZT+951u`W9i3ePqNN^W(v9GoL^3u-ycv9hg!ZKSK|;un?l_H0vLcE` z2nAtz{WNhP%QUmJH|>iu6shL+`U(y^hI~0T~QvLYEXO5655RMoTc4ZpMpxANv*pL+epW0$ zdp@e`hEWV+aa|&0FK4%2ZZ9wO1e;hFOmKs-+l8oZf~m$E-J}S&cEfyfrlTEhRBvB> zgGW+Q5bi(|(OQneL*B&PLZXnf_oQq;j30^x2)1?S8Wk<)j{8<6~1cq zwXEB7MoJy(%Ixe9*(dH{g@7Qc$%<*iq_U4yX7#}|uMq57Him|-PQ>#&OKMYC2G%2{ zk@b*eWIk$!Q)){(`P3zDvHI|`wtsNq@INSMNg4q#g{>c7?EUD9Y5CCd&VsrKuxC5p zz5o%DA_lBRrKDJ~HCSuPy0V=qO>1%?gj#@#QLM}hF$V>iKgn)uHj9vzu;Ie&z`*Pf zq3H zuElA%7cIIz1{W-9$vAwzx?44&5_{k@4BTmlbI)mEHOvRLVL2f=(;M1+%{3O;z4a;0 z*iJYFL(dn&ajN2 zQNfZf(@E2v3-E9s?SI0Re`Tx%KJQMJX1D~ucX0a0WAE=o?A}@TqhC+@pGt^y6Zo5A z{?b8Q*Ys^SNBtkBQ>5pWx@QltuYUzMXOnCF(k?l22WlV#E%7D59~V1&{Jc)=USpEC zJM~7(tiD!pW@_D5JFT8~(;7H$T8q8Yjkl`11&4Lbz=DU7hQxcVko;?k_^8rWpERwd zXomQ&l@5(wB|~=1dKWlFFwNaW&(wNW`N966J0~Y&{r@2)qSqYieSh>D1UW`bxpYU> z*0jQd`I3;f2d^%eu!n_YZf zttqel+rxvAyW5?v{K_A-LxqGI4O(=_Sfa;(5ff&V=&mubaoxK2=oz1on3SB7nifjW z$jr*_mD4*nFTYRUf_{ZX#U!%jwk-Edw%vVClgm8WDomM`37_$@cH6e5umwQnR zQ$@{&^)M-N)Y(VRCjwQtMV&Y*PRiLIKV%{(=cHvU=TJ~LKk#Xb+7t<6+%k)f1>^>d z0}>R|0ySwoj4#O=M?S&H(sRCBXRK-|;}x5$yH`b}g~kEaQOQwQuKWTO=XFu+3cj~P zXVGgK5=7&?dr +
+
+ +

+ {{ title ? title : t("common.loading") }}... +

+

+ {{ describe ? describe : t("common.waitTip") }} +

+
+
+ + + + + diff --git a/EdgeCraftRAG/ui/vue/src/i18n/en.ts b/EdgeCraftRAG/ui/vue/src/i18n/en.ts index 39d3cf0fa8..4c6ba3e7f3 100644 --- a/EdgeCraftRAG/ui/vue/src/i18n/en.ts +++ b/EdgeCraftRAG/ui/vue/src/i18n/en.ts @@ -27,6 +27,14 @@ export default { all: "All", reset: "Reset", uploadTip: "Click or drag file to this area to upload", + loading: "Loading", + waitTip: "Please wait patiently and do not refresh the page during this period.", + copy: "Copy", + send: "Send", + regenerate: "Regenerate", + copySucc: "Copy successful !", + copyError: "Copy failed !", + emptyText: "The content is empty !", }, system: { title: "System Status", @@ -85,10 +93,12 @@ export default { indexer: "Indexer", indexerType: "Indexer Type", embedding: "Embedding Model", + embeddingUrl: "Embedding URL", embeddingDevice: "Embedding run device", retriever: "Retriever", retrieverType: "Retriever Type", topk: "Search top k", + topn: "Top n", postProcessor: "PostProcessor", postProcessorType: "PostProcessor Type", rerank: "Rerank Model", @@ -99,15 +109,17 @@ export default { language: "Large Language Model", llmDevice: "LLM run device", weights: "Weights", - local: "Local", - vllm: "Vllm", - vector_uri: "Vector Uri", + local: "Local (OpenVINO)", + vllm: "Remote (vLLM)", + vector_url: "Vector Database URL", modelName: "Model Name", - vllm_url: "Vllm Url", + vllm_url: "vLLM URL", + kbadmin: "kbadmin", }, valid: { nameValid1: "Please input name", nameValid2: "Name should be between 2 and 30 characters", + nameValid3: "The name only supports letters, numbers, and underscores.", nodeParserType: "Please select Node Parser Type", chunkSizeValid1: "Please select Chunk Size", chunkSizeValid2: "The value of Chunk Size cannot be less than Chunk Overlap", @@ -115,9 +127,11 @@ export default { chunkOverlapValid2: "The value of Chunk Overlap cannot be greater than Chunk Size", windowSize: "Please select Chunk Window Size", indexerType: "Please select Indexer Type", - embedding: "Please select Embedding Model", + embedding: "Please select embedding Model", + embeddingUrl: "IP : Port, (e.g. 192.168.1.1:13020)", embeddingDevice: "Please select Embedding run device", retrieverType: "Please select Retriever Type", + retrieverTypeFormat: "Retriever type can only select kbadmin", topk: "Please select Top k", postProcessorType: "Please select PostProcessor Type", rerank: "Please select Rerank Model", @@ -126,14 +140,28 @@ export default { language: "Please select Large Language Model", llmDevice: "Please select LLM run device", weights: "Please select Weights", - vector_uri: "IP : Port, (e.g. 192.168.1.1:19530)", + kb_vector_url: "IP : Port, (e.g. 192.168.1.1:29530)", + vector_url: "IP : Port, (e.g. 192.168.1.1:19530)", vllm_url: "IP : Port, (e.g. 
192.168.1.1:8086)", - urlValid1: "Please enter url", + urlValid1: "Please enter vector url", urlValid2: "Please enter the correct url", urlValid3: "URL cannot be accessed", urlValid4: "Test passed !", urlValid5: "The URL has not passed verification yet", modelName: "Please enter model name", + vllmUrlValid1: "Please enter vLLM url", + vllmUrlValid2: "Please enter the correct url", + vllmUrlValid3: "URL cannot be accessed", + vllmUrlValid4: "Test passed !", + vllmUrlValid5: "The URL has not passed verification yet", + nodeParserTypeTip: "Both Indexer Type and Retriever Type will be set to kbadmin at the same time", + indexerTypeTip: "Both Node Parser Type and Retriever Type will be set to kbadmin at the same time", + retrieverTypeTip: "Both Node Parser Type and Indexer Type will be set to kbadmin at the same time", + retrieverChangeTip: "Please go to the Indexer stage to complete the data", + indexerTypeValid1: "Indexer type can only select kbadmin", + modelRequired: "Please enter embedding model url", + modelFormat: "Please enter the correct url", + retrieverValid: "Please return to the Indexer stage to supplement information.", }, desc: { name: "The name identifier of the pipeline", @@ -143,14 +171,15 @@ export default { windowSize: "The number of sentences on each side of a sentence to capture", indexerType: "The type of index structure responsible for building based on the parsed nodes", embedding: "Embed the text data to represent it and build a vector index", - embeddingDevice: "The device used by the Embedding Model", + embeddingUrl: "Connecting embedding model url", + embeddingDevice: "The device used by the embedding model", retrieverType: - "The retrieval type used when retrieving relevant nodes from the index according to the user's query", + "The retrieval type used when retrieving relevant nodes from the index according to the user's experience", topk: "The number of top k results to return", postProcessorType: "Select postprocessors for post-processing of the context", rerank: "Rerank Model", rerankDevice: "Rerank run device", - generatorType: "Local inference generator or vllm generator", + generatorType: "Local inference generator or vLLM generator", language: "The large model used for generating dialogues", llmDevice: "The device used by the LLM", weights: "Model weight", @@ -167,10 +196,11 @@ export default { "Sentence window node parser. Splits a document into Nodes, with each node being a sentence. 
Each node contains a window from the surrounding sentences in the metadata.", unstructured: "UnstructedNodeParser is a component that processes unstructured data.", milvusVector: "Embedding vectors stored in milvus", - vector_uri: "Connecting milvus uri", + vector_url: "Connecting milvus vector url", test: "Test", - modelName: "Vllm model name", - vllm_url: " Test if Vllm url is available ", + modelName: "vLLM model name", + vllm_url: " Test if vLLM url is available ", + kbadmin: "Third party knowledge base engine", }, }, generation: { @@ -225,11 +255,28 @@ export default { activated: "Activated", nameValid1: "Please input knowledge base name", nameValid2: "Name should be between 2 and 30 characters", - nameValid3: "The name cannot start with a number", + nameValid3: "Alphanumeric and underscore only, starting with a letter or underscore.", desValid: "Please input knowledge base description", activeValid: "Please select whether to activate", - uploadValid: "Single file size not exceeding 50M.", + uploadValid: "Single file size not exceeding 200M.", deleteFileTip: "Are you sure delete this file?", + selectTitle: "Create Type Select", + selectDes: "Please select the type you want to create", + experience: "Experience", + experienceDes: + "Experience refers to the knowledge and skills acquired through practical involvement, trial, and reflection, serving as a key foundation for solving real-world problems.", + kbDes: + "A Knowledge Base is a centralized repository for storing organized information such as documents, FAQs, and guides, enabling teams or users to quickly access and share knowledge.", + type: "Type", + original: "Original", + kbadmin: "kbadmin", + typeValid: "Please select knowledge base type", + nameRequired: "Please select kbadmin name", + waitTip: "Please be patient and wait for the file upload to complete.", + done: "Finished", + successfully: "Successfully ", + failed: "Failed", + totalTip: "files", }, request: { pipeline: { @@ -248,9 +295,51 @@ export default { updateSucc: "Knowledge Base update successfully !", deleteSucc: "Knowledge Base deleted successfully !", }, + experience: { + createSucc: "Experience created successfully!", + updateSucc: "Experience update successful!", + deleteSucc: "Experience deleted successfully!", + }, }, error: { notFoundTip: "Uh oh! 
It seems like you're lost", back: "Go Home", }, + experience: { + create: "Create Experience", + edit: "Edit Experience", + import: "Import Experience", + fileFormatTip: "Supports JSON format, with file size not exceeding 100M.", + importSuccTip: "Files upload successful!", + importErrTip: "Files upload failed!", + uploadValid: "Single file size not exceeding 100M.", + experience: "Experience", + detail: "Detail", + operation: "Operation", + deleteTip: "Are you sure delete this experience?", + addExperience: "Add Experience", + delExperience: "Delete Experience", + addContent: "Add Content", + delContent: "Delete Content", + total: "Total experience: ", + unique: "Unique", + selectTip: "Please choose an appropriate method for data update", + cover: "Cover", + increase: "Append", + deactivateTip: "Are you sure deactivate this experience?", + activeTip: "Are you sure activate this experience?", + label: { + experience: "Experience", + contents: "Experience Content", + content: "Content", + }, + placeholder: { + experience: "Please enter Experience", + content: "Please enter content", + }, + valid: { + experience: "Experience cannot be empty", + content: "Content cannot be empty", + }, + }, }; diff --git a/EdgeCraftRAG/ui/vue/src/i18n/zh.ts b/EdgeCraftRAG/ui/vue/src/i18n/zh.ts index 2a1a318851..24b810bdc4 100644 --- a/EdgeCraftRAG/ui/vue/src/i18n/zh.ts +++ b/EdgeCraftRAG/ui/vue/src/i18n/zh.ts @@ -27,6 +27,14 @@ export default { all: "全选", reset: "重置", uploadTip: "点击或将文件拖到此区域进行上传", + loading: "加载中", + waitTip: "请耐心等待,在此期间不要刷新页面。", + copy: "复制", + send: "发送", + regenerate: "重新生成", + copySucc: "复制成功!", + copyError: "复制失败!", + emptyText: "内容为空!", }, system: { title: "系统状态", @@ -84,10 +92,12 @@ export default { indexer: "索引器", indexerType: "索引器类型", embedding: "嵌入模型", + embeddingUrl: "嵌入模型地址", embeddingDevice: "模型运行设备", retriever: "检索器", retrieverType: "检索器类型", topk: "检索 top k", + topn: "Top n", postProcessor: "节点后处理器", postProcessorType: "节点后处理器类型", rerank: "重排模型", @@ -98,15 +108,17 @@ export default { language: "语言大模型", llmDevice: "运行设备", weights: "权重", - local: "本地", - vllm: "Vllm", - vector_uri: "Vector Uri", + local: "本地(OpenVINO)", + vllm: "远程(vLLM)", + vector_url: "向量数据库地址", modelName: "模型名称", - vllm_url: "Vllm 地址", + vllm_url: "vLLM 地址", + kbadmin: "kbadmin", }, valid: { nameValid1: "请输入名称", nameValid2: "请输入 2 到 30 个字符的名称", + nameValid3: "名称仅支持字母、数字和下划线", nodeParserType: "请选择节点解析器类型", chunkSizeValid1: "请选择分块大小", chunkSizeValid2: "分块大小的值不能小于分块重叠值", @@ -115,8 +127,10 @@ export default { windowSize: "请选择句子上下文窗口大小", indexerType: "请选择索引器类型", embedding: "请选择嵌入模型", + embeddingUrl: "IP : 端口,(例如 192.168.1.1:13020)", embeddingDevice: "请选择嵌入模型运行设备", retrieverType: "请选择检索器类型", + retrieverTypeFormat: "检索器类型只能选择kbadmin", topk: "请选择Top k", postProcessorType: "请选择后处理器类型", rerank: "请选择重排模型", @@ -125,14 +139,28 @@ export default { language: "请选择大语言模型", llmDevice: "请选择大语言模型运行设备", weights: "请选择模型权重", - vector_uri: "IP : 端口,(例如 192.168.1.1:19530)", + kb_vector_url: "IP : 端口,(例如 192.168.1.1:29530)", + vector_url: "IP : 端口,(例如 192.168.1.1:19530)", vllm_url: "IP : 端口,(例如 192.168.1.1:8086)", - urlValid1: "URL 不能为空", - urlValid2: "请输入正确的URL", - urlValid3: "URL无法访问", + urlValid1: "向量数据库地址不能为空", + urlValid2: "请输入正确的向量数据库地址", + urlValid3: "向量数据库地址无法访问", urlValid4: "测试通过!", - urlValid5: "URL还未通过校验", + urlValid5: "向量数据库地址还未通过校验", modelName: "请输入模型名称", + vllmUrlValid1: "vLLM地址不能为空", + vllmUrlValid2: "请输入正确的vLLM地址", + vllmUrlValid3: "vLLM地址无法访问", + vllmUrlValid4: "测试通过!", + vllmUrlValid5: "vLLM地址还未通过校验", + 
nodeParserTypeTip: "索引器类型和检索器类型将同时设置为kbadmin", + indexerTypeTip: "节点解析器类型和检索器类型将同时设置为kbadmin", + retrieverTypeTip: "索引器类型和节点解析器类型将同时设置为kbadmin", + retrieverChangeTip: "请前往索引器阶段补全数据", + indexerTypeValid1: "索引器类型只能选择kbadmin", + modelRequired: "请输入嵌入模型地址", + modelFormat: "请输入正确的模型地址", + retrieverValid: "请回到Indexer阶段补充信息", }, desc: { name: "Pipeline的名称标识,用于区分不同工作流", @@ -142,13 +170,14 @@ export default { windowSize: "每个节点捕获的上下文句子窗口大小,用于增强语义完整性", indexerType: "基于解析节点构建的索引结构类型", embedding: "将文本转换为向量表示的过程", + embeddingUrl: "嵌入模型地址", embeddingDevice: "执行嵌入模型推理的硬件设备(CPU/GPU)", retrieverType: "根据用户查询从索引中检索节点的算法类型", topk: "检索时返回的最相关结果数量", postProcessorType: "对检索结果进行后处理的组件类型", rerank: "对检索结果进行二次排序的模型,提升答案相关性", rerankDevice: "执行重排模型推理的硬件设备(CPU/GPU)", - generatorType: "回答生成方式的类型(本地部署模型或 vllm 高效推理框架)", + generatorType: "回答生成方式的类型(本地部署模型或 vLLM 高效推理框架)", language: "用于生成自然语言回答的大模型(如 LLaMA、ChatGLM)", llmDevice: "大语言模型推理的硬件设备(需匹配模型规模要求)", weights: "大模型的权重", @@ -157,17 +186,18 @@ export default { vectorsimilarity: "根据向量相似性进行检索", autoMerge: "该检索器会尝试将上下文合并到父级上下文中", bm25: "使用BM25算法检索节点的BM25检索器", - faissVector: "嵌入存储在Faiss索引中。", + faissVector: "矢量索引存储在Faiss中。", vector: "矢量存储索引", simple: "解析文本,优先选择完整的句子。", - hierarchical: "使用借点解析将文档分割成递归层次节点", + hierarchical: "使用NodeParser将文档拆分为递归层次结构的节点。", sentencewindow: "将文档分割成节点,每个节点代表一个句子。每个节点包含一个来自元数据中周围句子的窗口", unstructured: "一个处理非结构化数据的组件", - milvusVector: "嵌入存储在Milvus索引中", - vector_uri: "测试Milvus地址是否可用", + milvusVector: "矢量索引存储在Milvus中", + vector_url: "测试Milvus地址是否可用", test: "测 试", - modelName: "Vllm 模型名称", - vllm_url: "测试Vllm地址是否可用", + modelName: "vLLM 模型名称", + vllm_url: "测试vLLM地址是否可用", + kbadmin: "第三方知识库系统", }, }, generation: { @@ -221,11 +251,28 @@ export default { activated: "激活状态", nameValid1: "请输入知识库名称", nameValid2: "请输入 2 到 30 个字符的名称", - nameValid3: "名称不能以数字开头", + nameValid3: "仅支持字母、数字和下划线,必须以字母或下划线开头。", desValid: "请输入知识库描述", activeValid: "请选择是否启用该功能。", - uploadValid: "单个文件大小不得超过 50MB", + uploadValid: "单个文件大小不得超过 200MB", deleteFileTip: "您确定要删除此文档吗?此操作不可恢复。", + selectTitle: "创建类型选择", + selectDes: "请选择要创建的数据类型", + experience: "经验注入", + experienceDes: + "Experience是指个人或团队在实践过程中积累的知识和技能,通常通过实际操作、试错和反思获得,是解决实际问题的重要依据", + kbDes: + "知识库是系统化存储信息的集合,用于集中管理文档、常见问题、操作指南等知识内容,便于团队或用户快速查找和共享信息。", + type: "类型", + original: "原始的", + kbadmin: "kbadmin", + typeValid: "请选择知识库类型", + nameRequired: "请选择kbadmin名称", + waitTip: "请耐心等待所有文件上传完成!", + done: "已完成", + successfully: "成功", + failed: "失败", + totalTip: "个文件", }, request: { pipeline: { @@ -244,9 +291,51 @@ export default { updateSucc: "知识库更新成功!", deleteSucc: " 知识库删除成功!", }, + experience: { + createSucc: "经验创建成功!", + updateSucc: "经验更新成功!", + deleteSucc: " 经验删除成功!", + }, }, error: { notFoundTip: "Oops 好像走错地方啦~", back: "首页", }, + experience: { + create: "新建经验", + edit: "编辑经验", + import: "导入经验", + fileFormatTip: "仅支持JSON格式,文件大小不超过100M", + importSuccTip: "文件上传成功!", + importErrTip: "文件上传失败!", + uploadValid: "单个文件大小不得超过 200MB", + experience: "经验", + detail: "详情", + operation: "操作", + deleteTip: "确定要删除这个经验?", + addExperience: "新增经验", + delExperience: "删除经验", + addContent: "新增内容", + delContent: "删除内容", + total: "经验总数: ", + unique: "唯一", + selectTip: "请选择合适的方式进行数据更新", + cover: "覆盖", + increase: "追加", + deactivateTip: "您确定要停用该经验库吗?", + activeTip: "您确定要启用该经验库吗?", + label: { + experience: "经验", + contents: "经验内容", + content: "内容", + }, + placeholder: { + experience: "请输入经验", + content: "请输入内容", + }, + valid: { + experience: "经验不能为空", + content: "内容不能为空", + }, + }, }; diff --git a/EdgeCraftRAG/ui/vue/src/layout/Header.vue 
b/EdgeCraftRAG/ui/vue/src/layout/Header.vue index 33d0038d62..0de80ece3d 100644 --- a/EdgeCraftRAG/ui/vue/src/layout/Header.vue +++ b/EdgeCraftRAG/ui/vue/src/layout/Header.vue @@ -2,7 +2,7 @@
- +
- + +
+ + diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/ExperienceDetail.vue b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/ExperienceDetail.vue new file mode 100644 index 0000000000..24a1f937ec --- /dev/null +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/ExperienceDetail.vue @@ -0,0 +1,225 @@ + + + + + diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/ImportDialog.vue b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/ImportDialog.vue new file mode 100644 index 0000000000..b558bac195 --- /dev/null +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/ImportDialog.vue @@ -0,0 +1,100 @@ + + + + + diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/UpdateDialog.vue b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/UpdateDialog.vue new file mode 100644 index 0000000000..e828d47356 --- /dev/null +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/UpdateDialog.vue @@ -0,0 +1,387 @@ + + + + + diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/index.ts b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/index.ts new file mode 100644 index 0000000000..c9d1df62ed --- /dev/null +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/index.ts @@ -0,0 +1,7 @@ +// Copyright (C) 2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +import UpdateDialog from "./UpdateDialog.vue"; +import ImportDialog from "./ImportDialog.vue"; + +export { UpdateDialog, ImportDialog }; diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/KnowledgeDetail.vue b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/KnowledgeDetail.vue new file mode 100644 index 0000000000..64b13e320d --- /dev/null +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/KnowledgeDetail.vue @@ -0,0 +1,336 @@ + + + + + diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/KnowledgeDetial.vue b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/KnowledgeDetial.vue deleted file mode 100644 index 290a85cd8e..0000000000 --- a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/KnowledgeDetial.vue +++ /dev/null @@ -1,302 +0,0 @@ - - - - - diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/SelectTypeDialog.vue b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/SelectTypeDialog.vue new file mode 100644 index 0000000000..8b9ab7978e --- /dev/null +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/SelectTypeDialog.vue @@ -0,0 +1,134 @@ + + + + + diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/UpdateDialog.vue b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/UpdateDialog.vue index f987ff5cb4..e95e7436f6 100644 --- a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/UpdateDialog.vue +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/UpdateDialog.vue @@ -17,14 +17,45 @@ autocomplete="off" :label-col="{ style: { width: '100px' } }" > - + + + {{ $t("knowledge.original") }} + {{ $t("knowledge.kbadmin") }} + + + + + + {{ + item + }} + + - + {{ $t("pipeline.activated") }} {{ $t("pipeline.inactive") }} @@ -59,10 +90,11 @@ import { requestKnowledgeBaseCreate, 
requestKnowledgeBaseUpdate, + getkbadminList, } from "@/api/knowledgeBase"; import { isValidName } from "@/utils/validate"; import { FormInstance } from "ant-design-vue"; -import { computed, ref } from "vue"; +import { computed, ref, onMounted } from "vue"; import { useI18n } from "vue-i18n"; const props = defineProps({ @@ -74,11 +106,17 @@ const props = defineProps({ type: String, default: "create", }, + dialogFlag: { + type: String, + default: "knowledge", + }, }); interface FormType { - name: string; + name: string | undefined; description: string; + comp_type: string; active: boolean; + comp_subtype: string; } const validateName = async (rule: any, value: string) => { @@ -89,7 +127,6 @@ const validateName = async (rule: any, value: string) => { if (len < 2 || len > 30) { return Promise.reject(t("knowledge.nameValid2")); } - console.log(isValidName(value)); if (!isValidName(value)) { return Promise.reject(t("knowledge.nameValid3")); } @@ -98,9 +135,11 @@ const validateName = async (rule: any, value: string) => { const { t } = useI18n(); const emit = defineEmits(["close", "switch"]); +const { dialogFlag } = props; + const typeMap = { - create: t("knowledge.create"), - edit: t("knowledge.edit"), + create: t(`${dialogFlag}.create`), + edit: t(`${dialogFlag}.edit`), } as const; const dialogTitle = computed(() => { return typeMap[props.dialogType as keyof typeof typeMap]; @@ -108,20 +147,39 @@ const dialogTitle = computed(() => { const isEdit = computed(() => { return props.dialogType === "edit"; }); -const isActivated = computed(() => { - return props.dialogData?.active; +const isExperience = computed(() => { + return props.dialogFlag === "experience"; +}); + +const isOriginal = computed(() => { + return form.comp_subtype === "origin_kb"; }); const modelVisible = ref(true); const submitLoading = ref(false); const formRef = ref(); -const { name = "", description = "", active = false } = props.dialogData; +const { + comp_subtype = "origin_kb", + name = undefined, + description = "", + active = false, + experience_active = false, +} = props.dialogData; const form = reactive({ - name, + comp_subtype, + name: isExperience.value ? "Experience" : name, description, - active, + comp_type: dialogFlag, + active: isExperience.value ? experience_active : active, }); - -const rules = reactive({ +const kbList = ref([]); +const rules: FormRules = reactive({ + comp_subtype: [ + { + required: true, + message: t("knowledge.typeValid"), + trigger: "change", + }, + ], name: [ { required: true, @@ -129,6 +187,13 @@ const rules = reactive({ trigger: ["blur", "change"], }, ], + kbName: [ + { + required: true, + message: t("knowledge.nameRequired"), + trigger: "change", + }, + ], active: [ { required: true, @@ -137,17 +202,36 @@ const rules = reactive({ }, ], }); +const handleTypeChange = () => { + form.name = undefined; +}; +const queryKbadmin = async () => { + const data: any = await getkbadminList(); + kbList.value = [].concat(data); +}; +// Format parameter +const formatFormParam = () => { + const { name, description, comp_type, active, comp_subtype } = form; + return { + name, + description, + comp_type, + comp_subtype: !isExperience.value ? comp_subtype : undefined, + active: !isExperience.value ? active : undefined, + experience_active: isExperience.value ? active : undefined, + }; +}; // Submit const handleSubmit = () => { formRef.value?.validate().then(() => { submitLoading.value = true; const { name } = form; - const apiUrl = - props.dialogType === "edit" - ? 
requestKnowledgeBaseUpdate - : requestKnowledgeBaseCreate; - apiUrl(form) + const apiUrl = isEdit.value + ? requestKnowledgeBaseUpdate + : requestKnowledgeBaseCreate; + + apiUrl(formatFormParam()) .then(() => { emit("switch", name); handleCancel(); @@ -165,6 +249,9 @@ const handleSubmit = () => { const handleCancel = () => { emit("close"); }; +onMounted(() => { + queryKbadmin(); +}); diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/ConfigDrawer.vue b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/ConfigDrawer.vue index fe94bd86e4..8e1aaa2ba6 100644 --- a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/ConfigDrawer.vue +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/ConfigDrawer.vue @@ -18,7 +18,13 @@ class="form-wrap" >
-    {{ $t("generation.retriever") }}
+    {{ $t("generation.retriever") }}
+    {{ $t("generation.tips") }}
     {{ $t("generation.desc.top_n") }}
+    {{ $t("pipeline.desc.topk") }}
     {{ $t("generation.title") }}
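This hunk adds a tooltip (generation.tips) beside the retriever heading and a second slider for the retriever candidate count, alongside the existing top n slider; the new value maps onto the k field added to ConfigType below. A rough sketch of the reactive form these sliders would bind to, with purely illustrative defaults (the real values come from the active pipeline, not from this patch):

import { reactive } from "vue";

// Field names mirror the ConfigType change below; the numbers are placeholders.
interface GenerationConfig {
  top_n: number; // results kept after post-processing
  k: number; // candidates fetched by the retriever (new in this patch)
  temperature: number;
  top_p: number;
  top_k: number;
}

const configForm = reactive<GenerationConfig>({
  top_n: 5,
  k: 30,
  temperature: 0.7,
  top_p: 1,
  top_k: 50,
});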
@@ -128,7 +149,10 @@ diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/type.ts b/EdgeCraftRAG/ui/vue/src/views/chatbot/type.ts index 6f853480c3..044af6ad9f 100644 --- a/EdgeCraftRAG/ui/vue/src/views/chatbot/type.ts +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/type.ts @@ -19,6 +19,7 @@ export interface ThinkType { } export interface ConfigType { top_n: number; + k: number; temperature: number; top_p: number; top_k: number; diff --git a/EdgeCraftRAG/ui/vue/src/views/error/404.vue b/EdgeCraftRAG/ui/vue/src/views/error/404.vue index b1d3e0df30..ce18c7340a 100644 --- a/EdgeCraftRAG/ui/vue/src/views/error/404.vue +++ b/EdgeCraftRAG/ui/vue/src/views/error/404.vue @@ -15,8 +15,6 @@ import notFound from "@/assets/svgs/404-icon.svg"; import router from "@/router"; import { HomeFilled } from "@ant-design/icons-vue"; -import { h } from "vue"; - const handleGoHome = () => { router.push("/"); }; diff --git a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/QuickStart.vue b/EdgeCraftRAG/ui/vue/src/views/main/QuickStart.vue similarity index 100% rename from EdgeCraftRAG/ui/vue/src/views/pipeline/components/QuickStart.vue rename to EdgeCraftRAG/ui/vue/src/views/main/QuickStart.vue diff --git a/EdgeCraftRAG/ui/vue/src/views/main/index.vue b/EdgeCraftRAG/ui/vue/src/views/main/index.vue index d819f75d0a..2c6a416d07 100644 --- a/EdgeCraftRAG/ui/vue/src/views/main/index.vue +++ b/EdgeCraftRAG/ui/vue/src/views/main/index.vue @@ -58,7 +58,8 @@ + + diff --git a/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/components/Table.vue b/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/components/Table.vue new file mode 100644 index 0000000000..48764d1b53 --- /dev/null +++ b/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/components/Table.vue @@ -0,0 +1,204 @@ + + + + + diff --git a/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/components/UpdateDialog.vue b/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/components/UpdateDialog.vue new file mode 100644 index 0000000000..09f2dc9bd5 --- /dev/null +++ b/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/components/UpdateDialog.vue @@ -0,0 +1,382 @@ + + + + + diff --git a/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/components/index.ts b/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/components/index.ts new file mode 100644 index 0000000000..cc6876330d --- /dev/null +++ b/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/components/index.ts @@ -0,0 +1,8 @@ +// Copyright (C) 2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +import Table from "./Table.vue"; +import UpdateDialog from "./UpdateDialog.vue"; +import DynamicConfigs from "./DynamicConfigs.vue"; + +export { Table, UpdateDialog, DynamicConfigs }; diff --git a/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/enum.ts b/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/enum.ts new file mode 100644 index 0000000000..b9f2afa271 --- /dev/null +++ b/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/enum.ts @@ -0,0 +1,13 @@ +// Copyright (C) 2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +export const AgentType = [ + { + name: "Simple", + value: "simple", + }, + { + name: "Deep Search", + value: "deep_search", + }, +] as const; diff --git a/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/index.vue b/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/index.vue new file mode 100644 index 0000000000..47da93d7ed --- /dev/null +++ 
b/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/index.vue @@ -0,0 +1,74 @@ + + + + + diff --git a/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/type.ts b/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/type.ts new file mode 100644 index 0000000000..1ceaa77944 --- /dev/null +++ b/EdgeCraftRAG/ui/vue/src/views/settings/components/Agent/type.ts @@ -0,0 +1,10 @@ +// Copyright (C) 2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +export interface ModelType { + model_id: string | undefined; + model_path: string; + model_url?: string; + device: string; + weight?: string; +} diff --git a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/columnsList.ts b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/columnsList.ts similarity index 100% rename from EdgeCraftRAG/ui/vue/src/views/pipeline/components/columnsList.ts rename to EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/columnsList.ts diff --git a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/DetailDrawer.vue b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/DetailDrawer.vue similarity index 98% rename from EdgeCraftRAG/ui/vue/src/views/pipeline/components/DetailDrawer.vue rename to EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/DetailDrawer.vue index fb54741f73..a2aca72f05 100644 --- a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/DetailDrawer.vue +++ b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/DetailDrawer.vue @@ -217,7 +217,9 @@ {{ $t("pipeline.config.generatorType") }} - chatqna + + {{ formData.generator.generator_type }} +
  • {{ $t("pipeline.config.llm") }} diff --git a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/ImportDialog.vue b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/ImportDialog.vue similarity index 100% rename from EdgeCraftRAG/ui/vue/src/views/pipeline/components/ImportDialog.vue rename to EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/ImportDialog.vue diff --git a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/Table.vue b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/Table.vue similarity index 92% rename from EdgeCraftRAG/ui/vue/src/views/pipeline/components/Table.vue rename to EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/Table.vue index d925ca8df4..2f097cf86f 100644 --- a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/Table.vue +++ b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/Table.vue @@ -93,6 +93,14 @@ @click="handleDelete(record)" >{{ $t("common.delete") }} + {{ $t("common.export") }} import { + getPipelineDetailByName, requestPipelineDelete, requestPipelineSwitchState, } from "@/api/pipeline"; @@ -133,7 +142,8 @@ import { Modal } from "ant-design-vue"; import { createVNode, h, ref } from "vue"; import { useI18n } from "vue-i18n"; import TableColumns from "@/components/TableColumns.vue"; -import getTableColumns from "./columnsList"; +import { downloadJson } from "@/utils/common"; +import getTableColumns from "../columnsList"; const { t } = useI18n(); const pipelineStore = pipelineAppStore(); @@ -209,6 +219,13 @@ const handleDelete = (row: EmptyObjectType) => { }, }); }; +// export +const handleExport = async (row: EmptyObjectType) => { + const data: any = await getPipelineDetailByName(row.name); + const pipelineJson = JSON.parse(data); + + downloadJson(data, pipelineJson.name); +}; const handleColumnChange = (checkedColumns: TableColumns[]) => { tableColumns.value = [...checkedColumns]; }; @@ -238,7 +255,6 @@ watch( .table-container { .p-16; .pb-24; - .mt-20; border-radius: 8px; background-color: var(--bg-content-color); diff --git a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/UpdateDialog/Activated.vue b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/UpdateDialog/Activated.vue similarity index 100% rename from EdgeCraftRAG/ui/vue/src/views/pipeline/components/UpdateDialog/Activated.vue rename to EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/UpdateDialog/Activated.vue diff --git a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/UpdateDialog/Basic.vue b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/UpdateDialog/Basic.vue similarity index 100% rename from EdgeCraftRAG/ui/vue/src/views/pipeline/components/UpdateDialog/Basic.vue rename to EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/UpdateDialog/Basic.vue diff --git a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/UpdateDialog/CreateDialog.vue b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/UpdateDialog/CreateDialog.vue similarity index 100% rename from EdgeCraftRAG/ui/vue/src/views/pipeline/components/UpdateDialog/CreateDialog.vue rename to EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/UpdateDialog/CreateDialog.vue diff --git a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/UpdateDialog/EditDialog.vue b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/UpdateDialog/EditDialog.vue similarity index 100% rename from 
EdgeCraftRAG/ui/vue/src/views/pipeline/components/UpdateDialog/EditDialog.vue rename to EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/UpdateDialog/EditDialog.vue diff --git a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/UpdateDialog/Generator.vue b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/UpdateDialog/Generator.vue similarity index 79% rename from EdgeCraftRAG/ui/vue/src/views/pipeline/components/UpdateDialog/Generator.vue rename to EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/UpdateDialog/Generator.vue index 63e24138b2..bf378c210b 100644 --- a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/UpdateDialog/Generator.vue +++ b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/UpdateDialog/Generator.vue @@ -36,31 +36,11 @@ + +
    + + {{ item }} + + + + {{ $t("pipeline.desc.test") }} + +
    + +
    + + + + + + + + + +
    ({ + topn: { + 1: "1", + 100: "100", + }, +}); const postProcessorList = PostProcessor; const modelList = ref([]); const deviceList = ref([]); @@ -195,6 +229,9 @@ const handleTypeChange = (value: SelectValue, row: EmptyObjectType) => { }); } }; +const handleTopnChange = () => { + formRef.value?.validateFields(["topn"]); +}; // Handling Model Folding Events const handleModelVisible = async (visible: boolean) => { if (visible) { diff --git a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/UpdateDialog/Retriever.vue b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/UpdateDialog/Retriever.vue similarity index 98% rename from EdgeCraftRAG/ui/vue/src/views/pipeline/components/UpdateDialog/Retriever.vue rename to EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/UpdateDialog/Retriever.vue index 9d22ccd08b..7f77011cd6 100644 --- a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/UpdateDialog/Retriever.vue +++ b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/UpdateDialog/Retriever.vue @@ -43,14 +43,14 @@ @@ -144,7 +144,7 @@ const optionIntroduction = computed(() => { const sliderMarks = reactive({ retrieval: { 1: "1", - 200: "200", + 500: "500", }, }); const handleTypeChange = (value: SelectValue) => { diff --git a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/UpdateDialog/index.ts b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/UpdateDialog/index.ts similarity index 100% rename from EdgeCraftRAG/ui/vue/src/views/pipeline/components/UpdateDialog/index.ts rename to EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/UpdateDialog/index.ts diff --git a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/index.ts b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/index.ts similarity index 58% rename from EdgeCraftRAG/ui/vue/src/views/pipeline/components/index.ts rename to EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/index.ts index 275d4eee6c..86e2ee7b24 100644 --- a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/index.ts +++ b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/components/index.ts @@ -3,11 +3,8 @@ import DetailDrawer from "./DetailDrawer.vue"; import ImportDialog from "./ImportDialog.vue"; -import QuickStart from "./QuickStart.vue"; -import System from "./System.vue"; -import SystemChart from "./SystemChart.vue"; import Table from "./Table.vue"; import CreateDialog from "./UpdateDialog/CreateDialog.vue"; import EditDialog from "./UpdateDialog/EditDialog.vue"; -export { CreateDialog, DetailDrawer, EditDialog, ImportDialog, QuickStart, System, SystemChart, Table }; +export { CreateDialog, DetailDrawer, EditDialog, ImportDialog, Table }; diff --git a/EdgeCraftRAG/ui/vue/src/views/pipeline/enum.ts b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/enum.ts similarity index 96% rename from EdgeCraftRAG/ui/vue/src/views/pipeline/enum.ts rename to EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/enum.ts index 0c1aadf6e8..b2f8a9598e 100644 --- a/EdgeCraftRAG/ui/vue/src/views/pipeline/enum.ts +++ b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/enum.ts @@ -89,7 +89,11 @@ export const PostProcessor = [ export const Generator = [ { - name: "Chatqna", + name: "ChatQnA", value: "chatqna", }, + { + name: "FreeChat", + value: "freechat", + }, ] as const; diff --git a/EdgeCraftRAG/ui/vue/src/views/pipeline/index.vue b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/index.vue similarity index 
59% rename from EdgeCraftRAG/ui/vue/src/views/pipeline/index.vue rename to EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/index.vue index f4440fd90a..c68b8b0055 100644 --- a/EdgeCraftRAG/ui/vue/src/views/pipeline/index.vue +++ b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/index.vue @@ -1,19 +1,5 @@ - diff --git a/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/type.ts b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/type.ts new file mode 100644 index 0000000000..1ceaa77944 --- /dev/null +++ b/EdgeCraftRAG/ui/vue/src/views/settings/components/Pipeline/type.ts @@ -0,0 +1,10 @@ +// Copyright (C) 2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +export interface ModelType { + model_id: string | undefined; + model_path: string; + model_url?: string; + device: string; + weight?: string; +} diff --git a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/SystemChart.vue b/EdgeCraftRAG/ui/vue/src/views/settings/components/System/SystemChart.vue similarity index 98% rename from EdgeCraftRAG/ui/vue/src/views/pipeline/components/SystemChart.vue rename to EdgeCraftRAG/ui/vue/src/views/settings/components/System/SystemChart.vue index ccc6ff57f3..82bf8eb969 100644 --- a/EdgeCraftRAG/ui/vue/src/views/pipeline/components/SystemChart.vue +++ b/EdgeCraftRAG/ui/vue/src/views/settings/components/System/SystemChart.vue @@ -1,5 +1,5 @@