diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
new file mode 100644
index 00000000000..7f5566fb979
--- /dev/null
+++ b/.devcontainer/Dockerfile
@@ -0,0 +1,5 @@
+FROM node:18-bullseye
+
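+# Create the non-root "vscode" user referenced by "remoteUser" in devcontainer.json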
+RUN useradd -m -s /bin/bash vscode
+RUN mkdir -p /workspaces && chown -R vscode:vscode /workspaces
+WORKDIR /workspaces
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index ebfd2685ee6..a3bb7805501 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -13,5 +13,6 @@
}
},
"postCreateCommand": "",
- "features": { "ghcr.io/devcontainers/features/git:1": {} }
+ "features": { "ghcr.io/devcontainers/features/git:1": {} },
+ "remoteUser": "vscode"
}
diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml
index c67fca63019..277ac84f856 100644
--- a/.devcontainer/docker-compose.yml
+++ b/.devcontainer/docker-compose.yml
@@ -2,7 +2,9 @@ version: "3.8"
services:
app:
- image: node:19-bullseye
+ build:
+ context: ..
+ dockerfile: .devcontainer/Dockerfile
# restart: always
links:
- mongodb
@@ -30,8 +32,8 @@ services:
# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)
- # Uncomment the next line to use a non-root user for all processes - See https://aka.ms/vscode-remote/containers/non-root for details.
- # user: vscode
+ # Use a non-root user for all processes - See https://aka.ms/vscode-remote/containers/non-root for details.
+ user: vscode
# Overrides default command so things don't shut down after the process ends.
command: /bin/sh -c "while sleep 1000; do :; done"
diff --git a/.env.example b/.env.example
index 2e23a09a349..bd212cc7baa 100644
--- a/.env.example
+++ b/.env.example
@@ -1,21 +1,18 @@
-#=============================================================#
-# LibreChat Configuration #
-#=============================================================#
-# Please refer to the reference documentation for assistance #
-# with configuring your LibreChat environment. The guide is #
-# available both online and within your local LibreChat #
-# directory: #
-# Online: https://docs.librechat.ai/install/dotenv.html #
-# Locally: ./docs/install/dotenv.md #
-#=============================================================#
+#=====================================================================#
+# LibreChat Configuration #
+#=====================================================================#
+# Please refer to the reference documentation for assistance #
+# with configuring your LibreChat environment. The guide is #
+# available both online and within your local LibreChat #
+# directory: #
+# Online: https://docs.librechat.ai/install/configuration/dotenv.html #
+# Locally: ./docs/install/configuration/dotenv.md #
+#=====================================================================#
#==================================================#
# Server Configuration #
#==================================================#
-APP_TITLE=LibreChat
-# CUSTOM_FOOTER="My custom footer"
-
HOST=localhost
PORT=3080
@@ -26,6 +23,13 @@ DOMAIN_SERVER=http://localhost:3080
NO_INDEX=true
+#===============#
+# JSON Logging #
+#===============#
+
+# Use when processing console logs in a cloud deployment like GCP/AWS
+CONSOLE_JSON=false
+
#===============#
# Debug Logging #
#===============#
@@ -40,38 +44,62 @@ DEBUG_CONSOLE=false
# UID=1000
# GID=1000
+#===============#
+# Configuration #
+#===============#
+# Use an absolute path, a relative path, or a URL
+
+# CONFIG_PATH="/alternative/path/to/librechat.yaml"
+
#===================================================#
# Endpoints #
#===================================================#
-# ENDPOINTS=openAI,azureOpenAI,bingAI,chatGPTBrowser,google,gptPlugins,anthropic
+# ENDPOINTS=openAI,assistants,azureOpenAI,bingAI,google,gptPlugins,anthropic
PROXY=
+#===================================#
+# Known Endpoints - librechat.yaml #
+#===================================#
+# https://docs.librechat.ai/install/configuration/ai_endpoints.html
+
+# GROQ_API_KEY=
+# SHUTTLEAI_KEY=
+# OPENROUTER_KEY=
+# MISTRAL_API_KEY=
+# ANYSCALE_API_KEY=
+# FIREWORKS_API_KEY=
+# PERPLEXITY_API_KEY=
+# TOGETHERAI_API_KEY=
+
#============#
# Anthropic #
#============#
ANTHROPIC_API_KEY=user_provided
-ANTHROPIC_MODELS=claude-1,claude-instant-1,claude-2
+# ANTHROPIC_MODELS=claude-3-opus-20240229,claude-3-sonnet-20240229,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_REVERSE_PROXY=
#============#
# Azure #
#============#
-# AZURE_API_KEY=
-AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4
-# AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo
-# PLUGINS_USE_AZURE="true"
-AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE
+# Note: these variables are DEPRECATED
+# Use the `librechat.yaml` configuration for `azureOpenAI` instead
+# You may also continue to use them if you opt out of using the `librechat.yaml` configuration
-# AZURE_OPENAI_API_INSTANCE_NAME=
-# AZURE_OPENAI_API_DEPLOYMENT_NAME=
-# AZURE_OPENAI_API_VERSION=
-# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME=
-# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME=
+# AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo # Deprecated
+# AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4 # Deprecated
+# AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE # Deprecated
+# AZURE_API_KEY= # Deprecated
+# AZURE_OPENAI_API_INSTANCE_NAME= # Deprecated
+# AZURE_OPENAI_API_DEPLOYMENT_NAME= # Deprecated
+# AZURE_OPENAI_API_VERSION= # Deprecated
+# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME= # Deprecated
+# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated
+# PLUGINS_USE_AZURE="true" # Deprecated
#============#
# BingAI #
@@ -80,14 +108,6 @@ AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE
BINGAI_TOKEN=user_provided
# BINGAI_HOST=https://cn.bing.com
-#============#
-# ChatGPT #
-#============#
-
-CHATGPT_TOKEN=
-CHATGPT_MODELS=text-davinci-002-render-sha
-# CHATGPT_REVERSE_PROXY=
-
#============#
# Google #
#============#
@@ -101,7 +121,7 @@ GOOGLE_KEY=user_provided
#============#
OPENAI_API_KEY=user_provided
-# OPENAI_MODELS=gpt-3.5-turbo-1106,gpt-4-1106-preview,gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,gpt-4,gpt-4-0314,gpt-4-0613
+# OPENAI_MODELS=gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
DEBUG_OPENAI=false
@@ -115,7 +135,15 @@ DEBUG_OPENAI=false
# OPENAI_REVERSE_PROXY=
-# OPENAI_ORGANIZATION=
+# OPENAI_ORGANIZATION=
+
+#====================#
+# Assistants API #
+#====================#
+
+ASSISTANTS_API_KEY=user_provided
+# ASSISTANTS_BASE_URL=
+# ASSISTANTS_MODELS=gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
#============#
# OpenRouter #
@@ -127,7 +155,7 @@ DEBUG_OPENAI=false
# Plugins #
#============#
-# PLUGIN_MODELS=gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,gpt-4,gpt-4-0314,gpt-4-0613
+# PLUGIN_MODELS=gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
DEBUG_PLUGINS=true
@@ -147,20 +175,20 @@ AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
# DALL·E
#----------------
-# DALLE_API_KEY= # Key for both DALL-E-2 and DALL-E-3
-# DALLE3_API_KEY= # Key for DALL-E-3 only
-# DALLE2_API_KEY= # Key for DALL-E-2 only
-# DALLE3_SYSTEM_PROMPT="Your DALL-E-3 System Prompt here"
-# DALLE2_SYSTEM_PROMPT="Your DALL-E-2 System Prompt here"
-# DALLE_REVERSE_PROXY= # Reverse proxy for DALL-E-2 and DALL-E-3
-# DALLE3_BASEURL= # Base URL for DALL-E-3
-# DALLE2_BASEURL= # Base URL for DALL-E-2
+# DALLE_API_KEY=
+# DALLE3_API_KEY=
+# DALLE2_API_KEY=
+# DALLE3_SYSTEM_PROMPT=
+# DALLE2_SYSTEM_PROMPT=
+# DALLE_REVERSE_PROXY=
+# DALLE3_BASEURL=
+# DALLE2_BASEURL=
# DALL·E (via Azure OpenAI)
# Note: requires some of the variables above to be set
#----------------
-# DALLE3_AZURE_API_VERSION= # Azure OpenAI API version for DALL-E-3
-# DALLE2_AZURE_API_VERSION= # Azure OpenAI API versiion for DALL-E-2
+# DALLE3_AZURE_API_VERSION=
+# DALLE2_AZURE_API_VERSION=
# Google
#-----------------
@@ -175,6 +203,14 @@ SERPAPI_API_KEY=
#-----------------
SD_WEBUI_URL=http://host.docker.internal:7860
+# Tavily
+#-----------------
+TAVILY_API_KEY=
+
+# Traversaal
+#-----------------
+TRAVERSAAL_API_KEY=
+
# WolframAlpha
#-----------------
WOLFRAM_APP_ID=
@@ -202,7 +238,7 @@ MEILI_MASTER_KEY=DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFCt
OPENAI_MODERATION=false
OPENAI_MODERATION_API_KEY=
-# OPENAI_MODERATION_REVERSE_PROXY=not working with some reverse proxys
+# OPENAI_MODERATION_REVERSE_PROXY=
BAN_VIOLATIONS=true
BAN_DURATION=1000 * 60 * 60 * 2
@@ -230,6 +266,8 @@ LIMIT_MESSAGE_USER=false
MESSAGE_USER_MAX=40
MESSAGE_USER_WINDOW=1
+ILLEGAL_MODEL_REQ_SCORE=5
+
#========================#
# Balance #
#========================#
@@ -278,6 +316,9 @@ OPENID_ISSUER=
OPENID_SESSION_SECRET=
OPENID_SCOPE="openid profile email"
OPENID_CALLBACK_URL=/oauth/openid/callback
+OPENID_REQUIRED_ROLE=
+OPENID_REQUIRED_ROLE_TOKEN_KIND=
+OPENID_REQUIRED_ROLE_PARAMETER_PATH=
OPENID_BUTTON_LABEL=
OPENID_IMAGE_URL=
@@ -286,15 +327,15 @@ OPENID_IMAGE_URL=
# Email Password Reset #
#========================#
-EMAIL_SERVICE=
-EMAIL_HOST=
-EMAIL_PORT=25
-EMAIL_ENCRYPTION=
-EMAIL_ENCRYPTION_HOSTNAME=
-EMAIL_ALLOW_SELFSIGNED=
-EMAIL_USERNAME=
-EMAIL_PASSWORD=
-EMAIL_FROM_NAME=
+EMAIL_SERVICE=
+EMAIL_HOST=
+EMAIL_PORT=25
+EMAIL_ENCRYPTION=
+EMAIL_ENCRYPTION_HOSTNAME=
+EMAIL_ALLOW_SELFSIGNED=
+EMAIL_USERNAME=
+EMAIL_PASSWORD=
+EMAIL_FROM_NAME=
EMAIL_FROM=noreply@librechat.ai
#========================#
@@ -308,6 +349,16 @@ FIREBASE_STORAGE_BUCKET=
FIREBASE_MESSAGING_SENDER_ID=
FIREBASE_APP_ID=
+#===================================================#
+# UI #
+#===================================================#
+
+APP_TITLE=LibreChat
+# CUSTOM_FOOTER="My custom footer"
+HELP_AND_FAQ_URL=https://librechat.ai
+
+# SHOW_BIRTHDAY_ICON=true
+
#==================================================#
# Others #
#==================================================#
diff --git a/.eslintrc.js b/.eslintrc.js
index a3d71acd69f..e85e0d768ca 100644
--- a/.eslintrc.js
+++ b/.eslintrc.js
@@ -19,6 +19,7 @@ module.exports = {
'e2e/playwright-report/**/*',
'packages/data-provider/types/**/*',
'packages/data-provider/dist/**/*',
+ 'packages/data-provider/test_bundle/**/*',
'data-node/**/*',
'meili_data/**/*',
'node_modules/**/*',
@@ -131,6 +132,12 @@ module.exports = {
},
],
},
+ {
+ files: ['./packages/data-provider/specs/**/*.ts'],
+ parserOptions: {
+ project: './packages/data-provider/tsconfig.spec.json',
+ },
+ },
],
settings: {
react: {
diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md
index 3f39cc00b3b..cb767cbd7cd 100644
--- a/.github/CODE_OF_CONDUCT.md
+++ b/.github/CODE_OF_CONDUCT.md
@@ -60,7 +60,7 @@ representative at an online or offline event.
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement here on GitHub or
-on the official [Discord Server](https://discord.gg/uDyZ5Tzhct).
+on the official [Discord Server](https://discord.librechat.ai).
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 36618437fab..142f67c953f 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -8,7 +8,7 @@ If the feature you would like to contribute has not already received prior appro
Please note that a pull request involving a feature that has not been reviewed and approved by the project maintainers may be rejected. We appreciate your understanding and cooperation.
-If you would like to discuss the changes you wish to make, join our [Discord community](https://discord.gg/uDyZ5Tzhct), where you can engage with other contributors and seek guidance from the community.
+If you would like to discuss the changes you wish to make, join our [Discord community](https://discord.librechat.ai), where you can engage with other contributors and seek guidance from the community.
## Our Standards
diff --git a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml
index b6b64c3f2de..5c88b9f70dc 100644
--- a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml
+++ b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml
@@ -50,7 +50,7 @@ body:
id: terms
attributes:
label: Code of Conduct
- description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/CODE_OF_CONDUCT.md)
+ description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md)
options:
- label: I agree to follow this project's Code of Conduct
required: true
diff --git a/.github/SECURITY.md b/.github/SECURITY.md
index bd105f2526c..b01e04e0160 100644
--- a/.github/SECURITY.md
+++ b/.github/SECURITY.md
@@ -12,7 +12,7 @@ When reporting a security vulnerability, you have the following options to reach
- **Option 2: GitHub Issues**: You can initiate first contact via GitHub Issues. However, please note that initial contact through GitHub Issues should not include any sensitive details.
-- **Option 3: Discord Server**: You can join our [Discord community](https://discord.gg/5rbRxn4uME) and initiate first contact in the `#issues` channel. However, please ensure that initial contact through Discord does not include any sensitive details.
+- **Option 3: Discord Server**: You can join our [Discord community](https://discord.librechat.ai) and initiate first contact in the `#issues` channel. However, please ensure that initial contact through Discord does not include any sensitive details.
_After the initial contact, we will establish a private communication channel for further discussion._
@@ -39,11 +39,11 @@ Please note that as a security-conscious community, we may not always disclose d
This security policy applies to the following GitHub repository:
-- Repository: [LibreChat](https://github.com/danny-avila/LibreChat)
+- Repository: [LibreChat](https://github.librechat.ai)
## Contact
-If you have any questions or concerns regarding the security of our project, please join our [Discord community](https://discord.gg/NGaa9RPCft) and report them in the appropriate channel. You can also reach out to us by [opening an issue](https://github.com/danny-avila/LibreChat/issues/new) on GitHub. Please note that the response time may vary depending on the nature and severity of the inquiry.
+If you have any questions or concerns regarding the security of our project, please join our [Discord community](https://discord.librechat.ai) and report them in the appropriate channel. You can also reach out to us by [opening an issue](https://github.com/danny-avila/LibreChat/issues/new) on GitHub. Please note that the response time may vary depending on the nature and severity of the inquiry.
## Acknowledgments
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 06d2656bd64..a1542cb76e4 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -15,8 +15,9 @@ Please delete any irrelevant options.
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] This change requires a documentation update
-- [ ] Documentation update
- [ ] Translation update
+- [ ] Documentation update
+
## Testing
@@ -26,6 +27,8 @@ Please describe your test process and include instructions so that we can reprod
## Checklist
+Please delete any irrelevant options.
+
- [ ] My code adheres to this project's style guidelines
- [ ] I have performed a self-review of my own code
- [ ] I have commented in any complex areas of my code
@@ -34,3 +37,4 @@ Please describe your test process and include instructions so that we can reprod
- [ ] I have written tests demonstrating that my changes are effective or that my feature works
- [ ] Local unit tests pass with my changes
- [ ] Any changes dependent on mine have been merged and published in downstream modules.
+- [ ] New documents have been locally validated with mkdocs
diff --git a/.github/workflows/backend-review.yml b/.github/workflows/backend-review.yml
index fddb6cdac63..db46653c651 100644
--- a/.github/workflows/backend-review.yml
+++ b/.github/workflows/backend-review.yml
@@ -30,10 +30,28 @@ jobs:
- name: Install Data Provider
run: npm run build:data-provider
+
+ - name: Create empty auth.json file
+ run: |
+ mkdir -p api/data
+ echo '{}' > api/data/auth.json
+
+ - name: Check for Circular dependency in rollup
+ working-directory: ./packages/data-provider
+ run: |
+ output=$(npm run rollup:api)
+ echo "$output"
+ if echo "$output" | grep -q "Circular dependency"; then
+ echo "Error: Circular dependency detected!"
+ exit 1
+ fi
- name: Run unit tests
run: cd api && npm run test:ci
+ - name: Run librechat-data-provider unit tests
+ run: cd packages/data-provider && npm run test:ci
+
- name: Run linters
uses: wearerequired/lint-action@v2
with:
diff --git a/.github/workflows/container.yml b/.github/workflows/container.yml
deleted file mode 100644
index 23c6ad48cc8..00000000000
--- a/.github/workflows/container.yml
+++ /dev/null
@@ -1,83 +0,0 @@
-name: Docker Compose Build on Tag
-
-# The workflow is triggered when a tag is pushed
-on:
- push:
- tags:
- - "*"
-
-jobs:
- build:
- runs-on: ubuntu-latest
-
- steps:
- # Check out the repository
- - name: Checkout
- uses: actions/checkout@v4
-
- # Set up Docker
- - name: Set up Docker
- uses: docker/setup-buildx-action@v3
-
- # Set up QEMU for cross-platform builds
- - name: Set up QEMU
- uses: docker/setup-qemu-action@v3
-
- # Log in to GitHub Container Registry
- - name: Log in to GitHub Container Registry
- uses: docker/login-action@v2
- with:
- registry: ghcr.io
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
-
- # Prepare Docker Build
- - name: Build Docker images
- run: |
- cp .env.example .env
-
- # Tag and push librechat-api
- - name: Docker metadata for librechat-api
- id: meta-librechat-api
- uses: docker/metadata-action@v5
- with:
- images: |
- ghcr.io/${{ github.repository_owner }}/librechat-api
- tags: |
- type=raw,value=latest
- type=semver,pattern={{version}}
- type=semver,pattern={{major}}
- type=semver,pattern={{major}}.{{minor}}
-
- - name: Build and librechat-api
- uses: docker/build-push-action@v5
- with:
- file: Dockerfile.multi
- context: .
- push: true
- tags: ${{ steps.meta-librechat-api.outputs.tags }}
- platforms: linux/amd64,linux/arm64
- target: api-build
-
- # Tag and push librechat
- - name: Docker metadata for librechat
- id: meta-librechat
- uses: docker/metadata-action@v5
- with:
- images: |
- ghcr.io/${{ github.repository_owner }}/librechat
- tags: |
- type=raw,value=latest
- type=semver,pattern={{version}}
- type=semver,pattern={{major}}
- type=semver,pattern={{major}}.{{minor}}
-
- - name: Build and librechat
- uses: docker/build-push-action@v5
- with:
- file: Dockerfile
- context: .
- push: true
- tags: ${{ steps.meta-librechat.outputs.tags }}
- platforms: linux/amd64,linux/arm64
- target: node
diff --git a/.github/workflows/dev-images.yml b/.github/workflows/dev-images.yml
index e0149e05e9c..41d427c6c8b 100644
--- a/.github/workflows/dev-images.yml
+++ b/.github/workflows/dev-images.yml
@@ -2,18 +2,38 @@ name: Docker Dev Images Build
on:
workflow_dispatch:
+ push:
+ branches:
+ - main
+ paths:
+ - 'api/**'
+ - 'client/**'
+ - 'packages/**'
jobs:
build:
runs-on: ubuntu-latest
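+    # One matrix job builds both images: the API-only target and the full app target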
+ strategy:
+ matrix:
+ include:
+ - target: api-build
+ file: Dockerfile.multi
+ image_name: librechat-dev-api
+ - target: node
+ file: Dockerfile
+ image_name: librechat-dev
steps:
# Check out the repository
- name: Checkout
uses: actions/checkout@v4
- # Set up Docker
- - name: Set up Docker
+ # Set up QEMU
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+
+ # Set up Docker Buildx
+ - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# Log in to GitHub Container Registry
@@ -24,22 +44,29 @@ jobs:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- # Build Docker images
- - name: Build Docker images
+ # Login to Docker Hub
+ - name: Login to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ # Prepare the environment
+ - name: Prepare environment
run: |
cp .env.example .env
- docker build -f Dockerfile.multi --target api-build -t librechat-dev-api .
- docker build -f Dockerfile -t librechat-dev .
- # Tag and push the images to GitHub Container Registry
- - name: Tag and push images
- run: |
- docker tag librechat-dev-api:latest ghcr.io/${{ github.repository_owner }}/librechat-dev-api:${{ github.sha }}
- docker push ghcr.io/${{ github.repository_owner }}/librechat-dev-api:${{ github.sha }}
- docker tag librechat-dev-api:latest ghcr.io/${{ github.repository_owner }}/librechat-dev-api:latest
- docker push ghcr.io/${{ github.repository_owner }}/librechat-dev-api:latest
-
- docker tag librechat-dev:latest ghcr.io/${{ github.repository_owner }}/librechat-dev:${{ github.sha }}
- docker push ghcr.io/${{ github.repository_owner }}/librechat-dev:${{ github.sha }}
- docker tag librechat-dev:latest ghcr.io/${{ github.repository_owner }}/librechat-dev:latest
- docker push ghcr.io/${{ github.repository_owner }}/librechat-dev:latest
+ # Build and push Docker images for each target
+ - name: Build and push Docker images
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ file: ${{ matrix.file }}
+ push: true
+ tags: |
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ github.sha }}
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ github.sha }}
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest
+ platforms: linux/amd64,linux/arm64
+ target: ${{ matrix.target }}
diff --git a/.github/workflows/generate_embeddings.yml b/.github/workflows/generate_embeddings.yml
new file mode 100644
index 00000000000..c514f9c1d6b
--- /dev/null
+++ b/.github/workflows/generate_embeddings.yml
@@ -0,0 +1,20 @@
+name: 'generate_embeddings'
+on:
+ workflow_dispatch:
+ push:
+ branches:
+ - main
+ paths:
+ - 'docs/**'
+
+jobs:
+ generate:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: supabase/embeddings-generator@v0.0.5
+ with:
+ supabase-url: ${{ secrets.SUPABASE_URL }}
+ supabase-service-role-key: ${{ secrets.SUPABASE_SERVICE_ROLE_KEY }}
+ openai-key: ${{ secrets.OPENAI_DOC_EMBEDDINGS_KEY }}
+ docs-root-path: 'docs'
\ No newline at end of file
diff --git a/.github/workflows/latest-images-main.yml b/.github/workflows/latest-images-main.yml
deleted file mode 100644
index 5149cecb0e6..00000000000
--- a/.github/workflows/latest-images-main.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-name: Docker Compose Build on Main Branch
-
-on:
- workflow_dispatch: # This line allows manual triggering
-
-jobs:
- build:
- runs-on: ubuntu-latest
-
- steps:
- # Check out the repository
- - name: Checkout
- uses: actions/checkout@v4
-
- # Set up Docker
- - name: Set up Docker
- uses: docker/setup-buildx-action@v3
-
- # Log in to GitHub Container Registry
- - name: Log in to GitHub Container Registry
- uses: docker/login-action@v2
- with:
- registry: ghcr.io
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
-
- # Run docker-compose build
- - name: Build Docker images
- run: |
- cp .env.example .env
- docker-compose build
- docker build -f Dockerfile.multi --target api-build -t librechat-api .
-
- # Tag and push the images with the 'latest' tag
- - name: Tag image and push
- run: |
- docker tag librechat:latest ghcr.io/${{ github.repository_owner }}/librechat:latest
- docker push ghcr.io/${{ github.repository_owner }}/librechat:latest
- docker tag librechat-api:latest ghcr.io/${{ github.repository_owner }}/librechat-api:latest
- docker push ghcr.io/${{ github.repository_owner }}/librechat-api:latest
diff --git a/.github/workflows/main-image-workflow.yml b/.github/workflows/main-image-workflow.yml
new file mode 100644
index 00000000000..43c9d957534
--- /dev/null
+++ b/.github/workflows/main-image-workflow.yml
@@ -0,0 +1,69 @@
+name: Docker Compose Build Latest Main Image Tag (Manual Dispatch)
+
+on:
+ workflow_dispatch:
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ include:
+ - target: api-build
+ file: Dockerfile.multi
+ image_name: librechat-api
+ - target: node
+ file: Dockerfile
+ image_name: librechat
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Fetch tags and set the latest tag
+ run: |
+ git fetch --tags
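+        # git rev-list --tags --max-count=1 returns the commit of the most recently created tag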
+ echo "LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`)" >> $GITHUB_ENV
+
+ # Set up QEMU
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+
+ # Set up Docker Buildx
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ # Log in to GitHub Container Registry
+ - name: Log in to GitHub Container Registry
+ uses: docker/login-action@v2
+ with:
+ registry: ghcr.io
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ # Login to Docker Hub
+ - name: Login to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ # Prepare the environment
+ - name: Prepare environment
+ run: |
+ cp .env.example .env
+
+ # Build and push Docker images for each target
+ - name: Build and push Docker images
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ file: ${{ matrix.file }}
+ push: true
+ tags: |
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ env.LATEST_TAG }}
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ env.LATEST_TAG }}
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest
+ platforms: linux/amd64,linux/arm64
+ target: ${{ matrix.target }}
diff --git a/.github/workflows/tag-images.yml b/.github/workflows/tag-images.yml
new file mode 100644
index 00000000000..e90f43978ab
--- /dev/null
+++ b/.github/workflows/tag-images.yml
@@ -0,0 +1,67 @@
+name: Docker Images Build on Tag
+
+on:
+ push:
+ tags:
+ - '*'
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ include:
+ - target: api-build
+ file: Dockerfile.multi
+ image_name: librechat-api
+ - target: node
+ file: Dockerfile
+ image_name: librechat
+
+ steps:
+ # Check out the repository
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ # Set up QEMU
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+
+ # Set up Docker Buildx
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ # Log in to GitHub Container Registry
+ - name: Log in to GitHub Container Registry
+ uses: docker/login-action@v2
+ with:
+ registry: ghcr.io
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ # Login to Docker Hub
+ - name: Login to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ # Prepare the environment
+ - name: Prepare environment
+ run: |
+ cp .env.example .env
+
+ # Build and push Docker images for each target
+ - name: Build and push Docker images
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ file: ${{ matrix.file }}
+ push: true
+ tags: |
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ github.ref_name }}
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ github.ref_name }}
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest
+ platforms: linux/amd64,linux/arm64
+ target: ${{ matrix.target }}
diff --git a/.gitignore b/.gitignore
index 765de5cb799..c55115988b9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,6 +50,7 @@ bower_components/
#config file
librechat.yaml
+librechat.yml
# Environment
.npmrc
@@ -74,6 +75,7 @@ src/style - official.css
config.local.ts
**/storageState.json
junit.xml
+**/.venv/
# docker override file
docker-compose.override.yaml
@@ -88,4 +90,10 @@ auth.json
/packages/ux-shared/
/images
-!client/src/components/Nav/SettingsTabs/Data/
\ No newline at end of file
+!client/src/components/Nav/SettingsTabs/Data/
+
+# User uploads
+uploads/
+
+# owner
+release/
\ No newline at end of file
diff --git a/.husky/pre-commit b/.husky/pre-commit
index af85628072b..67f5b002728 100755
--- a/.husky/pre-commit
+++ b/.husky/pre-commit
@@ -1,4 +1,4 @@
-#!/usr/bin/env sh
+#!/usr/bin/env sh
set -e
. "$(dirname -- "$0")/_/husky.sh"
[ -n "$CI" ] && exit 0
diff --git a/Dockerfile b/Dockerfile
index edc79c2497a..fd087eae39d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,20 +1,35 @@
+# v0.7.0
+
# Base node image
-FROM node:18-alpine AS node
+FROM node:18-alpine3.18 AS node
+
+RUN apk add g++ make py3-pip
+RUN npm install -g node-gyp
+RUN apk --no-cache add curl
-COPY . /app
+RUN mkdir -p /app && chown node:node /app
WORKDIR /app
+USER node
+
+COPY --chown=node:node . .
+
# Allow mounting of these files, which have no default
# values.
RUN touch .env
-# Install call deps - Install curl for health check
-RUN apk --no-cache add curl && \
- npm ci
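+# Raise npm's network retry tolerance so transient registry failures don't break the build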
+RUN npm config set fetch-retry-maxtimeout 600000
+RUN npm config set fetch-retries 5
+RUN npm config set fetch-retry-mintimeout 15000
+RUN npm install --no-audit
# React client build
ENV NODE_OPTIONS="--max-old-space-size=2048"
RUN npm run frontend
+# Create directories for the volumes to inherit
+# the correct permissions
+RUN mkdir -p /app/client/public/images /app/api/logs
+
# Node API setup
EXPOSE 3080
ENV HOST=0.0.0.0
diff --git a/Dockerfile.multi b/Dockerfile.multi
index 0d5ebec5e23..00ed37e3ef8 100644
--- a/Dockerfile.multi
+++ b/Dockerfile.multi
@@ -1,3 +1,5 @@
+# v0.7.0
+
# Build API, Client and Data Provider
FROM node:20-alpine AS base
@@ -11,11 +13,12 @@ RUN npm run build
# React client build
FROM data-provider-build AS client-build
WORKDIR /app/client
-COPY ./client/ ./
+COPY ./client/package*.json ./
# Copy data-provider to client's node_modules
RUN mkdir -p /app/client/node_modules/librechat-data-provider/
RUN cp -R /app/packages/data-provider/* /app/client/node_modules/librechat-data-provider/
RUN npm install
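+# Copy client source after npm install so dependency layers stay cached when only code changes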
+COPY ./client/ ./
ENV NODE_OPTIONS="--max-old-space-size=2048"
RUN npm run build
@@ -24,6 +27,8 @@ FROM data-provider-build AS api-build
WORKDIR /app/api
COPY api/package*.json ./
COPY api/ ./
+# Copy helper scripts
+COPY config/ ./
# Copy data-provider to API's node_modules
RUN mkdir -p /app/api/node_modules/librechat-data-provider/
RUN cp -R /app/packages/data-provider/* /app/api/node_modules/librechat-data-provider/
diff --git a/README.md b/README.md
index 00cd890b073..901ddbc7c14 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,10 @@
-
+
-
- LibreChat
-
+
@@ -27,42 +27,48 @@
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
# 📃 Features
- - 🖥️ UI matching ChatGPT, including Dark mode, Streaming, and 11-2023 updates
- - 💬 Multimodal Chat:
- - Upload and analyze images with GPT-4 and Gemini Vision 📸
- - More filetypes and Assistants API integration in Active Development 🚧
- - 🌎 Multilingual UI:
- - English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro,
- - Русский, 日本語, Svenska, 한국어, Tiếng Việt, 繁體中文, العربية, Türkçe, Nederlands
- - 🤖 AI model selection: OpenAI API, Azure, BingAI, ChatGPT, Google Vertex AI, Anthropic (Claude), Plugins
- - 💾 Create, Save, & Share Custom Presets
- - 🔄 Edit, Resubmit, and Continue messages with conversation branching
- - 📤 Export conversations as screenshots, markdown, text, json.
- - 🔍 Search all messages/conversations
- - 🔌 Plugins, including web access, image generation with DALL-E-3 and more
- - 👥 Multi-User, Secure Authentication with Moderation and Token spend tools
- - ⚙️ Configure Proxy, Reverse Proxy, Docker, many Deployment options, and completely Open-Source
-[For a thorough review of our features, see our docs here](https://docs.librechat.ai/features/plugins/introduction.html) 📚
+- 🖥️ UI matching ChatGPT, including Dark mode, Streaming, and latest updates
+- 💬 Multimodal Chat:
+ - Upload and analyze images with Claude 3, GPT-4, and Gemini Vision 📸
+ - Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, & Google. 🗃️
+ - Advanced Agents with Files, Code Interpreter, Tools, and API Actions 🔦
+ - Available through the [OpenAI Assistants API](https://platform.openai.com/docs/assistants/overview) 🌤️
+ - Non-OpenAI Agents in Active Development 🚧
+- 🌎 Multilingual UI:
+ - English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro,
+ - Русский, 日本語, Svenska, 한국어, Tiếng Việt, 繁體中文, العربية, Türkçe, Nederlands, עברית
+- 🤖 AI model selection: OpenAI, Azure OpenAI, BingAI, ChatGPT, Google Vertex AI, Anthropic (Claude), Plugins, Assistants API (including Azure Assistants)
+- 💾 Create, Save, & Share Custom Presets
+- 🔄 Edit, Resubmit, and Continue messages with conversation branching
+- 📤 Export conversations as screenshots, markdown, text, json.
+- 🔍 Search all messages/conversations
+- 🔌 Plugins, including web access, image generation with DALL-E-3 and more
+- 👥 Multi-User, Secure Authentication with Moderation and Token spend tools
+- ⚙️ Configure Proxy, Reverse Proxy, Docker, & many Deployment options
+- 📖 Completely Open-Source & Built in Public
+- 🧑‍🤝‍🧑 Community-driven development, support, and feedback
+[For a thorough review of our features, see our docs here](https://docs.librechat.ai/features/plugins/introduction.html) 📚
## 🪶 All-In-One AI Conversations with LibreChat
+
LibreChat brings together the future of assistant AIs with the revolutionary technology of OpenAI's ChatGPT. Celebrating the original styling, LibreChat gives you the ability to integrate multiple AI models. It also integrates and enhances original client features such as conversation and message search, prompt templates and plugins.
With LibreChat, you no longer need to opt for ChatGPT Plus and can instead use free or pay-per-call APIs. We welcome contributions, cloning, and forking to enhance the capabilities of this advanced chatbot platform.
-
+
[![Watch the video](https://img.youtube.com/vi/pNIOs1ovsXw/maxresdefault.jpg)](https://youtu.be/pNIOs1ovsXw)
@@ -71,11 +77,13 @@ Click on the thumbnail to open the video☝️
---
## 📚 Documentation
+
For more information on how to use our advanced features, install and configure our software, and access our guidelines and tutorials, please check out our documentation at [docs.librechat.ai](https://docs.librechat.ai)
---
-## 📝 Changelog
+## 📝 Changelog
+
Keep up with the latest updates by visiting the releases page - [Releases](https://github.com/danny-avila/LibreChat/releases)
**⚠️ [Breaking Changes](docs/general_info/breaking_changes.md)**
@@ -96,14 +104,15 @@ Please consult the breaking changes before updating.
---
## ✨ Contributions
+
Contributions, suggestions, bug reports and fixes are welcome!
-For new features, components, or extensions, please open an issue and discuss before sending a PR.
+For new features, components, or extensions, please open an issue and discuss before sending a PR.
---
-💖 This project exists in its current state thanks to all the people who contribute
----
+## 💖 This project exists in its current state thanks to all the people who contribute
+
diff --git a/api/app/chatgpt-browser.js b/api/app/chatgpt-browser.js
index 467e67785d3..818661555dc 100644
--- a/api/app/chatgpt-browser.js
+++ b/api/app/chatgpt-browser.js
@@ -1,5 +1,6 @@
require('dotenv').config();
const { KeyvFile } = require('keyv-file');
+const { Constants } = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('../server/services/UserService');
const browserClient = async ({
@@ -48,7 +49,7 @@ const browserClient = async ({
options = { ...options, parentMessageId, conversationId };
}
- if (parentMessageId === '00000000-0000-0000-0000-000000000000') {
+ if (parentMessageId === Constants.NO_PARENT) {
delete options.conversationId;
}
diff --git a/api/app/clients/AnthropicClient.js b/api/app/clients/AnthropicClient.js
index 0441a49334e..6d478defab0 100644
--- a/api/app/clients/AnthropicClient.js
+++ b/api/app/clients/AnthropicClient.js
@@ -1,6 +1,19 @@
const Anthropic = require('@anthropic-ai/sdk');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
-const { getResponseSender, EModelEndpoint } = require('librechat-data-provider');
+const {
+ getResponseSender,
+ EModelEndpoint,
+ validateVisionModel,
+} = require('librechat-data-provider');
+const { encodeAndFormat } = require('~/server/services/Files/images/encode');
+const {
+ titleFunctionPrompt,
+ parseTitleFromPrompt,
+ truncateText,
+ formatMessage,
+ createContextHandlers,
+} = require('./prompts');
+const spendTokens = require('~/models/spendTokens');
const { getModelMaxTokens } = require('~/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
@@ -10,12 +23,20 @@ const AI_PROMPT = '\n\nAssistant:';
const tokenizersCache = {};
+/** Helper function to introduce a delay before retrying */
+function delayBeforeRetry(attempts, baseDelay = 1000) {
+ return new Promise((resolve) => setTimeout(resolve, baseDelay * attempts));
+}
+
class AnthropicClient extends BaseClient {
constructor(apiKey, options = {}) {
super(apiKey, options);
this.apiKey = apiKey || process.env.ANTHROPIC_API_KEY;
this.userLabel = HUMAN_PROMPT;
this.assistantLabel = AI_PROMPT;
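+    // How to fit messages into the context window; defaults to discarding the oldest messages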
+ this.contextStrategy = options.contextStrategy
+ ? options.contextStrategy.toLowerCase()
+ : 'discard';
this.setOptions(options);
}
@@ -47,6 +68,12 @@ class AnthropicClient extends BaseClient {
stop: modelOptions.stop, // no stop method for now
};
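+    // Claude 3 models (and requests with attachments) use the Messages API rather than legacy Completions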
+ this.isClaude3 = this.modelOptions.model.includes('claude-3');
+ this.useMessages = this.isClaude3 || !!this.options.attachments;
+
+ this.defaultVisionModel = this.options.visionModel ?? 'claude-3-sonnet-20240229';
+ this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));
+
this.maxContextTokens =
getModelMaxTokens(this.modelOptions.model, EModelEndpoint.anthropic) ?? 100000;
this.maxResponseTokens = this.modelOptions.maxOutputTokens || 1500;
@@ -87,7 +114,12 @@ class AnthropicClient extends BaseClient {
return this;
}
+ /**
+ * Get the initialized Anthropic client.
+ * @returns {Anthropic} The Anthropic client instance.
+ */
getClient() {
+ /** @type {Anthropic.default.RequestOptions} */
const options = {
apiKey: this.apiKey,
};
@@ -99,6 +131,75 @@ class AnthropicClient extends BaseClient {
return new Anthropic(options);
}
+ getTokenCountForResponse(response) {
+ return this.getTokenCountForMessage({
+ role: 'assistant',
+ content: response.text,
+ });
+ }
+
+ /**
+ *
+ * Checks if the model is a vision model based on request attachments and sets the appropriate options:
+ * - Sets `this.modelOptions.model` to `this.defaultVisionModel` if the request is a vision request.
+ * - Sets `this.isVisionModel` to `true` if vision request.
+ * @param {MongoFile[]} attachments
+ */
+ checkVisionRequest(attachments) {
+ const availableModels = this.options.modelsConfig?.[EModelEndpoint.anthropic];
+ this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
+
+ const visionModelAvailable = availableModels?.includes(this.defaultVisionModel);
+ if (
+ attachments &&
+ attachments.some((file) => file?.type && file?.type?.includes('image')) &&
+ visionModelAvailable &&
+ !this.isVisionModel
+ ) {
+ this.modelOptions.model = this.defaultVisionModel;
+ this.isVisionModel = true;
+ }
+ }
+
+ /**
+ * Calculate the token cost for an image based on its dimensions.
+ *
+ * For reference, see: https://docs.anthropic.com/claude/docs/vision#image-costs
+ *
+ * @param {Object} image - The image object.
+ * @param {number} image.width - The width of the image.
+ * @param {number} image.height - The height of the image.
+ * @returns {number} The calculated token cost of the image, in tokens.
+ *
+ */
+ calculateImageTokenCost({ width, height }) {
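+ // e.g., a 1024x1024 image costs ceil((1024 * 1024) / 750) = 1399 tokens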
+ return Math.ceil((width * height) / 750);
+ }
+
+ async addImageURLs(message, attachments) {
+ const { files, image_urls } = await encodeAndFormat(
+ this.options.req,
+ attachments,
+ EModelEndpoint.anthropic,
+ );
+ message.image_urls = image_urls.length ? image_urls : undefined;
+ return files;
+ }
+
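+ /** Records prompt and completion token spend for this user and conversation via spendTokens */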
+ async recordTokenUsage({ promptTokens, completionTokens, model, context = 'message' }) {
+ await spendTokens(
+ {
+ context,
+ user: this.user,
+ conversationId: this.conversationId,
+ model: model ?? this.modelOptions.model,
+ endpointTokenConfig: this.options.endpointTokenConfig,
+ },
+ { promptTokens, completionTokens },
+ );
+ }
+
async buildMessages(messages, parentMessageId) {
const orderedMessages = this.constructor.getMessagesForConversation({
messages,
@@ -107,28 +208,145 @@ class AnthropicClient extends BaseClient {
logger.debug('[AnthropicClient] orderedMessages', { orderedMessages, parentMessageId });
- const formattedMessages = orderedMessages.map((message) => ({
- author: message.isCreatedByUser ? this.userLabel : this.assistantLabel,
- content: message?.content ?? message.text,
- }));
+ if (this.options.attachments) {
+ const attachments = await this.options.attachments;
+ const images = attachments.filter((file) => file.type.includes('image'));
+
+ if (images.length && !this.isVisionModel) {
+ throw new Error('Images are only supported with the Claude 3 family of models');
+ }
+
+ const latestMessage = orderedMessages[orderedMessages.length - 1];
+
+ if (this.message_file_map) {
+ this.message_file_map[latestMessage.messageId] = attachments;
+ } else {
+ this.message_file_map = {
+ [latestMessage.messageId]: attachments,
+ };
+ }
+
+ const files = await this.addImageURLs(latestMessage, attachments);
+
+ this.options.attachments = files;
+ }
+
+ if (this.message_file_map) {
+ this.contextHandlers = createContextHandlers(
+ this.options.req,
+ orderedMessages[orderedMessages.length - 1].text,
+ );
+ }
+
+ const formattedMessages = orderedMessages.map((message, i) => {
+ const formattedMessage = this.useMessages
+ ? formatMessage({
+ message,
+ endpoint: EModelEndpoint.anthropic,
+ })
+ : {
+ author: message.isCreatedByUser ? this.userLabel : this.assistantLabel,
+ content: message?.content ?? message.text,
+ };
+
+ const needsTokenCount = this.contextStrategy && !orderedMessages[i].tokenCount;
+ /* If tokens were never counted, or, is a Vision request and the message has files, count again */
+ if (needsTokenCount || (this.isVisionModel && (message.image_urls || message.files))) {
+ orderedMessages[i].tokenCount = this.getTokenCountForMessage(formattedMessage);
+ }
+
+ /* If message has files, calculate image token cost */
+ if (this.message_file_map && this.message_file_map[message.messageId]) {
+ const attachments = this.message_file_map[message.messageId];
+ for (const file of attachments) {
+ if (file.embedded) {
+ this.contextHandlers?.processFile(file);
+ continue;
+ }
+
+ orderedMessages[i].tokenCount += this.calculateImageTokenCost({
+ width: file.width,
+ height: file.height,
+ });
+ }
+ }
+
+ formattedMessage.tokenCount = orderedMessages[i].tokenCount;
+ return formattedMessage;
+ });
+
+ if (this.contextHandlers) {
+ this.augmentedPrompt = await this.contextHandlers.createContext();
+ this.options.promptPrefix = this.augmentedPrompt + (this.options.promptPrefix ?? '');
+ }
+
+ let { context: messagesInWindow, remainingContextTokens } =
+ await this.getMessagesWithinTokenLimit(formattedMessages);
+
+ const tokenCountMap = orderedMessages
+ .slice(orderedMessages.length - messagesInWindow.length)
+ .reduce((map, message, index) => {
+ const { messageId } = message;
+ if (!messageId) {
+ return map;
+ }
+
+ map[messageId] = orderedMessages[index].tokenCount;
+ return map;
+ }, {});
+
+ logger.debug('[AnthropicClient]', {
+ messagesInWindow: messagesInWindow.length,
+ remainingContextTokens,
+ });
let lastAuthor = '';
let groupedMessages = [];
- for (let message of formattedMessages) {
+ for (let i = 0; i < messagesInWindow.length; i++) {
+ const message = messagesInWindow[i];
+ const author = message.role ?? message.author;
// If last author is not same as current author, add to new group
- if (lastAuthor !== message.author) {
- groupedMessages.push({
- author: message.author,
+ if (lastAuthor !== author) {
+ const newMessage = {
content: [message.content],
- });
- lastAuthor = message.author;
+ };
+
+ if (message.role) {
+ newMessage.role = message.role;
+ } else {
+ newMessage.author = message.author;
+ }
+
+ groupedMessages.push(newMessage);
+ lastAuthor = author;
// If same author, append content to the last group
} else {
groupedMessages[groupedMessages.length - 1].content.push(message.content);
}
}
+ groupedMessages = groupedMessages.map((msg, i) => {
+ const isLast = i === groupedMessages.length - 1;
+ if (msg.content.length === 1) {
+ const content = msg.content[0];
+ return {
+ ...msg,
+ // reason: final assistant content cannot end with trailing whitespace
+ content:
+ isLast && this.useMessages && msg.role === 'assistant' && typeof content === 'string'
+ ? content?.trim()
+ : content,
+ };
+ }
+
+ if (!this.useMessages && msg.tokenCount) {
+ delete msg.tokenCount;
+ }
+
+ return msg;
+ });
+
let identityPrefix = '';
if (this.options.userLabel) {
identityPrefix = `\nHuman's name: ${this.options.userLabel}`;
@@ -154,9 +372,10 @@ class AnthropicClient extends BaseClient {
// Prompt AI to respond, empty if last message was from AI
let isEdited = lastAuthor === this.assistantLabel;
const promptSuffix = isEdited ? '' : `${promptPrefix}${this.assistantLabel}\n`;
- let currentTokenCount = isEdited
- ? this.getTokenCount(promptPrefix)
- : this.getTokenCount(promptSuffix);
+ let currentTokenCount =
+ isEdited || this.useMessages
+ ? this.getTokenCount(promptPrefix)
+ : this.getTokenCount(promptSuffix);
let promptBody = '';
const maxTokenCount = this.maxPromptTokens;
@@ -224,7 +443,69 @@ class AnthropicClient extends BaseClient {
return true;
};
- await buildPromptBody();
+ const messagesPayload = [];
+ const buildMessagesPayload = async () => {
+ let canContinue = true;
+
+ if (promptPrefix) {
+ this.systemMessage = promptPrefix;
+ }
+
+ while (currentTokenCount < maxTokenCount && groupedMessages.length > 0 && canContinue) {
+ const message = groupedMessages.pop();
+
+ let tokenCountForMessage = message.tokenCount ?? this.getTokenCountForMessage(message);
+
+ const newTokenCount = currentTokenCount + tokenCountForMessage;
+ const exceededMaxCount = newTokenCount > maxTokenCount;
+
+ if (exceededMaxCount && messagesPayload.length === 0) {
+ throw new Error(
+ `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
+ );
+ } else if (exceededMaxCount) {
+ canContinue = false;
+ break;
+ }
+
+ delete message.tokenCount;
+ messagesPayload.unshift(message);
+ currentTokenCount = newTokenCount;
+
+ // Switch off isEdited after using it once
+ if (isEdited && message.role === 'assistant') {
+ isEdited = false;
+ }
+
+ // Wait for next tick to avoid blocking the event loop
+ await new Promise((resolve) => setImmediate(resolve));
+ }
+ };
+
+ const processTokens = () => {
+ // Add 2 tokens for metadata after all messages have been counted.
+ currentTokenCount += 2;
+
+ // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxResponseTokens` tokens for the response.
+ this.modelOptions.maxOutputTokens = Math.min(
+ this.maxContextTokens - currentTokenCount,
+ this.maxResponseTokens,
+ );
+ };
+
+ if (this.modelOptions.model.startsWith('claude-3')) {
+ await buildMessagesPayload();
+ processTokens();
+ return {
+ prompt: messagesPayload,
+ context: messagesInWindow,
+ promptTokens: currentTokenCount,
+ tokenCountMap,
+ };
+ } else {
+ await buildPromptBody();
+ processTokens();
+ }
if (nextMessage.remove) {
promptBody = promptBody.replace(nextMessage.messageString, '');
@@ -234,22 +515,26 @@ class AnthropicClient extends BaseClient {
let prompt = `${promptBody}${promptSuffix}`;
- // Add 2 tokens for metadata after all messages have been counted.
- currentTokenCount += 2;
-
- // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
- this.modelOptions.maxOutputTokens = Math.min(
- this.maxContextTokens - currentTokenCount,
- this.maxResponseTokens,
- );
-
- return { prompt, context };
+ return { prompt, context, promptTokens: currentTokenCount, tokenCountMap };
}
getCompletion() {
logger.debug('AnthropicClient doesn\'t use getCompletion (all handled in sendCompletion)');
}
+ /**
+ * Creates a message or completion response using the Anthropic client.
+ * @param {Anthropic} client - The Anthropic client instance.
+ * @param {Anthropic.default.MessageCreateParams | Anthropic.default.CompletionCreateParams} options - The options for the message or completion.
+ * @param {boolean} useMessages - Whether to use messages or completions. Defaults to `this.useMessages`.
+ * @returns {Promise<Anthropic.default.Message | Anthropic.default.Completion>} The response from the Anthropic client.
+ */
+ async createResponse(client, options, useMessages) {
+ return useMessages ?? this.useMessages
+ ? await client.messages.create(options)
+ : await client.completions.create(options);
+ }
+
async sendCompletion(payload, { onProgress, abortController }) {
if (!abortController) {
abortController = new AbortController();
@@ -279,36 +564,88 @@ class AnthropicClient extends BaseClient {
topP: top_p,
topK: top_k,
} = this.modelOptions;
+
const requestOptions = {
- prompt: payload,
model,
stream: stream || true,
- max_tokens_to_sample: maxOutputTokens || 1500,
stop_sequences,
temperature,
metadata,
top_p,
top_k,
};
+
+ if (this.useMessages) {
+ requestOptions.messages = payload;
+ requestOptions.max_tokens = maxOutputTokens || 1500;
+ } else {
+ requestOptions.prompt = payload;
+ requestOptions.max_tokens_to_sample = maxOutputTokens || 1500;
+ }
+
+ if (this.systemMessage) {
+ requestOptions.system = this.systemMessage;
+ }
+
logger.debug('[AnthropicClient]', { ...requestOptions });
- const response = await client.completions.create(requestOptions);
- signal.addEventListener('abort', () => {
- logger.debug('[AnthropicClient] message aborted!');
- response.controller.abort();
- });
+ const handleChunk = (currentChunk) => {
+ if (currentChunk) {
+ text += currentChunk;
+ onProgress(currentChunk);
+ }
+ };
+
+ const maxRetries = 3;
+ async function processResponse() {
+ let attempts = 0;
+
+ while (attempts < maxRetries) {
+ let response;
+ try {
+ response = await this.createResponse(client, requestOptions);
+
+ signal.addEventListener('abort', () => {
+ logger.debug('[AnthropicClient] message aborted!');
+ if (response.controller?.abort) {
+ response.controller.abort();
+ }
+ });
+
+ for await (const completion of response) {
+ // Handle each completion as before
+ if (completion?.delta?.text) {
+ handleChunk(completion.delta.text);
+ } else if (completion.completion) {
+ handleChunk(completion.completion);
+ }
+ }
- for await (const completion of response) {
- // Uncomment to debug message stream
- // logger.debug(completion);
- text += completion.completion;
- onProgress(completion.completion);
+ // Successful processing, exit loop
+ break;
+ } catch (error) {
+ attempts += 1;
+ logger.warn(
+ `User: ${this.user} | Anthropic Request ${attempts} failed: ${error.message}`,
+ );
+
+ if (attempts < maxRetries) {
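+ // linear backoff: 350ms after the first failed attempt, 700ms after the second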
+ await delayBeforeRetry(attempts, 350);
+ } else {
+ throw new Error(`Operation failed after ${maxRetries} attempts: ${error.message}`);
+ }
+ } finally {
+ signal.removeEventListener('abort', () => {
+ logger.debug('[AnthropicClient] message aborted!');
+ if (response.controller?.abort) {
+ response.controller.abort();
+ }
+ });
+ }
+ }
}
- signal.removeEventListener('abort', () => {
- logger.debug('[AnthropicClient] message aborted!');
- response.controller.abort();
- });
+ await processResponse.bind(this)();
return text.trim();
}
@@ -317,6 +654,7 @@ class AnthropicClient extends BaseClient {
return {
promptPrefix: this.options.promptPrefix,
modelLabel: this.options.modelLabel,
+ resendFiles: this.options.resendFiles,
...this.modelOptions,
};
}
@@ -342,6 +680,78 @@ class AnthropicClient extends BaseClient {
getTokenCount(text) {
return this.gptEncoder.encode(text, 'all').length;
}
+
+ /**
+ * Generates a concise title for a conversation based on the user's input text and response.
+ * Involves sending a chat completion request with specific instructions for title generation.
+ *
+ * This function capitalizes on [Anthropic's function calling training](https://docs.anthropic.com/claude/docs/functions-external-tools).
+ *
+ * @param {Object} params - The parameters for the conversation title generation.
+ * @param {string} params.text - The user's input.
+ * @param {string} [params.responseText=''] - The AI's immediate response to the user.
+ *
+ * @returns {Promise<string>} A promise that resolves to the generated conversation title.
+ * In case of failure, it will return the default title, "New Chat".
+ */
+ async titleConvo({ text, responseText = '' }) {
+ let title = 'New Chat';
+ const convo = `<initial_message>
+ ${truncateText(text)}
+ </initial_message>
+ <response>
+ ${JSON.stringify(truncateText(responseText))}
+ </response>`;
+
+ const { ANTHROPIC_TITLE_MODEL } = process.env ?? {};
+ const model = this.options.titleModel ?? ANTHROPIC_TITLE_MODEL ?? 'claude-3-haiku-20240307';
+ const system = titleFunctionPrompt;
+
+ const titleChatCompletion = async () => {
+ const content = `<conversation_context>
+ ${convo}
+ </conversation_context>
+
+ Please generate a title for this conversation.`;
+
+ const titleMessage = { role: 'user', content };
+ const requestOptions = {
+ model,
+ temperature: 0.3,
+ max_tokens: 1024,
+ system,
+ stop_sequences: ['\n\nHuman:', '\n\nAssistant', '</function_calls>'],
+ messages: [titleMessage],
+ };
+
+ try {
+ const response = await this.createResponse(this.getClient(), requestOptions, true);
+ let promptTokens = response?.usage?.input_tokens;
+ let completionTokens = response?.usage?.output_tokens;
+ if (!promptTokens) {
+ promptTokens = this.getTokenCountForMessage(titleMessage);
+ promptTokens += this.getTokenCountForMessage({ role: 'system', content: system });
+ }
+ if (!completionTokens) {
+ completionTokens = this.getTokenCountForMessage(response.content[0]);
+ }
+ await this.recordTokenUsage({
+ model,
+ promptTokens,
+ completionTokens,
+ context: 'title',
+ });
+ const text = response.content[0].text;
+ title = parseTitleFromPrompt(text);
+ } catch (e) {
+ logger.error('[AnthropicClient] There was an issue generating the title', e);
+ }
+ };
+
+ await titleChatCompletion();
+ logger.debug('[AnthropicClient] Convo Title: ' + title);
+ return title;
+ }
}
module.exports = AnthropicClient;
diff --git a/api/app/clients/BaseClient.js b/api/app/clients/BaseClient.js
index aa39084b9fa..f7ed3b9cf18 100644
--- a/api/app/clients/BaseClient.js
+++ b/api/app/clients/BaseClient.js
@@ -1,8 +1,9 @@
const crypto = require('crypto');
-const { supportsBalanceCheck } = require('librechat-data-provider');
+const { supportsBalanceCheck, Constants } = require('librechat-data-provider');
const { getConvo, getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
const { addSpaceIfNeeded, isEnabled } = require('~/server/utils');
const checkBalance = require('~/models/checkBalance');
+const { getFiles } = require('~/models/File');
const TextStream = require('./TextStream');
const { logger } = require('~/config');
@@ -22,7 +23,7 @@ class BaseClient {
throw new Error('Method \'setOptions\' must be implemented.');
}
- getCompletion() {
+ async getCompletion() {
throw new Error('Method \'getCompletion\' must be implemented.');
}
@@ -46,10 +47,6 @@ class BaseClient {
logger.debug('`[BaseClient] recordTokenUsage` not implemented.', response);
}
- async addPreviousAttachments(messages) {
- return messages;
- }
-
async recordTokenUsage({ promptTokens, completionTokens }) {
logger.debug('`[BaseClient] recordTokenUsage` not implemented.', {
promptTokens,
@@ -77,7 +74,7 @@ class BaseClient {
const saveOptions = this.getSaveOptions();
this.abortController = opts.abortController ?? new AbortController();
const conversationId = opts.conversationId ?? crypto.randomUUID();
- const parentMessageId = opts.parentMessageId ?? '00000000-0000-0000-0000-000000000000';
+ const parentMessageId = opts.parentMessageId ?? Constants.NO_PARENT;
const userMessageId = opts.overrideParentMessageId ?? crypto.randomUUID();
let responseMessageId = opts.responseMessageId ?? crypto.randomUUID();
let head = isEdited ? responseMessageId : parentMessageId;
@@ -428,7 +425,10 @@ class BaseClient {
await this.saveMessageToDatabase(userMessage, saveOptions, user);
}
- if (isEnabled(process.env.CHECK_BALANCE) && supportsBalanceCheck[this.options.endpoint]) {
+ if (
+ isEnabled(process.env.CHECK_BALANCE) &&
+ supportsBalanceCheck[this.options.endpointType ?? this.options.endpoint]
+ ) {
await checkBalance({
req: this.options.req,
res: this.options.res,
@@ -438,11 +438,14 @@ class BaseClient {
amount: promptTokens,
model: this.modelOptions.model,
endpoint: this.options.endpoint,
+ endpointTokenConfig: this.options.endpointTokenConfig,
},
});
}
const completion = await this.sendCompletion(payload, opts);
+ this.abortController.requestCompleted = true;
+
const responseMessage = {
messageId: responseMessageId,
conversationId,
@@ -453,6 +456,7 @@ class BaseClient {
sender: this.sender,
text: addSpaceIfNeeded(generation) + completion,
promptTokens,
+ ...(this.metadata ?? {}),
};
if (
@@ -548,7 +552,7 @@ class BaseClient {
*
* Each message object should have an 'id' or 'messageId' property and may have a 'parentMessageId' property.
* The 'parentMessageId' is the ID of the message that the current message is a reply to.
- * If 'parentMessageId' is not present, null, or is '00000000-0000-0000-0000-000000000000',
+ * If 'parentMessageId' is not present, null, or is Constants.NO_PARENT,
* the message is considered a root message.
*
* @param {Object} options - The options for the function.
@@ -603,9 +607,7 @@ class BaseClient {
}
currentMessageId =
- message.parentMessageId === '00000000-0000-0000-0000-000000000000'
- ? null
- : message.parentMessageId;
+ message.parentMessageId === Constants.NO_PARENT ? null : message.parentMessageId;
}
orderedMessages.reverse();
@@ -679,6 +681,54 @@ class BaseClient {
return await this.sendCompletion(payload, opts);
}
+
+ /**
+ *
+ * @param {TMessage[]} _messages
+   * @returns {Promise<TMessage[]>}
+ */
+ async addPreviousAttachments(_messages) {
+ if (!this.options.resendFiles) {
+ return _messages;
+ }
+
+ /**
+ *
+ * @param {TMessage} message
+ */
+ const processMessage = async (message) => {
+ if (!this.message_file_map) {
+        /** @type {Record<string, MongoFile[]>} */
+ this.message_file_map = {};
+ }
+
+ const fileIds = message.files.map((file) => file.file_id);
+ const files = await getFiles({
+ file_id: { $in: fileIds },
+ });
+
+ await this.addImageURLs(message, files);
+
+ this.message_file_map[message.messageId] = files;
+ return message;
+ };
+
+ const promises = [];
+
+ for (const message of _messages) {
+ if (!message.files) {
+ promises.push(message);
+ continue;
+ }
+
+ promises.push(processMessage(message));
+ }
+
+ const messages = await Promise.all(promises);
+
+ this.checkVisionRequest(Object.values(this.message_file_map ?? {}).flat());
+ return messages;
+ }
}
module.exports = BaseClient;
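
`addPreviousAttachments` moves from `OpenAIClient` into `BaseClient` (gated by the renamed `resendFiles` option) so any client can re-attach files from earlier turns. The ordering trick is that plain messages and file-bearing messages go into one array, the former as values and the latter as promises, and a single `Promise.all` resolves both while preserving message order. A runnable reduction of the pattern, with `fakeGetFiles` standing in for the Mongo-backed `getFiles` query:

```js
// Hedged reduction of the fan-out in addPreviousAttachments; fakeGetFiles is a
// stand-in for getFiles({ file_id: { $in: [...] } }).
const fakeGetFiles = async (fileIds) =>
  fileIds.map((file_id) => ({ file_id, type: 'image/png' }));

async function attachPrevious(messages) {
  const promises = [];
  for (const message of messages) {
    if (!message.files) {
      promises.push(message); // Promise.all passes non-promise values through as-is
      continue;
    }
    promises.push(
      fakeGetFiles(message.files.map((f) => f.file_id)).then((files) => ({ ...message, files })),
    );
  }
  return Promise.all(promises); // original message order is preserved
}

attachPrevious([{ messageId: 'a1', files: [{ file_id: 'f1' }] }, { messageId: 'b2' }]).then(
  (result) => console.log(result),
);
```
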
diff --git a/api/app/clients/ChatGPTClient.js b/api/app/clients/ChatGPTClient.js
index c1ae54fdf08..d218849513a 100644
--- a/api/app/clients/ChatGPTClient.js
+++ b/api/app/clients/ChatGPTClient.js
@@ -1,9 +1,19 @@
-const crypto = require('crypto');
const Keyv = require('keyv');
+const crypto = require('crypto');
+const {
+ EModelEndpoint,
+ resolveHeaders,
+ CohereConstants,
+ mapModelToAzureConfig,
+} = require('librechat-data-provider');
+const { CohereClient } = require('cohere-ai');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
+const { createCoherePayload } = require('./llm');
const { Agent, ProxyAgent } = require('undici');
const BaseClient = require('./BaseClient');
+const { logger } = require('~/config');
+const { extractBaseURL, constructAzureURL, genAzureChatCompletion } = require('~/utils');
const CHATGPT_MODEL = 'gpt-3.5-turbo';
const tokenizersCache = {};
@@ -140,11 +150,13 @@ class ChatGPTClient extends BaseClient {
return tokenizer;
}
- async getCompletion(input, onProgress, abortController = null) {
+ /** @type {getCompletion} */
+ async getCompletion(input, onProgress, onTokenProgress, abortController = null) {
if (!abortController) {
abortController = new AbortController();
}
- const modelOptions = { ...this.modelOptions };
+
+ let modelOptions = { ...this.modelOptions };
if (typeof onProgress === 'function') {
modelOptions.stream = true;
}
@@ -159,56 +171,176 @@ class ChatGPTClient extends BaseClient {
}
const { debug } = this.options;
- const url = this.completionsUrl;
+ let baseURL = this.completionsUrl;
if (debug) {
console.debug();
- console.debug(url);
+ console.debug(baseURL);
console.debug(modelOptions);
console.debug();
}
- if (this.azure || this.options.azure) {
- // Azure does not accept `model` in the body, so we need to remove it.
- delete modelOptions.model;
- }
-
const opts = {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
- body: JSON.stringify(modelOptions),
dispatcher: new Agent({
bodyTimeout: 0,
headersTimeout: 0,
}),
};
- if (this.apiKey && this.options.azure) {
- opts.headers['api-key'] = this.apiKey;
+ if (this.isVisionModel) {
+ modelOptions.max_tokens = 4000;
+ }
+
+ /** @type {TAzureConfig | undefined} */
+ const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
+
+ const isAzure = this.azure || this.options.azure;
+ if (
+ (isAzure && this.isVisionModel && azureConfig) ||
+ (azureConfig && this.isVisionModel && this.options.endpoint === EModelEndpoint.azureOpenAI)
+ ) {
+ const { modelGroupMap, groupMap } = azureConfig;
+ const {
+ azureOptions,
+ baseURL,
+ headers = {},
+ serverless,
+ } = mapModelToAzureConfig({
+ modelName: modelOptions.model,
+ modelGroupMap,
+ groupMap,
+ });
+ opts.headers = resolveHeaders(headers);
+ this.langchainProxy = extractBaseURL(baseURL);
+ this.apiKey = azureOptions.azureOpenAIApiKey;
+
+ const groupName = modelGroupMap[modelOptions.model].group;
+ this.options.addParams = azureConfig.groupMap[groupName].addParams;
+ this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
+ // Note: `forcePrompt` not re-assigned as only chat models are vision models
+
+ this.azure = !serverless && azureOptions;
+ this.azureEndpoint =
+ !serverless && genAzureChatCompletion(this.azure, modelOptions.model, this);
+ }
+
+ if (this.options.headers) {
+ opts.headers = { ...opts.headers, ...this.options.headers };
+ }
+
+ if (isAzure) {
+ // Azure does not accept `model` in the body, so we need to remove it.
+ delete modelOptions.model;
+
+ baseURL = this.langchainProxy
+ ? constructAzureURL({
+ baseURL: this.langchainProxy,
+ azureOptions: this.azure,
+ })
+      : this.azureEndpoint.split(/(?<!\/)\/(chat|completion)\//)[0];
+
+    if (this.options.forcePrompt) {
+      baseURL += '/completions';
+    } else {
+      baseURL += '/chat/completions';
+    }
+
+    opts.defaultQuery = { 'api-version': this.azure.azureOpenAIApiVersion };
+    opts.headers = { ...opts.headers, 'api-key': this.apiKey };
+  } else if (this.apiKey) {
+    opts.headers.Authorization = `Bearer ${this.apiKey}`;
+  }
+
+  if (process.env.OPENAI_ORGANIZATION) {
+    opts.headers['OpenAI-Organization'] = process.env.OPENAI_ORGANIZATION;
+  }
+
+  if (this.useOpenRouter) {
+    opts.headers['HTTP-Referer'] = 'https://librechat.ai';
+    opts.headers['X-Title'] = 'LibreChat';
+  }
+
+  /* hacky fixes for Mistral AI API:
+    - Re-orders system message to the top of the messages payload, as not allowed anywhere else
+    - If there is only one message and it's a system message, change the role to user
+  */
+  if (baseURL.includes('https://api.mistral.ai/v1') && modelOptions.messages) {
+    const { messages } = modelOptions;
+
+    const systemMessageIndex = messages.findIndex((msg) => msg.role === 'system');
+
+ if (systemMessageIndex > 0) {
+ const [systemMessage] = messages.splice(systemMessageIndex, 1);
+ messages.unshift(systemMessage);
+ }
+
+ modelOptions.messages = messages;
+
+ if (messages.length === 1 && messages[0].role === 'system') {
+ modelOptions.messages[0].role = 'user';
+ }
+ }
+
+ if (this.options.addParams && typeof this.options.addParams === 'object') {
+ modelOptions = {
+ ...modelOptions,
+ ...this.options.addParams,
+ };
+ logger.debug('[ChatGPTClient] chatCompletion: added params', {
+ addParams: this.options.addParams,
+ modelOptions,
+ });
+ }
+
+ if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
+ this.options.dropParams.forEach((param) => {
+ delete modelOptions[param];
+ });
+ logger.debug('[ChatGPTClient] chatCompletion: dropped params', {
+ dropParams: this.options.dropParams,
+ modelOptions,
+ });
+ }
+
+ if (baseURL.startsWith(CohereConstants.API_URL)) {
+ const payload = createCoherePayload({ modelOptions });
+ return await this.cohereChatCompletion({ payload, onTokenProgress });
+ }
+
+ if (baseURL.includes('v1') && !baseURL.includes('/completions') && !this.isChatCompletion) {
+ baseURL = baseURL.split('v1')[0] + 'v1/completions';
+ } else if (
+ baseURL.includes('v1') &&
+ !baseURL.includes('/chat/completions') &&
+ this.isChatCompletion
+ ) {
+ baseURL = baseURL.split('v1')[0] + 'v1/chat/completions';
+ }
+
+ const BASE_URL = new URL(baseURL);
+ if (opts.defaultQuery) {
+ Object.entries(opts.defaultQuery).forEach(([key, value]) => {
+ BASE_URL.searchParams.append(key, value);
+ });
+ delete opts.defaultQuery;
+ }
+
+ const completionsURL = BASE_URL.toString();
+ opts.body = JSON.stringify(modelOptions);
+
if (modelOptions.stream) {
// eslint-disable-next-line no-async-promise-executor
return new Promise(async (resolve, reject) => {
try {
let done = false;
- await fetchEventSource(url, {
+ await fetchEventSource(completionsURL, {
...opts,
signal: abortController.signal,
async onopen(response) {
@@ -236,7 +368,6 @@ class ChatGPTClient extends BaseClient {
// workaround for private API not sending [DONE] event
if (!done) {
onProgress('[DONE]');
- abortController.abort();
resolve();
}
},
@@ -249,14 +380,13 @@ class ChatGPTClient extends BaseClient {
},
onmessage(message) {
if (debug) {
- // console.debug(message);
+ console.debug(message);
}
if (!message.data || message.event === 'ping') {
return;
}
if (message.data === '[DONE]') {
onProgress('[DONE]');
- abortController.abort();
resolve();
done = true;
return;
@@ -269,7 +399,7 @@ class ChatGPTClient extends BaseClient {
}
});
}
- const response = await fetch(url, {
+ const response = await fetch(completionsURL, {
...opts,
signal: abortController.signal,
});
@@ -287,6 +417,35 @@ class ChatGPTClient extends BaseClient {
return response.json();
}
+ /** @type {cohereChatCompletion} */
+ async cohereChatCompletion({ payload, onTokenProgress }) {
+ const cohere = new CohereClient({
+ token: this.apiKey,
+ environment: this.completionsUrl,
+ });
+
+ if (!payload.stream) {
+ const chatResponse = await cohere.chat(payload);
+ return chatResponse.text;
+ }
+
+ const chatStream = await cohere.chatStream(payload);
+ let reply = '';
+ for await (const message of chatStream) {
+ if (!message) {
+ continue;
+ }
+
+ if (message.eventType === 'text-generation' && message.text) {
+ onTokenProgress(message.text);
+ } else if (message.eventType === 'stream-end' && message.response) {
+ reply = message.response.text;
+ }
+ }
+
+ return reply;
+ }
+
async generateTitle(userMessage, botMessage) {
const instructionsPayload = {
role: 'system',
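
One subtle piece of the rewritten `getCompletion` is the endpoint normalization: any base URL containing `v1` is coerced to `v1/completions` or `v1/chat/completions` to match the payload type before the request is dispatched. A standalone reduction of that branch (the host is a placeholder):

```js
// Reduction of the v1-endpoint coercion in getCompletion; example host is made up.
function normalizeCompletionsURL(baseURL, isChatCompletion) {
  if (baseURL.includes('v1') && !baseURL.includes('/completions') && !isChatCompletion) {
    return baseURL.split('v1')[0] + 'v1/completions';
  }
  if (baseURL.includes('v1') && !baseURL.includes('/chat/completions') && isChatCompletion) {
    return baseURL.split('v1')[0] + 'v1/chat/completions';
  }
  return baseURL;
}

console.log(normalizeCompletionsURL('https://proxy.example/v1', true));
// -> https://proxy.example/v1/chat/completions
console.log(normalizeCompletionsURL('https://proxy.example/v1', false));
// -> https://proxy.example/v1/completions
```
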
diff --git a/api/app/clients/GoogleClient.js b/api/app/clients/GoogleClient.js
index 950cc8d1116..c5edcb275a8 100644
--- a/api/app/clients/GoogleClient.js
+++ b/api/app/clients/GoogleClient.js
@@ -4,16 +4,17 @@ const { GoogleVertexAI } = require('langchain/llms/googlevertexai');
const { ChatGoogleGenerativeAI } = require('@langchain/google-genai');
const { ChatGoogleVertexAI } = require('langchain/chat_models/googlevertexai');
const { AIMessage, HumanMessage, SystemMessage } = require('langchain/schema');
-const { encodeAndFormat, validateVisionModel } = require('~/server/services/Files/images');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
+ validateVisionModel,
getResponseSender,
- EModelEndpoint,
endpointSettings,
+ EModelEndpoint,
AuthKeys,
} = require('librechat-data-provider');
+const { encodeAndFormat } = require('~/server/services/Files/images');
+const { formatMessage, createContextHandlers } = require('./prompts');
const { getModelMaxTokens } = require('~/utils');
-const { formatMessage } = require('./prompts');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
@@ -123,18 +124,11 @@ class GoogleClient extends BaseClient {
// stop: modelOptions.stop // no stop method for now
};
- if (this.options.attachments) {
- this.modelOptions.model = 'gemini-pro-vision';
- }
+ this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));
// TODO: as of 12/14/23, only gemini models are "Generative AI" models provided by Google
this.isGenerativeModel = this.modelOptions.model.includes('gemini');
- this.isVisionModel = validateVisionModel(this.modelOptions.model);
const { isGenerativeModel } = this;
- if (this.isVisionModel && !this.options.attachments) {
- this.modelOptions.model = 'gemini-pro';
- this.isVisionModel = false;
- }
this.isChatModel = !isGenerativeModel && this.modelOptions.model.includes('chat');
const { isChatModel } = this;
this.isTextModel =
@@ -219,6 +213,33 @@ class GoogleClient extends BaseClient {
return this;
}
+ /**
+ *
+   * Checks if the model is a vision model based on request attachments and sets the appropriate options.
+ * @param {MongoFile[]} attachments
+ */
+ checkVisionRequest(attachments) {
+    /* Validate the vision request */
+ this.defaultVisionModel = this.options.visionModel ?? 'gemini-pro-vision';
+ const availableModels = this.options.modelsConfig?.[EModelEndpoint.google];
+ this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
+
+ if (
+ attachments &&
+ attachments.some((file) => file?.type && file?.type?.includes('image')) &&
+ availableModels?.includes(this.defaultVisionModel) &&
+ !this.isVisionModel
+ ) {
+ this.modelOptions.model = this.defaultVisionModel;
+ this.isVisionModel = true;
+ }
+
+ if (this.isVisionModel && !attachments) {
+ this.modelOptions.model = 'gemini-pro';
+ this.isVisionModel = false;
+ }
+ }
+
formatMessages() {
return ((message) => ({
author: message?.author ?? (message.isCreatedByUser ? this.userLabel : this.modelLabel),
@@ -226,18 +247,45 @@ class GoogleClient extends BaseClient {
})).bind(this);
}
- async buildVisionMessages(messages = [], parentMessageId) {
- const { prompt } = await this.buildMessagesPrompt(messages, parentMessageId);
- const attachments = await this.options.attachments;
+ /**
+ *
+ * Adds image URLs to the message object and returns the files
+ *
+   * @param {TMessage} message
+   * @param {MongoFile[]} attachments
+   * @returns {Promise<MongoFile[]>}
+ */
+ async addImageURLs(message, attachments) {
const { files, image_urls } = await encodeAndFormat(
this.options.req,
- attachments.filter((file) => file.type.includes('image')),
+ attachments,
EModelEndpoint.google,
);
+ message.image_urls = image_urls.length ? image_urls : undefined;
+ return files;
+ }
+ async buildVisionMessages(messages = [], parentMessageId) {
+ const attachments = await this.options.attachments;
const latestMessage = { ...messages[messages.length - 1] };
+ this.contextHandlers = createContextHandlers(this.options.req, latestMessage.text);
+
+ if (this.contextHandlers) {
+ for (const file of attachments) {
+ if (file.embedded) {
+ this.contextHandlers?.processFile(file);
+ continue;
+ }
+ }
+
+ this.augmentedPrompt = await this.contextHandlers.createContext();
+ this.options.promptPrefix = this.augmentedPrompt + this.options.promptPrefix;
+ }
+
+ const { prompt } = await this.buildMessagesPrompt(messages, parentMessageId);
+
+ const files = await this.addImageURLs(latestMessage, attachments);
- latestMessage.image_urls = image_urls;
this.options.attachments = files;
latestMessage.text = prompt;
@@ -264,7 +312,7 @@ class GoogleClient extends BaseClient {
);
}
- if (this.options.attachments) {
+ if (this.options.attachments && this.isGenerativeModel) {
return this.buildVisionMessages(messages, parentMessageId);
}
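
The `checkVisionRequest` implementations added to `GoogleClient` here and to `OpenAIClient` below share one decision rule: switch to the configured default vision model only when an image is actually attached, that model appears in the endpoint's model list, and the current model is not already vision-capable. An illustrative distillation (the parameter names are ours, not the clients'):

```js
// Illustrative only; mirrors the gating logic, not the exact client code.
function resolveVisionModel({ model, isVisionModel, attachments, availableModels, defaultVisionModel }) {
  const hasImage = attachments?.some((file) => file?.type?.includes('image'));
  if (hasImage && availableModels?.includes(defaultVisionModel) && !isVisionModel) {
    return { model: defaultVisionModel, isVisionModel: true };
  }
  if (isVisionModel && !attachments) {
    return { model: 'gemini-pro', isVisionModel: false }; // GoogleClient-specific fallback
  }
  return { model, isVisionModel };
}

console.log(
  resolveVisionModel({
    model: 'gemini-pro',
    isVisionModel: false,
    attachments: [{ type: 'image/jpeg' }],
    availableModels: ['gemini-pro', 'gemini-pro-vision'],
    defaultVisionModel: 'gemini-pro-vision',
  }),
); // -> { model: 'gemini-pro-vision', isVisionModel: true }
```
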
diff --git a/api/app/clients/OpenAIClient.js b/api/app/clients/OpenAIClient.js
index ca0c8d84248..f66afda4abd 100644
--- a/api/app/clients/OpenAIClient.js
+++ b/api/app/clients/OpenAIClient.js
@@ -1,21 +1,35 @@
const OpenAI = require('openai');
const { HttpsProxyAgent } = require('https-proxy-agent');
-const { getResponseSender, ImageDetailCost, ImageDetail } = require('librechat-data-provider');
+const {
+ ImageDetail,
+ EModelEndpoint,
+ resolveHeaders,
+ ImageDetailCost,
+ CohereConstants,
+ getResponseSender,
+ validateVisionModel,
+ mapModelToAzureConfig,
+} = require('librechat-data-provider');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
- getModelMaxTokens,
- genAzureChatCompletion,
extractBaseURL,
constructAzureURL,
+ getModelMaxTokens,
+ genAzureChatCompletion,
} = require('~/utils');
-const { encodeAndFormat, validateVisionModel } = require('~/server/services/Files/images');
-const { truncateText, formatMessage, CUT_OFF_PROMPT } = require('./prompts');
+const {
+ truncateText,
+ formatMessage,
+ createContextHandlers,
+ CUT_OFF_PROMPT,
+ titleInstruction,
+} = require('./prompts');
+const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { handleOpenAIErrors } = require('./tools/util');
const spendTokens = require('~/models/spendTokens');
const { createLLM, RunManager } = require('./llm');
const ChatGPTClient = require('./ChatGPTClient');
const { isEnabled } = require('~/server/utils');
-const { getFiles } = require('~/models/File');
const { summaryBuffer } = require('./memory');
const { runTitleChain } = require('./chains');
const { tokenSplit } = require('./document');
@@ -32,7 +46,10 @@ class OpenAIClient extends BaseClient {
super(apiKey, options);
this.ChatGPTClient = new ChatGPTClient();
this.buildPrompt = this.ChatGPTClient.buildPrompt.bind(this);
+ /** @type {getCompletion} */
this.getCompletion = this.ChatGPTClient.getCompletion.bind(this);
+ /** @type {cohereChatCompletion} */
+ this.cohereChatCompletion = this.ChatGPTClient.cohereChatCompletion.bind(this);
this.contextStrategy = options.contextStrategy
? options.contextStrategy.toLowerCase()
: 'discard';
@@ -40,6 +57,10 @@ class OpenAIClient extends BaseClient {
/** @type {AzureOptions} */
this.azure = options.azure || false;
this.setOptions(options);
+ this.metadata = {};
+
+ /** @type {string | undefined} - The API Completions URL */
+ this.completionsUrl;
}
// TODO: PluginsClient calls this 3x, unneeded
@@ -83,7 +104,12 @@ class OpenAIClient extends BaseClient {
};
}
- this.checkVisionRequest(this.options.attachments);
+ this.defaultVisionModel = this.options.visionModel ?? 'gpt-4-vision-preview';
+ if (typeof this.options.attachments?.then === 'function') {
+ this.options.attachments.then((attachments) => this.checkVisionRequest(attachments));
+ } else {
+ this.checkVisionRequest(this.options.attachments);
+ }
const { OPENROUTER_API_KEY, OPENAI_FORCE_PROMPT } = process.env ?? {};
if (OPENROUTER_API_KEY && !this.azure) {
@@ -131,7 +157,13 @@ class OpenAIClient extends BaseClient {
const { isChatGptModel } = this;
this.isUnofficialChatGptModel =
model.startsWith('text-chat') || model.startsWith('text-davinci-002-render');
- this.maxContextTokens = getModelMaxTokens(model) ?? 4095; // 1 less than maximum
+
+ this.maxContextTokens =
+ getModelMaxTokens(
+ model,
+ this.options.endpointType ?? this.options.endpoint,
+ this.options.endpointTokenConfig,
+ ) ?? 4095; // 1 less than maximum
if (this.shouldSummarize) {
this.maxContextTokens = Math.floor(this.maxContextTokens / 2);
@@ -208,13 +240,20 @@ class OpenAIClient extends BaseClient {
* - Sets `this.modelOptions.model` to `gpt-4-vision-preview` if the request is a vision request.
* - Sets `this.isVisionModel` to `true` if vision request.
* - Deletes `this.modelOptions.stop` if vision request.
-   * @param {Array<Promise<MongoFile[]> | MongoFile[]> | Record<string, MongoFile[]>} attachments
+ * @param {MongoFile[]} attachments
*/
checkVisionRequest(attachments) {
- this.isVisionModel = validateVisionModel(this.modelOptions.model);
+ const availableModels = this.options.modelsConfig?.[this.options.endpoint];
+ this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
- if (attachments && !this.isVisionModel) {
- this.modelOptions.model = 'gpt-4-vision-preview';
+ const visionModelAvailable = availableModels?.includes(this.defaultVisionModel);
+ if (
+ attachments &&
+ attachments.some((file) => file?.type && file?.type?.includes('image')) &&
+ visionModelAvailable &&
+ !this.isVisionModel
+ ) {
+ this.modelOptions.model = this.defaultVisionModel;
this.isVisionModel = true;
}
@@ -349,7 +388,7 @@ class OpenAIClient extends BaseClient {
return {
chatGptLabel: this.options.chatGptLabel,
promptPrefix: this.options.promptPrefix,
- resendImages: this.options.resendImages,
+ resendFiles: this.options.resendFiles,
imageDetail: this.options.imageDetail,
...this.modelOptions,
};
@@ -363,54 +402,6 @@ class OpenAIClient extends BaseClient {
};
}
- /**
- *
- * @param {TMessage[]} _messages
- * @returns {TMessage[]}
- */
- async addPreviousAttachments(_messages) {
- if (!this.options.resendImages) {
- return _messages;
- }
-
- /**
- *
- * @param {TMessage} message
- */
- const processMessage = async (message) => {
- if (!this.message_file_map) {
-        /** @type {Record<string, MongoFile[]>} */
- this.message_file_map = {};
- }
-
- const fileIds = message.files.map((file) => file.file_id);
- const files = await getFiles({
- file_id: { $in: fileIds },
- });
-
- await this.addImageURLs(message, files);
-
- this.message_file_map[message.messageId] = files;
- return message;
- };
-
- const promises = [];
-
- for (const message of _messages) {
- if (!message.files) {
- promises.push(message);
- continue;
- }
-
- promises.push(processMessage(message));
- }
-
- const messages = await Promise.all(promises);
-
- this.checkVisionRequest(this.message_file_map);
- return messages;
- }
-
/**
*
* Adds image URLs to the message object and returns the files
@@ -421,8 +412,7 @@ class OpenAIClient extends BaseClient {
*/
async addImageURLs(message, attachments) {
const { files, image_urls } = await encodeAndFormat(this.options.req, attachments);
-
- message.image_urls = image_urls;
+ message.image_urls = image_urls.length ? image_urls : undefined;
return files;
}
@@ -450,23 +440,9 @@ class OpenAIClient extends BaseClient {
let promptTokens;
promptPrefix = (promptPrefix || this.options.promptPrefix || '').trim();
- if (promptPrefix) {
- promptPrefix = `Instructions:\n${promptPrefix}`;
- instructions = {
- role: 'system',
- name: 'instructions',
- content: promptPrefix,
- };
-
- if (this.contextStrategy) {
- instructions.tokenCount = this.getTokenCountForMessage(instructions);
- }
- }
if (this.options.attachments) {
- const attachments = (await this.options.attachments).filter((file) =>
- file.type.includes('image'),
- );
+ const attachments = await this.options.attachments;
if (this.message_file_map) {
this.message_file_map[orderedMessages[orderedMessages.length - 1].messageId] = attachments;
@@ -484,6 +460,13 @@ class OpenAIClient extends BaseClient {
this.options.attachments = files;
}
+ if (this.message_file_map) {
+ this.contextHandlers = createContextHandlers(
+ this.options.req,
+ orderedMessages[orderedMessages.length - 1].text,
+ );
+ }
+
const formattedMessages = orderedMessages.map((message, i) => {
const formattedMessage = formatMessage({
message,
@@ -502,6 +485,11 @@ class OpenAIClient extends BaseClient {
if (this.message_file_map && this.message_file_map[message.messageId]) {
const attachments = this.message_file_map[message.messageId];
for (const file of attachments) {
+ if (file.embedded) {
+ this.contextHandlers?.processFile(file);
+ continue;
+ }
+
orderedMessages[i].tokenCount += this.calculateImageTokenCost({
width: file.width,
height: file.height,
@@ -513,6 +501,24 @@ class OpenAIClient extends BaseClient {
return formattedMessage;
});
+ if (this.contextHandlers) {
+ this.augmentedPrompt = await this.contextHandlers.createContext();
+ promptPrefix = this.augmentedPrompt + promptPrefix;
+ }
+
+ if (promptPrefix) {
+ promptPrefix = `Instructions:\n${promptPrefix.trim()}`;
+ instructions = {
+ role: 'system',
+ name: 'instructions',
+ content: promptPrefix,
+ };
+
+ if (this.contextStrategy) {
+ instructions.tokenCount = this.getTokenCountForMessage(instructions);
+ }
+ }
+
// TODO: need to handle interleaving instructions better
if (this.contextStrategy) {
({ payload, tokenCountMap, promptTokens, messages } = await this.handleContextStrategy({
@@ -540,15 +546,16 @@ class OpenAIClient extends BaseClient {
return result;
}
+ /** @type {sendCompletion} */
async sendCompletion(payload, opts = {}) {
let reply = '';
let result = null;
let streamResult = null;
this.modelOptions.user = this.user;
const invalidBaseUrl = this.completionsUrl && extractBaseURL(this.completionsUrl) === null;
- const useOldMethod = !!(invalidBaseUrl || !this.isChatCompletion);
+ const useOldMethod = !!(invalidBaseUrl || !this.isChatCompletion || typeof Bun !== 'undefined');
if (typeof opts.onProgress === 'function' && useOldMethod) {
- await this.getCompletion(
+ const completionResult = await this.getCompletion(
payload,
(progressMessage) => {
if (progressMessage === '[DONE]') {
@@ -581,12 +588,16 @@ class OpenAIClient extends BaseClient {
opts.onProgress(token);
reply += token;
},
+ opts.onProgress,
opts.abortController || new AbortController(),
);
+
+ if (completionResult && typeof completionResult === 'string') {
+ reply = completionResult;
+ }
} else if (typeof opts.onProgress === 'function' || this.options.useChatCompletion) {
reply = await this.chatCompletion({
payload,
- clientOptions: opts,
onProgress: opts.onProgress,
abortController: opts.abortController,
});
@@ -594,9 +605,14 @@ class OpenAIClient extends BaseClient {
result = await this.getCompletion(
payload,
null,
+ opts.onProgress,
opts.abortController || new AbortController(),
);
+ if (result && typeof result === 'string') {
+ return result.trim();
+ }
+
logger.debug('[OpenAIClient] sendCompletion: result', result);
if (this.isChatCompletion) {
@@ -606,11 +622,11 @@ class OpenAIClient extends BaseClient {
}
}
- if (streamResult && typeof opts.addMetadata === 'function') {
+ if (streamResult) {
const { finish_reason } = streamResult.choices[0];
- opts.addMetadata({ finish_reason });
+ this.metadata = { finish_reason };
}
- return reply.trim();
+ return (reply ?? '').trim();
}
initializeLLM({
@@ -624,6 +640,7 @@ class OpenAIClient extends BaseClient {
context,
tokenBuffer,
initialMessageCount,
+ conversationId,
}) {
const modelOptions = {
modelName: modelName ?? model,
@@ -653,6 +670,16 @@ class OpenAIClient extends BaseClient {
};
}
+ const { headers } = this.options;
+ if (headers && typeof headers === 'object' && !Array.isArray(headers)) {
+ configOptions.baseOptions = {
+ headers: resolveHeaders({
+ ...headers,
+ ...configOptions?.baseOptions?.headers,
+ }),
+ };
+ }
+
if (this.options.proxy) {
configOptions.httpAgent = new HttpsProxyAgent(this.options.proxy);
configOptions.httpsAgent = new HttpsProxyAgent(this.options.proxy);
@@ -671,7 +698,7 @@ class OpenAIClient extends BaseClient {
callbacks: runManager.createCallbacks({
context,
tokenBuffer,
- conversationId: this.conversationId,
+ conversationId: this.conversationId ?? conversationId,
initialMessageCount,
}),
});
@@ -687,12 +714,13 @@ class OpenAIClient extends BaseClient {
*
* @param {Object} params - The parameters for the conversation title generation.
* @param {string} params.text - The user's input.
+ * @param {string} [params.conversationId] - The current conversationId, if not already defined on client initialization.
* @param {string} [params.responseText=''] - The AI's immediate response to the user.
*
   * @returns {Promise<string>} A promise that resolves to the generated conversation title.
* In case of failure, it will return the default title, "New Chat".
*/
- async titleConvo({ text, responseText = '' }) {
+ async titleConvo({ text, conversationId, responseText = '' }) {
let title = 'New Chat';
const convo = `||>User:
"${truncateText(text)}"
@@ -712,6 +740,39 @@ class OpenAIClient extends BaseClient {
max_tokens: 16,
};
+ /** @type {TAzureConfig | undefined} */
+ const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
+
+ const resetTitleOptions = !!(
+ (this.azure && azureConfig) ||
+ (azureConfig && this.options.endpoint === EModelEndpoint.azureOpenAI)
+ );
+
+ if (resetTitleOptions) {
+ const { modelGroupMap, groupMap } = azureConfig;
+ const {
+ azureOptions,
+ baseURL,
+ headers = {},
+ serverless,
+ } = mapModelToAzureConfig({
+ modelName: modelOptions.model,
+ modelGroupMap,
+ groupMap,
+ });
+
+ this.options.headers = resolveHeaders(headers);
+ this.options.reverseProxyUrl = baseURL ?? null;
+ this.langchainProxy = extractBaseURL(this.options.reverseProxyUrl);
+ this.apiKey = azureOptions.azureOpenAIApiKey;
+
+ const groupName = modelGroupMap[modelOptions.model].group;
+ this.options.addParams = azureConfig.groupMap[groupName].addParams;
+ this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
+ this.options.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;
+ this.azure = !serverless && azureOptions;
+ }
+
const titleChatCompletion = async () => {
modelOptions.model = model;
@@ -723,8 +784,7 @@ class OpenAIClient extends BaseClient {
const instructionsPayload = [
{
role: 'system',
- content: `Detect user language and write in the same language an extremely concise title for this conversation, which you must accurately detect.
-Write in the detected language. Title in 5 Words or Less. No Punctuation or Quotation. Do not mention the language. All first letters of every word should be capitalized and write the title in User Language only.
+ content: `Please generate ${titleInstruction}
${convo}
@@ -732,10 +792,18 @@ ${convo}
},
];
+ const promptTokens = this.getTokenCountForMessage(instructionsPayload[0]);
+
try {
+ let useChatCompletion = true;
+ if (this.options.reverseProxyUrl === CohereConstants.API_URL) {
+ useChatCompletion = false;
+ }
title = (
- await this.sendPayload(instructionsPayload, { modelOptions, useChatCompletion: true })
+ await this.sendPayload(instructionsPayload, { modelOptions, useChatCompletion })
).replaceAll('"', '');
+ const completionTokens = this.getTokenCount(title);
+ this.recordTokenUsage({ promptTokens, completionTokens, context: 'title' });
} catch (e) {
logger.error(
'[OpenAIClient] There was an issue generating the title with the completion method',
@@ -752,7 +820,12 @@ ${convo}
try {
this.abortController = new AbortController();
- const llm = this.initializeLLM({ ...modelOptions, context: 'title', tokenBuffer: 150 });
+ const llm = this.initializeLLM({
+ ...modelOptions,
+ conversationId,
+ context: 'title',
+ tokenBuffer: 150,
+ });
title = await runTitleChain({ llm, text, convo, signal: this.abortController.signal });
} catch (e) {
if (e?.message?.toLowerCase()?.includes('abort')) {
@@ -779,7 +852,12 @@ ${convo}
// TODO: remove the gpt fallback and make it specific to endpoint
const { OPENAI_SUMMARY_MODEL = 'gpt-3.5-turbo' } = process.env ?? {};
const model = this.options.summaryModel ?? OPENAI_SUMMARY_MODEL;
- const maxContextTokens = getModelMaxTokens(model) ?? 4095;
+ const maxContextTokens =
+ getModelMaxTokens(
+ model,
+ this.options.endpointType ?? this.options.endpoint,
+ this.options.endpointTokenConfig,
+ ) ?? 4095; // 1 less than maximum
// 3 tokens for the assistant label, and 98 for the summarizer prompt (101)
let promptBuffer = 101;
@@ -877,14 +955,14 @@ ${convo}
}
}
- async recordTokenUsage({ promptTokens, completionTokens }) {
- logger.debug('[OpenAIClient] recordTokenUsage:', { promptTokens, completionTokens });
+ async recordTokenUsage({ promptTokens, completionTokens, context = 'message' }) {
await spendTokens(
{
+ context,
user: this.user,
model: this.modelOptions.model,
- context: 'message',
conversationId: this.conversationId,
+ endpointTokenConfig: this.options.endpointTokenConfig,
},
{ promptTokens, completionTokens },
);
@@ -897,7 +975,7 @@ ${convo}
});
}
- async chatCompletion({ payload, onProgress, clientOptions, abortController = null }) {
+ async chatCompletion({ payload, onProgress, abortController = null }) {
let error = null;
const errorCallback = (err) => (error = err);
let intermediateReply = '';
@@ -918,15 +996,6 @@ ${convo}
}
const baseURL = extractBaseURL(this.completionsUrl);
- // let { messages: _msgsToLog, ...modelOptionsToLog } = modelOptions;
- // if (modelOptionsToLog.messages) {
- // _msgsToLog = modelOptionsToLog.messages.map((msg) => {
- // let { content, ...rest } = msg;
-
- // if (content)
- // return { ...rest, content: truncateText(content) };
- // });
- // }
logger.debug('[OpenAIClient] chatCompletion', { baseURL, modelOptions });
const opts = {
baseURL,
@@ -951,6 +1020,38 @@ ${convo}
modelOptions.max_tokens = 4000;
}
+ /** @type {TAzureConfig | undefined} */
+ const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
+
+ if (
+ (this.azure && this.isVisionModel && azureConfig) ||
+ (azureConfig && this.isVisionModel && this.options.endpoint === EModelEndpoint.azureOpenAI)
+ ) {
+ const { modelGroupMap, groupMap } = azureConfig;
+ const {
+ azureOptions,
+ baseURL,
+ headers = {},
+ serverless,
+ } = mapModelToAzureConfig({
+ modelName: modelOptions.model,
+ modelGroupMap,
+ groupMap,
+ });
+ opts.defaultHeaders = resolveHeaders(headers);
+ this.langchainProxy = extractBaseURL(baseURL);
+ this.apiKey = azureOptions.azureOpenAIApiKey;
+
+ const groupName = modelGroupMap[modelOptions.model].group;
+ this.options.addParams = azureConfig.groupMap[groupName].addParams;
+ this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
+ // Note: `forcePrompt` not re-assigned as only chat models are vision models
+
+ this.azure = !serverless && azureOptions;
+ this.azureEndpoint =
+ !serverless && genAzureChatCompletion(this.azure, modelOptions.model, this);
+ }
+
if (this.azure || this.options.azure) {
// Azure does not accept `model` in the body, so we need to remove it.
delete modelOptions.model;
@@ -958,9 +1059,10 @@ ${convo}
opts.baseURL = this.langchainProxy
? constructAzureURL({
baseURL: this.langchainProxy,
- azure: this.azure,
+ azureOptions: this.azure,
})
- : this.azureEndpoint.split(/\/(chat|completion)/)[0];
+          : this.azureEndpoint.split(/(?<!\/)\/(chat|completion)\//)[0];
+
+      const systemMessageIndex = messages.findIndex((msg) => msg.role === 'system');
+
+ if (systemMessageIndex > 0) {
+ const [systemMessage] = messages.splice(systemMessageIndex, 1);
+ messages.unshift(systemMessage);
+ }
+
+ modelOptions.messages = messages;
+
if (messages.length === 1 && messages[0].role === 'system') {
modelOptions.messages[0].role = 'user';
}
@@ -988,12 +1104,20 @@ ${convo}
...modelOptions,
...this.options.addParams,
};
+ logger.debug('[OpenAIClient] chatCompletion: added params', {
+ addParams: this.options.addParams,
+ modelOptions,
+ });
}
if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
this.options.dropParams.forEach((param) => {
delete modelOptions[param];
});
+ logger.debug('[OpenAIClient] chatCompletion: dropped params', {
+ dropParams: this.options.dropParams,
+ modelOptions,
+ });
}
let UnexpectedRoleError = false;
@@ -1009,6 +1133,16 @@ ${convo}
.on('error', (err) => {
handleOpenAIErrors(err, errorCallback, 'stream');
})
+ .on('finalChatCompletion', (finalChatCompletion) => {
+ const finalMessage = finalChatCompletion?.choices?.[0]?.message;
+ if (finalMessage && finalMessage?.role !== 'assistant') {
+ finalChatCompletion.choices[0].message.role = 'assistant';
+ }
+
+ if (finalMessage && !finalMessage?.content?.trim()) {
+ finalChatCompletion.choices[0].message.content = intermediateReply;
+ }
+ })
.on('finalMessage', (message) => {
if (message?.role !== 'assistant') {
stream.messages.push({ role: 'assistant', content: intermediateReply });
@@ -1054,12 +1188,20 @@ ${convo}
}
const { message, finish_reason } = chatCompletion.choices[0];
- if (chatCompletion && typeof clientOptions.addMetadata === 'function') {
- clientOptions.addMetadata({ finish_reason });
+ if (chatCompletion) {
+ this.metadata = { finish_reason };
}
logger.debug('[OpenAIClient] chatCompletion response', chatCompletion);
+ if (!message?.content?.trim() && intermediateReply.length) {
+ logger.debug(
+ '[OpenAIClient] chatCompletion: using intermediateReply due to empty message.content',
+ { intermediateReply },
+ );
+ return intermediateReply;
+ }
+
return message.content;
} catch (err) {
if (
@@ -1072,6 +1214,9 @@ ${convo}
err?.message?.includes(
'OpenAI error: Invalid final message: OpenAI expects final message to include role=assistant',
) ||
+ err?.message?.includes(
+ 'stream ended without producing a ChatCompletionMessage with role=assistant',
+ ) ||
err?.message?.includes('The server had an error processing your request') ||
err?.message?.includes('missing finish_reason') ||
err?.message?.includes('missing role') ||
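
Both clients now trim the Azure endpoint with `split(/(?<!\/)\/(chat|completion)\//)[0]`; the negative lookbehind avoids matching a doubled slash, which the previous `/\/(chat|completion)/` pattern could trip over. A quick demonstration with placeholder instance and deployment names:

```js
// Placeholder Azure endpoint; only the split behavior is demonstrated.
const azureEndpoint =
  'https://my-instance.openai.azure.com/openai/deployments/gpt-4/chat/completions?api-version=2024-02-01';

const base = azureEndpoint.split(/(?<!\/)\/(chat|completion)\//)[0];
console.log(base);
// -> https://my-instance.openai.azure.com/openai/deployments/gpt-4
```
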
diff --git a/api/app/clients/PluginsClient.js b/api/app/clients/PluginsClient.js
index 6118c3547a1..033c122664f 100644
--- a/api/app/clients/PluginsClient.js
+++ b/api/app/clients/PluginsClient.js
@@ -3,6 +3,7 @@ const { CallbackManager } = require('langchain/callbacks');
const { BufferMemory, ChatMessageHistory } = require('langchain/memory');
const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents');
const { addImages, buildErrorInput, buildPromptPrefix } = require('./output_parsers');
+const { processFileURL } = require('~/server/services/Files/process');
const { EModelEndpoint } = require('librechat-data-provider');
const { formatLangChainMessages } = require('./prompts');
const checkBalance = require('~/models/checkBalance');
@@ -30,10 +31,6 @@ class PluginsClient extends OpenAIClient {
super.setOptions(options);
- if (this.functionsAgent && this.agentOptions.model && !this.useOpenRouter) {
- this.agentOptions.model = this.getFunctionModelName(this.agentOptions.model);
- }
-
this.isGpt3 = this.modelOptions?.model?.includes('gpt-3');
if (this.options.reverseProxyUrl) {
@@ -113,6 +110,7 @@ class PluginsClient extends OpenAIClient {
openAIApiKey: this.openAIApiKey,
conversationId: this.conversationId,
fileStrategy: this.options.req.app.locals.fileStrategy,
+ processFileURL,
message,
},
});
diff --git a/api/app/clients/llm/createCoherePayload.js b/api/app/clients/llm/createCoherePayload.js
new file mode 100644
index 00000000000..58803d76f3c
--- /dev/null
+++ b/api/app/clients/llm/createCoherePayload.js
@@ -0,0 +1,85 @@
+const { CohereConstants } = require('librechat-data-provider');
+const { titleInstruction } = require('../prompts/titlePrompts');
+
+// Mapping OpenAI roles to Cohere roles
+const roleMap = {
+ user: CohereConstants.ROLE_USER,
+ assistant: CohereConstants.ROLE_CHATBOT,
+ system: CohereConstants.ROLE_SYSTEM, // Recognize and map the system role explicitly
+};
+
+/**
+ * Adjusts an OpenAI ChatCompletionPayload to conform with Cohere's expected chat payload format.
+ * Now includes handling for "system" roles explicitly mentioned.
+ *
+ * @param {Object} options - Object containing the model options.
+ * @param {ChatCompletionPayload} options.modelOptions - The OpenAI model payload options.
+ * @returns {CohereChatStreamRequest} Cohere-compatible chat API payload.
+ */
+function createCoherePayload({ modelOptions }) {
+ /** @type {string | undefined} */
+ let preamble;
+ let latestUserMessageContent = '';
+ const {
+ stream,
+ stop,
+ top_p,
+ temperature,
+ frequency_penalty,
+ presence_penalty,
+ max_tokens,
+ messages,
+ model,
+ ...rest
+ } = modelOptions;
+
+ // Filter out the latest user message and transform remaining messages to Cohere's chat_history format
+ let chatHistory = messages.reduce((acc, message, index, arr) => {
+ const isLastUserMessage = index === arr.length - 1 && message.role === 'user';
+
+ const messageContent =
+ typeof message.content === 'string'
+ ? message.content
+ : message.content.map((part) => (part.type === 'text' ? part.text : '')).join(' ');
+
+ if (isLastUserMessage) {
+ latestUserMessageContent = messageContent;
+ } else {
+ acc.push({
+ role: roleMap[message.role] || CohereConstants.ROLE_USER,
+ message: messageContent,
+ });
+ }
+
+ return acc;
+ }, []);
+
+ if (
+ chatHistory.length === 1 &&
+ chatHistory[0].role === CohereConstants.ROLE_SYSTEM &&
+ !latestUserMessageContent.length
+ ) {
+ const message = chatHistory[0].message;
+ latestUserMessageContent = message.includes(titleInstruction)
+ ? CohereConstants.TITLE_MESSAGE
+ : '.';
+ preamble = message;
+ }
+
+ return {
+ message: latestUserMessageContent,
+ model: model,
+ chatHistory,
+ stream: stream ?? false,
+ temperature: temperature,
+ frequencyPenalty: frequency_penalty,
+ presencePenalty: presence_penalty,
+ maxTokens: max_tokens,
+ stopSequences: stop,
+ preamble,
+ p: top_p,
+ ...rest,
+ };
+}
+
+module.exports = createCoherePayload;
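
A usage sketch of the mapping, assuming the `CohereConstants` role values resolve to `'USER'`, `'CHATBOT'`, and `'SYSTEM'` (check `librechat-data-provider` for the actual strings):

```js
// Sketch: the commented outputs assume USER/CHATBOT/SYSTEM constant values.
const createCoherePayload = require('./createCoherePayload');

const payload = createCoherePayload({
  modelOptions: {
    model: 'command-r',
    stream: true,
    temperature: 0.7,
    messages: [
      { role: 'system', content: 'Be terse.' },
      { role: 'assistant', content: 'Hi! How can I help?' },
      { role: 'user', content: 'Summarize our chat so far.' },
    ],
  },
});

// payload.message     -> 'Summarize our chat so far.' (latest user turn, pulled out of history)
// payload.chatHistory -> [ { role: 'SYSTEM', message: 'Be terse.' },
//                          { role: 'CHATBOT', message: 'Hi! How can I help?' } ]
// payload.stream      -> true; snake_case sampling params map to Cohere's camelCase fields
```
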
diff --git a/api/app/clients/llm/createLLM.js b/api/app/clients/llm/createLLM.js
index 62f2fe86f95..09b29cca8e9 100644
--- a/api/app/clients/llm/createLLM.js
+++ b/api/app/clients/llm/createLLM.js
@@ -55,16 +55,18 @@ function createLLM({
}
if (azure && configOptions.basePath) {
- configOptions.basePath = constructAzureURL({
+ const azureURL = constructAzureURL({
baseURL: configOptions.basePath,
- azure: azureOptions,
+ azureOptions,
});
+ azureOptions.azureOpenAIBasePath = azureURL.split(
+ `/${azureOptions.azureOpenAIApiDeploymentName}`,
+ )[0];
}
return new ChatOpenAI(
{
streaming,
- verbose: true,
credentials,
configuration,
...azureOptions,
diff --git a/api/app/clients/llm/index.js b/api/app/clients/llm/index.js
index 46478ade63b..2e09bbb841b 100644
--- a/api/app/clients/llm/index.js
+++ b/api/app/clients/llm/index.js
@@ -1,7 +1,9 @@
const createLLM = require('./createLLM');
const RunManager = require('./RunManager');
+const createCoherePayload = require('./createCoherePayload');
module.exports = {
createLLM,
RunManager,
+ createCoherePayload,
};
diff --git a/api/app/clients/prompts/createContextHandlers.js b/api/app/clients/prompts/createContextHandlers.js
new file mode 100644
index 00000000000..e48dfd8e672
--- /dev/null
+++ b/api/app/clients/prompts/createContextHandlers.js
@@ -0,0 +1,159 @@
+const axios = require('axios');
+const { isEnabled } = require('~/server/utils');
+const { logger } = require('~/config');
+
+const footer = `Use the context as your learned knowledge to better answer the user.
+
+In your response, remember to follow these guidelines:
+- If you don't know the answer, simply say that you don't know.
+- If you are unsure how to answer, ask for clarification.
+- Avoid mentioning that you obtained the information from the context.
+
+Answer appropriately in the user's language.
+`;
+
+function createContextHandlers(req, userMessageContent) {
+ if (!process.env.RAG_API_URL) {
+ return;
+ }
+
+ const queryPromises = [];
+ const processedFiles = [];
+ const processedIds = new Set();
+ const jwtToken = req.headers.authorization.split(' ')[1];
+ const useFullContext = isEnabled(process.env.RAG_USE_FULL_CONTEXT);
+
+ const query = async (file) => {
+ if (useFullContext) {
+ return axios.get(`${process.env.RAG_API_URL}/documents/${file.file_id}/context`, {
+ headers: {
+ Authorization: `Bearer ${jwtToken}`,
+ },
+ });
+ }
+
+ return axios.post(
+ `${process.env.RAG_API_URL}/query`,
+ {
+ file_id: file.file_id,
+ query: userMessageContent,
+ k: 4,
+ },
+ {
+ headers: {
+ Authorization: `Bearer ${jwtToken}`,
+ 'Content-Type': 'application/json',
+ },
+ },
+ );
+ };
+
+ const processFile = async (file) => {
+ if (file.embedded && !processedIds.has(file.file_id)) {
+ try {
+ const promise = query(file);
+ queryPromises.push(promise);
+ processedFiles.push(file);
+ processedIds.add(file.file_id);
+ } catch (error) {
+ logger.error(`Error processing file ${file.filename}:`, error);
+ }
+ }
+ };
+
+ const createContext = async () => {
+ try {
+ if (!queryPromises.length || !processedFiles.length) {
+ return '';
+ }
+
+ const oneFile = processedFiles.length === 1;
+ const header = `The user has attached ${oneFile ? 'a' : processedFiles.length} file${
+ !oneFile ? 's' : ''
+ } to the conversation:`;
+
+      const files = `${
+        oneFile
+          ? ''
+          : `
+      <files>`
+      }${processedFiles
+        .map(
+          (file) => `
+          <file>
+            <filename>${file.filename}</filename>
+            <type>${file.type}</type>
+          </file>`,
+        )
+        .join('')}${
+        oneFile
+          ? ''
+          : `
+      </files>`
+      }`;
+
+ const resolvedQueries = await Promise.all(queryPromises);
+
+ const context = resolvedQueries
+ .map((queryResult, index) => {
+ const file = processedFiles[index];
+ let contextItems = queryResult.data;
+
+ const generateContext = (currentContext) =>
+            `
+        <file>
+          <filename>${file.filename}</filename>
+          <context>${currentContext}
+          </context>
+        </file>`;
+
+ if (useFullContext) {
+ return generateContext(`\n${contextItems}`);
+ }
+
+ contextItems = queryResult.data
+ .map((item) => {
+ const pageContent = item[0].page_content;
+              return `
+          <contextItem>
+            <![CDATA[${pageContent?.trim()}]]>
+          </contextItem>`;
+ })
+ .join('');
+
+ return generateContext(contextItems);
+ })
+ .join('');
+
+ if (useFullContext) {
+ const prompt = `${header}
+ ${context}
+ ${footer}`;
+
+        return prompt;
+ }
+
+ const prompt = `${header}
+ ${files}
+
+      A semantic search was executed with the user's message as the query, retrieving the following context inside <context></context> XML tags.
+
+      <context>
+      ${context}
+      </context>
+
+ ${footer}`;
+
+ return prompt;
+ } catch (error) {
+ logger.error('Error creating context:', error);
+ throw error;
+ }
+ };
+
+ return {
+ processFile,
+ createContext,
+ };
+}
+
+module.exports = createContextHandlers;
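
Putting the handlers together: the client changes above queue embedded files while formatting messages, then resolve the context once and prepend it to the prompt prefix. A hedged usage sketch, assuming `RAG_API_URL` is set and `req` carries a Bearer token:

```js
// Usage sketch; req and attachments are stand-ins for real request objects.
const createContextHandlers = require('./createContextHandlers');

async function buildAugmentedPrefix(req, attachments, userText, promptPrefix = '') {
  const handlers = createContextHandlers(req, userText); // undefined when RAG_API_URL is unset
  if (!handlers) {
    return promptPrefix;
  }

  for (const file of attachments) {
    if (file.embedded) {
      handlers.processFile(file); // queues one RAG query per embedded file
    }
  }

  const augmentedPrompt = await handlers.createContext(); // '' when nothing was queued
  return augmentedPrompt + promptPrefix;
}
```
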
diff --git a/api/app/clients/prompts/createVisionPrompt.js b/api/app/clients/prompts/createVisionPrompt.js
new file mode 100644
index 00000000000..5d8a7bbf51b
--- /dev/null
+++ b/api/app/clients/prompts/createVisionPrompt.js
@@ -0,0 +1,34 @@
+/**
+ * Generates a prompt instructing the model to describe an image in detail, tailored to different types of visual content.
+ * @param {boolean} pluralized - Whether to pluralize the prompt for multiple images.
+ * @returns {string} - The generated vision prompt.
+ */
+const createVisionPrompt = (pluralized = false) => {
+ return `Please describe the image${
+ pluralized ? 's' : ''
+ } in detail, covering relevant aspects such as:
+
+ For photographs, illustrations, or artwork:
+ - The main subject(s) and their appearance, positioning, and actions
+ - The setting, background, and any notable objects or elements
+ - Colors, lighting, and overall mood or atmosphere
+ - Any interesting details, textures, or patterns
+ - The style, technique, or medium used (if discernible)
+
+ For screenshots or images containing text:
+ - The content and purpose of the text
+ - The layout, formatting, and organization of the information
+ - Any notable visual elements, such as logos, icons, or graphics
+ - The overall context or message conveyed by the screenshot
+
+ For graphs, charts, or data visualizations:
+ - The type of graph or chart (e.g., bar graph, line chart, pie chart)
+ - The variables being compared or analyzed
+ - Any trends, patterns, or outliers in the data
+ - The axis labels, scales, and units of measurement
+ - The title, legend, and any additional context provided
+
+ Be as specific and descriptive as possible while maintaining clarity and concision.`;
+};
+
+module.exports = createVisionPrompt;
diff --git a/api/app/clients/prompts/formatMessages.js b/api/app/clients/prompts/formatMessages.js
index 1b97bc7ffa1..c19eee260af 100644
--- a/api/app/clients/prompts/formatMessages.js
+++ b/api/app/clients/prompts/formatMessages.js
@@ -1,3 +1,4 @@
+const { EModelEndpoint } = require('librechat-data-provider');
const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');
/**
@@ -7,10 +8,16 @@ const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');
* @param {Object} params.message - The message object to format.
* @param {string} [params.message.role] - The role of the message sender (must be 'user').
* @param {string} [params.message.content] - The text content of the message.
+ * @param {EModelEndpoint} [params.endpoint] - Identifier for specific endpoint handling
* @param {Array} [params.image_urls] - The image_urls to attach to the message.
* @returns {(Object)} - The formatted message.
*/
-const formatVisionMessage = ({ message, image_urls }) => {
+const formatVisionMessage = ({ message, image_urls, endpoint }) => {
+ if (endpoint === EModelEndpoint.anthropic) {
+ message.content = [...image_urls, { type: 'text', text: message.content }];
+ return message;
+ }
+
message.content = [{ type: 'text', text: message.content }, ...image_urls];
return message;
@@ -29,10 +36,11 @@ const formatVisionMessage = ({ message, image_urls }) => {
* @param {Array} [params.message.image_urls] - The image_urls attached to the message for Vision API.
* @param {string} [params.userName] - The name of the user.
* @param {string} [params.assistantName] - The name of the assistant.
+ * @param {string} [params.endpoint] - Identifier for specific endpoint handling
* @param {boolean} [params.langChain=false] - Whether to return a LangChain message object.
* @returns {(Object|HumanMessage|AIMessage|SystemMessage)} - The formatted message.
*/
-const formatMessage = ({ message, userName, assistantName, langChain = false }) => {
+const formatMessage = ({ message, userName, assistantName, endpoint, langChain = false }) => {
let { role: _role, _name, sender, text, content: _content, lc_id } = message;
if (lc_id && lc_id[2] && !langChain) {
const roleMapping = {
@@ -51,7 +59,11 @@ const formatMessage = ({ message, userName, assistantName, langChain = false })
const { image_urls } = message;
if (Array.isArray(image_urls) && image_urls.length > 0 && role === 'user') {
- return formatVisionMessage({ message: formattedMessage, image_urls: message.image_urls });
+ return formatVisionMessage({
+ message: formattedMessage,
+ image_urls: message.image_urls,
+ endpoint,
+ });
}
if (_name) {
diff --git a/api/app/clients/prompts/formatMessages.spec.js b/api/app/clients/prompts/formatMessages.spec.js
index 636cdb1c8e5..8d4956b3811 100644
--- a/api/app/clients/prompts/formatMessages.spec.js
+++ b/api/app/clients/prompts/formatMessages.spec.js
@@ -1,5 +1,6 @@
-const { formatMessage, formatLangChainMessages, formatFromLangChain } = require('./formatMessages');
+const { Constants } = require('librechat-data-provider');
const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');
+const { formatMessage, formatLangChainMessages, formatFromLangChain } = require('./formatMessages');
describe('formatMessage', () => {
it('formats user message', () => {
@@ -61,7 +62,7 @@ describe('formatMessage', () => {
isCreatedByUser: true,
isEdited: false,
model: null,
- parentMessageId: '00000000-0000-0000-0000-000000000000',
+ parentMessageId: Constants.NO_PARENT,
sender: 'User',
text: 'hi',
tokenCount: 5,
diff --git a/api/app/clients/prompts/index.js b/api/app/clients/prompts/index.js
index 40db3d90439..36bb6f7e283 100644
--- a/api/app/clients/prompts/index.js
+++ b/api/app/clients/prompts/index.js
@@ -4,6 +4,8 @@ const handleInputs = require('./handleInputs');
const instructions = require('./instructions');
const titlePrompts = require('./titlePrompts');
const truncateText = require('./truncateText');
+const createVisionPrompt = require('./createVisionPrompt');
+const createContextHandlers = require('./createContextHandlers');
module.exports = {
...formatMessages,
@@ -12,4 +14,6 @@ module.exports = {
...instructions,
...titlePrompts,
truncateText,
+ createVisionPrompt,
+ createContextHandlers,
};
diff --git a/api/app/clients/prompts/titlePrompts.js b/api/app/clients/prompts/titlePrompts.js
index 1e893ba295d..83d8909f3a7 100644
--- a/api/app/clients/prompts/titlePrompts.js
+++ b/api/app/clients/prompts/titlePrompts.js
@@ -27,7 +27,63 @@ ${convo}`,
return titlePrompt;
};
+const titleInstruction =
+ 'a concise, 5-word-or-less title for the conversation, using its same language, with no punctuation. Apply title case conventions appropriate for the language. For English, use AP Stylebook Title Case. Never directly mention the language name or the word "title"';
+const titleFunctionPrompt = `In this environment you have access to a set of tools you can use to generate the conversation title.
+
+You may call them like this:
+<function_calls>
+<invoke>
+<tool_name>$TOOL_NAME</tool_name>
+<parameters>
+<$PARAMETER_NAME>$PARAMETER_VALUE</$PARAMETER_NAME>
+...
+</parameters>
+</invoke>
+</function_calls>
+
+Here are the tools available:
+<tools>
+<tool_description>
+<tool_name>submit_title</tool_name>
+<description>
+Submit a brief title in the conversation's language, following the parameter description closely.
+</description>
+<parameters>
+<parameter>
+<name>title</name>
+<type>string</type>
+<description>${titleInstruction}</description>
+</parameter>
+</parameters>
+</tool_description>
+</tools>`;
+
+/**
+ * Parses titles from title functions based on the provided prompt.
+ * @param {string} prompt - The prompt containing the title function.
+ * @returns {string} The parsed title. "New Chat" if no title is found.
+ */
+function parseTitleFromPrompt(prompt) {
+  const titleRegex = /<title>(.+?)<\/title>/;
+ const titleMatch = prompt.match(titleRegex);
+
+ if (titleMatch && titleMatch[1]) {
+ const title = titleMatch[1].trim();
+
+ // // Capitalize the first letter of each word; Note: unnecessary due to title case prompting
+ // const capitalizedTitle = title.replace(/\b\w/g, (char) => char.toUpperCase());
+
+ return title;
+ }
+
+ return 'New Chat';
+}
+
module.exports = {
langPrompt,
+ titleInstruction,
createTitlePrompt,
+ titleFunctionPrompt,
+ parseTitleFromPrompt,
};
diff --git a/api/app/clients/specs/BaseClient.test.js b/api/app/clients/specs/BaseClient.test.js
index 889499fbc29..9ffa7e04f1b 100644
--- a/api/app/clients/specs/BaseClient.test.js
+++ b/api/app/clients/specs/BaseClient.test.js
@@ -1,3 +1,4 @@
+const { Constants } = require('librechat-data-provider');
const { initializeFakeClient } = require('./FakeClient');
jest.mock('../../../lib/db/connectDb');
@@ -307,7 +308,7 @@ describe('BaseClient', () => {
const unorderedMessages = [
{ id: '3', parentMessageId: '2', text: 'Message 3' },
{ id: '2', parentMessageId: '1', text: 'Message 2' },
- { id: '1', parentMessageId: '00000000-0000-0000-0000-000000000000', text: 'Message 1' },
+ { id: '1', parentMessageId: Constants.NO_PARENT, text: 'Message 1' },
];
it('should return ordered messages based on parentMessageId', () => {
@@ -316,7 +317,7 @@ describe('BaseClient', () => {
parentMessageId: '3',
});
expect(result).toEqual([
- { id: '1', parentMessageId: '00000000-0000-0000-0000-000000000000', text: 'Message 1' },
+ { id: '1', parentMessageId: Constants.NO_PARENT, text: 'Message 1' },
{ id: '2', parentMessageId: '1', text: 'Message 2' },
{ id: '3', parentMessageId: '2', text: 'Message 3' },
]);
diff --git a/api/app/clients/specs/PluginsClient.test.js b/api/app/clients/specs/PluginsClient.test.js
index b4e42b1fc51..dfd57b23b94 100644
--- a/api/app/clients/specs/PluginsClient.test.js
+++ b/api/app/clients/specs/PluginsClient.test.js
@@ -1,9 +1,10 @@
+const crypto = require('crypto');
+const { Constants } = require('librechat-data-provider');
const { HumanChatMessage, AIChatMessage } = require('langchain/schema');
const PluginsClient = require('../PluginsClient');
-const crypto = require('crypto');
-jest.mock('../../../lib/db/connectDb');
-jest.mock('../../../models/Conversation', () => {
+jest.mock('~/lib/db/connectDb');
+jest.mock('~/models/Conversation', () => {
return function () {
return {
save: jest.fn(),
@@ -12,6 +13,12 @@ jest.mock('../../../models/Conversation', () => {
};
});
+const defaultAzureOptions = {
+ azureOpenAIApiInstanceName: 'your-instance-name',
+ azureOpenAIApiDeploymentName: 'your-deployment-name',
+ azureOpenAIApiVersion: '2020-07-01-preview',
+};
+
describe('PluginsClient', () => {
let TestAgent;
let options = {
@@ -60,7 +67,7 @@ describe('PluginsClient', () => {
TestAgent.setOptions(opts);
}
const conversationId = opts.conversationId || crypto.randomUUID();
- const parentMessageId = opts.parentMessageId || '00000000-0000-0000-0000-000000000000';
+ const parentMessageId = opts.parentMessageId || Constants.NO_PARENT;
const userMessageId = opts.overrideParentMessageId || crypto.randomUUID();
this.pastMessages = await TestAgent.loadHistory(
conversationId,
@@ -187,4 +194,30 @@ describe('PluginsClient', () => {
expect(client.getFunctionModelName('')).toBe('gpt-3.5-turbo');
});
});
+ describe('Azure OpenAI tests specific to Plugins', () => {
+ // TODO: add more tests for Azure OpenAI integration with Plugins
+ // let client;
+ // beforeEach(() => {
+ // client = new PluginsClient('dummy_api_key');
+ // });
+
+ test('should not call getFunctionModelName when azure options are set', () => {
+ const spy = jest.spyOn(PluginsClient.prototype, 'getFunctionModelName');
+ const model = 'gpt-4-turbo';
+
+ // note, without the azure change in PR #1766, `getFunctionModelName` is called twice
+ const testClient = new PluginsClient('dummy_api_key', {
+ agentOptions: {
+ model,
+ agent: 'functions',
+ },
+ azure: defaultAzureOptions,
+ });
+
+ expect(spy).not.toHaveBeenCalled();
+ expect(testClient.agentOptions.model).toBe(model);
+
+ spy.mockRestore();
+ });
+ });
});
diff --git a/api/app/clients/tools/DALL-E.js b/api/app/clients/tools/DALL-E.js
index 4eca7f7932e..4600bdb026e 100644
--- a/api/app/clients/tools/DALL-E.js
+++ b/api/app/clients/tools/DALL-E.js
@@ -3,42 +3,39 @@ const OpenAI = require('openai');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('langchain/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
+const { FileContext } = require('librechat-data-provider');
const { getImageBasename } = require('~/server/services/Files/images');
-const { processFileURL } = require('~/server/services/Files/process');
const extractBaseURL = require('~/utils/extractBaseURL');
const { logger } = require('~/config');
-const {
- DALLE2_SYSTEM_PROMPT,
- DALLE_REVERSE_PROXY,
- PROXY,
- DALLE2_AZURE_API_VERSION,
- DALLE2_BASEURL,
- DALLE2_API_KEY,
- DALLE_API_KEY,
-} = process.env;
class OpenAICreateImage extends Tool {
constructor(fields = {}) {
super();
this.userId = fields.userId;
this.fileStrategy = fields.fileStrategy;
+ if (fields.processFileURL) {
+ this.processFileURL = fields.processFileURL.bind(this);
+ }
let apiKey = fields.DALLE2_API_KEY ?? fields.DALLE_API_KEY ?? this.getApiKey();
const config = { apiKey };
- if (DALLE_REVERSE_PROXY) {
- config.baseURL = extractBaseURL(DALLE_REVERSE_PROXY);
+ if (process.env.DALLE_REVERSE_PROXY) {
+ config.baseURL = extractBaseURL(process.env.DALLE_REVERSE_PROXY);
}
- if (DALLE2_AZURE_API_VERSION && DALLE2_BASEURL) {
- config.baseURL = DALLE2_BASEURL;
- config.defaultQuery = { 'api-version': DALLE2_AZURE_API_VERSION };
- config.defaultHeaders = { 'api-key': DALLE2_API_KEY, 'Content-Type': 'application/json' };
- config.apiKey = DALLE2_API_KEY;
+ if (process.env.DALLE2_AZURE_API_VERSION && process.env.DALLE2_BASEURL) {
+ config.baseURL = process.env.DALLE2_BASEURL;
+ config.defaultQuery = { 'api-version': process.env.DALLE2_AZURE_API_VERSION };
+ config.defaultHeaders = {
+ 'api-key': process.env.DALLE2_API_KEY,
+ 'Content-Type': 'application/json',
+ };
+ config.apiKey = process.env.DALLE2_API_KEY;
}
- if (PROXY) {
- config.httpAgent = new HttpsProxyAgent(PROXY);
+ if (process.env.PROXY) {
+ config.httpAgent = new HttpsProxyAgent(process.env.PROXY);
}
this.openai = new OpenAI(config);
@@ -51,7 +48,7 @@ Guidelines:
"Subject: [subject], Style: [style], Color: [color], Details: [details], Emotion: [emotion]"
- Generate images only once per human query unless explicitly requested by the user`;
this.description_for_model =
- DALLE2_SYSTEM_PROMPT ??
+ process.env.DALLE2_SYSTEM_PROMPT ??
`// Whenever a description of an image is given, generate prompts (following these rules), and use dalle to create the image. If the user does not ask for a specific number of images, default to creating 2 prompts to send to dalle that are written to be as diverse as possible. All prompts sent to dalle must abide by the following policies:
// 1. Prompts must be in English. Translate to English if needed.
// 2. One image per function call. Create only 1 image per request unless explicitly told to generate more than 1 image.
@@ -67,7 +64,7 @@ Guidelines:
}
getApiKey() {
- const apiKey = DALLE2_API_KEY ?? DALLE_API_KEY ?? '';
+ const apiKey = process.env.DALLE2_API_KEY ?? process.env.DALLE_API_KEY ?? '';
if (!apiKey) {
throw new Error('Missing DALLE_API_KEY environment variable.');
}
@@ -86,13 +83,21 @@ Guidelines:
}
async _call(input) {
- const resp = await this.openai.images.generate({
- prompt: this.replaceUnwantedChars(input),
- // TODO: Future idea -- could we ask an LLM to extract these arguments from an input that might contain them?
- n: 1,
- // size: '1024x1024'
- size: '512x512',
- });
+ let resp;
+
+ try {
+ resp = await this.openai.images.generate({
+ prompt: this.replaceUnwantedChars(input),
+ // TODO: Future idea -- could we ask an LLM to extract these arguments from an input that might contain them?
+ n: 1,
+ // size: '1024x1024'
+ size: '512x512',
+ });
+ } catch (error) {
+ logger.error('[DALL-E] Problem generating the image:', error);
+ return `Something went wrong when trying to generate the image. The DALL-E API may be unavailable:
+Error Message: ${error.message}`;
+ }
const theImageUrl = resp.data[0].url;
@@ -116,15 +121,16 @@ Guidelines:
});
try {
- const result = await processFileURL({
+ const result = await this.processFileURL({
fileStrategy: this.fileStrategy,
userId: this.userId,
URL: theImageUrl,
fileName: imageName,
basePath: 'images',
+ context: FileContext.image_generation,
});
- this.result = this.wrapInMarkdown(result);
+ this.result = this.wrapInMarkdown(result.filepath);
} catch (error) {
logger.error('Error while saving the image:', error);
this.result = `Failed to save the image locally. ${error.message}`;
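A recurring change in this file (and in DALLE3.js below) is reading `process.env` at the point of use instead of destructuring it at require time. The difference in one self-contained example:

// Destructuring captures the value once, when the module is first loaded:
const { DALLE_REVERSE_PROXY } = process.env; // undefined if set later

process.env.DALLE_REVERSE_PROXY = 'https://proxy.example.com'; // e.g. set by a test

console.log(DALLE_REVERSE_PROXY);             // undefined
console.log(process.env.DALLE_REVERSE_PROXY); // 'https://proxy.example.com'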
diff --git a/api/app/clients/tools/GoogleSearch.js b/api/app/clients/tools/GoogleSearch.js
deleted file mode 100644
index 3d7574b6c19..00000000000
--- a/api/app/clients/tools/GoogleSearch.js
+++ /dev/null
@@ -1,121 +0,0 @@
-const { google } = require('googleapis');
-const { Tool } = require('langchain/tools');
-const { logger } = require('~/config');
-
-/**
- * Represents a tool that allows an agent to use the Google Custom Search API.
- * @extends Tool
- */
-class GoogleSearchAPI extends Tool {
- constructor(fields = {}) {
- super();
- this.cx = fields.GOOGLE_CSE_ID || this.getCx();
- this.apiKey = fields.GOOGLE_API_KEY || this.getApiKey();
- this.customSearch = undefined;
- }
-
- /**
- * The name of the tool.
- * @type {string}
- */
- name = 'google';
-
- /**
- * A description for the agent to use
- * @type {string}
- */
- description =
- 'Use the \'google\' tool to retrieve internet search results relevant to your input. The results will return links and snippets of text from the webpages';
- description_for_model =
- 'Use the \'google\' tool to retrieve internet search results relevant to your input. The results will return links and snippets of text from the webpages';
-
- getCx() {
- const cx = process.env.GOOGLE_CSE_ID || '';
- if (!cx) {
- throw new Error('Missing GOOGLE_CSE_ID environment variable.');
- }
- return cx;
- }
-
- getApiKey() {
- const apiKey = process.env.GOOGLE_API_KEY || '';
- if (!apiKey) {
- throw new Error('Missing GOOGLE_API_KEY environment variable.');
- }
- return apiKey;
- }
-
- getCustomSearch() {
- if (!this.customSearch) {
- const version = 'v1';
- this.customSearch = google.customsearch(version);
- }
- return this.customSearch;
- }
-
- resultsToReadableFormat(results) {
- let output = 'Results:\n';
-
- results.forEach((resultObj, index) => {
- output += `Title: ${resultObj.title}\n`;
- output += `Link: ${resultObj.link}\n`;
- if (resultObj.snippet) {
- output += `Snippet: ${resultObj.snippet}\n`;
- }
-
- if (index < results.length - 1) {
- output += '\n';
- }
- });
-
- return output;
- }
-
- /**
- * Calls the tool with the provided input and returns a promise that resolves with a response from the Google Custom Search API.
- * @param {string} input - The input to provide to the API.
- * @returns {Promise} A promise that resolves with a response from the Google Custom Search API.
- */
- async _call(input) {
- try {
- const metadataResults = [];
- const response = await this.getCustomSearch().cse.list({
- q: input,
- cx: this.cx,
- auth: this.apiKey,
- num: 5, // Limit the number of results to 5
- });
-
- // return response.data;
- // logger.debug(response.data);
-
- if (!response.data.items || response.data.items.length === 0) {
- return this.resultsToReadableFormat([
- { title: 'No good Google Search Result was found', link: '' },
- ]);
- }
-
- // const results = response.items.slice(0, numResults);
- const results = response.data.items;
-
- for (const result of results) {
- const metadataResult = {
- title: result.title || '',
- link: result.link || '',
- };
- if (result.snippet) {
- metadataResult.snippet = result.snippet;
- }
- metadataResults.push(metadataResult);
- }
-
- return this.resultsToReadableFormat(metadataResults);
- } catch (error) {
- logger.error('[GoogleSearchAPI]', error);
- // throw error;
- return 'There was an error searching Google.';
- }
- }
-}
-
-module.exports = GoogleSearchAPI;
diff --git a/api/app/clients/tools/index.js b/api/app/clients/tools/index.js
index f5410e89eec..f16d229e6b7 100644
--- a/api/app/clients/tools/index.js
+++ b/api/app/clients/tools/index.js
@@ -1,35 +1,44 @@
-const GoogleSearchAPI = require('./GoogleSearch');
+const availableTools = require('./manifest.json');
+// Basic Tools
+const CodeBrew = require('./CodeBrew');
+const WolframAlphaAPI = require('./Wolfram');
+const AzureAiSearch = require('./AzureAiSearch');
const OpenAICreateImage = require('./DALL-E');
-const DALLE3 = require('./structured/DALLE3');
-const StructuredSD = require('./structured/StableDiffusion');
const StableDiffusionAPI = require('./StableDiffusion');
-const WolframAlphaAPI = require('./Wolfram');
-const StructuredWolfram = require('./structured/Wolfram');
const SelfReflectionTool = require('./SelfReflection');
-const AzureAiSearch = require('./AzureAiSearch');
-const StructuredACS = require('./structured/AzureAISearch');
+
+// Structured Tools
+const DALLE3 = require('./structured/DALLE3');
const ChatTool = require('./structured/ChatTool');
const E2BTools = require('./structured/E2BTools');
const CodeSherpa = require('./structured/CodeSherpa');
+const StructuredSD = require('./structured/StableDiffusion');
+const StructuredACS = require('./structured/AzureAISearch');
const CodeSherpaTools = require('./structured/CodeSherpaTools');
-const availableTools = require('./manifest.json');
-const CodeBrew = require('./CodeBrew');
+const GoogleSearchAPI = require('./structured/GoogleSearch');
+const StructuredWolfram = require('./structured/Wolfram');
+const TavilySearchResults = require('./structured/TavilySearchResults');
+const TraversaalSearch = require('./structured/TraversaalSearch');
module.exports = {
availableTools,
+ // Basic Tools
+ CodeBrew,
+ AzureAiSearch,
GoogleSearchAPI,
+ WolframAlphaAPI,
OpenAICreateImage,
- DALLE3,
StableDiffusionAPI,
- StructuredSD,
- WolframAlphaAPI,
- StructuredWolfram,
SelfReflectionTool,
- AzureAiSearch,
- StructuredACS,
- E2BTools,
+ // Structured Tools
+ DALLE3,
ChatTool,
+ E2BTools,
CodeSherpa,
+ StructuredSD,
+ StructuredACS,
CodeSherpaTools,
- CodeBrew,
+ StructuredWolfram,
+ TavilySearchResults,
+ TraversaalSearch,
};
diff --git a/api/app/clients/tools/manifest.json b/api/app/clients/tools/manifest.json
index bde4c8a87a9..3daaf9dd3bc 100644
--- a/api/app/clients/tools/manifest.json
+++ b/api/app/clients/tools/manifest.json
@@ -1,4 +1,17 @@
[
+ {
+ "name": "Traversaal",
+ "pluginKey": "traversaal_search",
+ "description": "Traversaal is a robust search API tailored for LLM Agents. Get an API key here: https://api.traversaal.ai",
+ "icon": "https://traversaal.ai/favicon.ico",
+ "authConfig": [
+ {
+ "authField": "TRAVERSAAL_API_KEY",
+ "label": "Traversaal API Key",
+ "description": "Get your API key here: https://api.traversaal.ai"
+ }
+ ]
+ },
{
"name": "Google",
"pluginKey": "google",
@@ -89,7 +102,7 @@
"icon": "https://i.imgur.com/u2TzXzH.png",
"authConfig": [
{
- "authField": "DALLE2_API_KEY",
+ "authField": "DALLE2_API_KEY||DALLE_API_KEY",
"label": "OpenAI API Key",
"description": "You can use DALL-E with your API Key from OpenAI."
}
@@ -102,12 +115,25 @@
"icon": "https://i.imgur.com/u2TzXzH.png",
"authConfig": [
{
- "authField": "DALLE3_API_KEY",
+ "authField": "DALLE3_API_KEY||DALLE_API_KEY",
"label": "OpenAI API Key",
"description": "You can use DALL-E with your API Key from OpenAI."
}
]
},
+ {
+ "name": "Tavily Search",
+ "pluginKey": "tavily_search_results_json",
+ "description": "Tavily Search is a robust search API tailored for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience.",
+ "icon": "https://tavily.com/favicon.ico",
+ "authConfig": [
+ {
+ "authField": "TAVILY_API_KEY",
+ "label": "Tavily API Key",
+ "description": "Get your API key here: https://app.tavily.com/"
+ }
+ ]
+ },
{
"name": "Calculator",
"pluginKey": "calculator",
diff --git a/api/app/clients/tools/structured/AzureAISearch.js b/api/app/clients/tools/structured/AzureAISearch.js
index 9b50aa2c433..0ce7b43fb21 100644
--- a/api/app/clients/tools/structured/AzureAISearch.js
+++ b/api/app/clients/tools/structured/AzureAISearch.js
@@ -19,6 +19,13 @@ class AzureAISearch extends StructuredTool {
this.name = 'azure-ai-search';
this.description =
'Use the \'azure-ai-search\' tool to retrieve search results relevant to your input';
+ /* Used to initialize the Tool without necessary variables. */
+ this.override = fields.override ?? false;
+
+ // Define schema
+ this.schema = z.object({
+ query: z.string().describe('Search word or phrase to Azure AI Search'),
+ });
// Initialize properties using helper function
this.serviceEndpoint = this._initializeField(
@@ -51,12 +58,16 @@ class AzureAISearch extends StructuredTool {
);
// Check for required fields
- if (!this.serviceEndpoint || !this.indexName || !this.apiKey) {
+ if (!this.override && (!this.serviceEndpoint || !this.indexName || !this.apiKey)) {
throw new Error(
'Missing AZURE_AI_SEARCH_SERVICE_ENDPOINT, AZURE_AI_SEARCH_INDEX_NAME, or AZURE_AI_SEARCH_API_KEY environment variable.',
);
}
+ if (this.override) {
+ return;
+ }
+
// Create SearchClient
this.client = new SearchClient(
this.serviceEndpoint,
@@ -64,11 +75,6 @@ class AzureAISearch extends StructuredTool {
new AzureKeyCredential(this.apiKey),
{ apiVersion: this.apiVersion },
);
-
- // Define schema
- this.schema = z.object({
- query: z.string().describe('Search word or phrase to Azure AI Search'),
- });
}
// Improved error handling and logging
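The `override` flag added here (and mirrored in the other tools below) allows constructing a tool without its credentials, e.g. to inspect its schema, while normal construction still fails fast. A hypothetical illustration:

const AzureAISearch = require('./AzureAISearch');

// With override, construction skips the credential check and the client setup:
const introspectOnly = new AzureAISearch({ override: true });
console.log(introspectOnly.schema); // defined, since the schema now precedes the check

// Without override, missing env vars still throw:
// new AzureAISearch({}); // Error: Missing AZURE_AI_SEARCH_SERVICE_ENDPOINT, ...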
diff --git a/api/app/clients/tools/structured/DALLE3.js b/api/app/clients/tools/structured/DALLE3.js
index 33df93e7fcf..3155992ca9b 100644
--- a/api/app/clients/tools/structured/DALLE3.js
+++ b/api/app/clients/tools/structured/DALLE3.js
@@ -4,42 +4,47 @@ const OpenAI = require('openai');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('langchain/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
+const { FileContext } = require('librechat-data-provider');
const { getImageBasename } = require('~/server/services/Files/images');
-const { processFileURL } = require('~/server/services/Files/process');
const extractBaseURL = require('~/utils/extractBaseURL');
const { logger } = require('~/config');
-const {
- DALLE3_SYSTEM_PROMPT,
- DALLE_REVERSE_PROXY,
- PROXY,
- DALLE3_AZURE_API_VERSION,
- DALLE3_BASEURL,
- DALLE3_API_KEY,
-} = process.env;
class DALLE3 extends Tool {
constructor(fields = {}) {
super();
+ /** @type {boolean} Used to initialize the Tool without necessary variables. */
+ this.override = fields.override ?? false;
+ /** @type {boolean} Necessary for output to contain all image metadata. */
+ this.returnMetadata = fields.returnMetadata ?? false;
this.userId = fields.userId;
this.fileStrategy = fields.fileStrategy;
+ if (fields.processFileURL) {
+ /** @type {processFileURL} Necessary for output to contain all image metadata. */
+ this.processFileURL = fields.processFileURL.bind(this);
+ }
+
let apiKey = fields.DALLE3_API_KEY ?? fields.DALLE_API_KEY ?? this.getApiKey();
const config = { apiKey };
- if (DALLE_REVERSE_PROXY) {
- config.baseURL = extractBaseURL(DALLE_REVERSE_PROXY);
+ if (process.env.DALLE_REVERSE_PROXY) {
+ config.baseURL = extractBaseURL(process.env.DALLE_REVERSE_PROXY);
}
- if (DALLE3_AZURE_API_VERSION && DALLE3_BASEURL) {
- config.baseURL = DALLE3_BASEURL;
- config.defaultQuery = { 'api-version': DALLE3_AZURE_API_VERSION };
- config.defaultHeaders = { 'api-key': DALLE3_API_KEY, 'Content-Type': 'application/json' };
- config.apiKey = DALLE3_API_KEY;
+ if (process.env.DALLE3_AZURE_API_VERSION && process.env.DALLE3_BASEURL) {
+ config.baseURL = process.env.DALLE3_BASEURL;
+ config.defaultQuery = { 'api-version': process.env.DALLE3_AZURE_API_VERSION };
+ config.defaultHeaders = {
+ 'api-key': process.env.DALLE3_API_KEY,
+ 'Content-Type': 'application/json',
+ };
+ config.apiKey = process.env.DALLE3_API_KEY;
}
- if (PROXY) {
- config.httpAgent = new HttpsProxyAgent(PROXY);
+ if (process.env.PROXY) {
+ config.httpAgent = new HttpsProxyAgent(process.env.PROXY);
}
+ /** @type {OpenAI} */
this.openai = new OpenAI(config);
this.name = 'dalle';
this.description = `Use DALLE to create images from text descriptions.
@@ -47,7 +52,7 @@ class DALLE3 extends Tool {
- Create only one image, without repeating or listing descriptions outside the "prompts" field.
- Maintains the original intent of the description, with parameters for image style, quality, and size to tailor the output.`;
this.description_for_model =
- DALLE3_SYSTEM_PROMPT ??
+ process.env.DALLE3_SYSTEM_PROMPT ??
`// Whenever a description of an image is given, generate prompts (following these rules), and use dalle to create the image. If the user does not ask for a specific number of images, default to creating 2 prompts to send to dalle that are written to be as diverse as possible. All prompts sent to dalle must abide by the following policies:
// 1. Prompts must be in English. Translate to English if needed.
// 2. One image per function call. Create only 1 image per request unless explicitly told to generate more than 1 image.
@@ -86,7 +91,7 @@ class DALLE3 extends Tool {
getApiKey() {
const apiKey = process.env.DALLE3_API_KEY ?? process.env.DALLE_API_KEY ?? '';
- if (!apiKey) {
+ if (!apiKey && !this.override) {
throw new Error('Missing DALLE_API_KEY environment variable.');
}
return apiKey;
@@ -120,6 +125,7 @@ class DALLE3 extends Tool {
n: 1,
});
} catch (error) {
+ logger.error('[DALL-E-3] Problem generating the image:', error);
return `Something went wrong when trying to generate the image. The DALL-E API may be unavailable:
Error Message: ${error.message}`;
}
@@ -150,15 +156,20 @@ Error Message: ${error.message}`;
});
try {
- const result = await processFileURL({
+ const result = await this.processFileURL({
fileStrategy: this.fileStrategy,
userId: this.userId,
URL: theImageUrl,
fileName: imageName,
basePath: 'images',
+ context: FileContext.image_generation,
});
- this.result = this.wrapInMarkdown(result);
+ if (this.returnMetadata) {
+ this.result = result;
+ } else {
+ this.result = this.wrapInMarkdown(result.filepath);
+ }
} catch (error) {
logger.error('Error while saving the image:', error);
this.result = `Failed to save the image locally. ${error.message}`;
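As in DALL-E.js, `processFileURL` is now injected through the constructor rather than required at module scope, which decouples the tool from the file-processing service and gives tests a seam to mock, as the updated spec below does. A sketch of that seam:

// In a test, the file-processing dependency can be replaced wholesale:
const DALLE3 = require('./DALLE3');

const processFileURL = jest.fn().mockResolvedValue({ filepath: '/images/out.png' });
const dalle = new DALLE3({ processFileURL });
// dalle._call(...) now resolves images through the mock instead of the real service.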
diff --git a/api/app/clients/tools/structured/GoogleSearch.js b/api/app/clients/tools/structured/GoogleSearch.js
new file mode 100644
index 00000000000..92d33272c83
--- /dev/null
+++ b/api/app/clients/tools/structured/GoogleSearch.js
@@ -0,0 +1,65 @@
+const { z } = require('zod');
+const { Tool } = require('@langchain/core/tools');
+const { getEnvironmentVariable } = require('@langchain/core/utils/env');
+
+class GoogleSearchResults extends Tool {
+ static lc_name() {
+ return 'GoogleSearchResults';
+ }
+
+ constructor(fields = {}) {
+ super(fields);
+ this.envVarApiKey = 'GOOGLE_API_KEY';
+ this.envVarSearchEngineId = 'GOOGLE_CSE_ID';
+ this.override = fields.override ?? false;
+ this.apiKey = fields.apiKey ?? getEnvironmentVariable(this.envVarApiKey);
+ this.searchEngineId =
+ fields.searchEngineId ?? getEnvironmentVariable(this.envVarSearchEngineId);
+
+ this.kwargs = fields?.kwargs ?? {};
+ this.name = 'google';
+ this.description =
+ 'A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events.';
+
+ this.schema = z.object({
+ query: z.string().min(1).describe('The search query string.'),
+ max_results: z
+ .number()
+ .min(1)
+ .max(10)
+ .optional()
+        .describe('The maximum number of search results to return. Defaults to 5.'),
+ // Note: Google API has its own parameters for search customization, adjust as needed.
+ });
+ }
+
+ async _call(input) {
+ const validationResult = this.schema.safeParse(input);
+ if (!validationResult.success) {
+ throw new Error(`Validation failed: ${JSON.stringify(validationResult.error.issues)}`);
+ }
+
+ const { query, max_results = 5 } = validationResult.data;
+
+ const response = await fetch(
+ `https://www.googleapis.com/customsearch/v1?key=${this.apiKey}&cx=${
+ this.searchEngineId
+ }&q=${encodeURIComponent(query)}&num=${max_results}`,
+ {
+ method: 'GET',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ },
+ );
+
+ const json = await response.json();
+ if (!response.ok) {
+ throw new Error(`Request failed with status ${response.status}: ${json.error.message}`);
+ }
+
+ return JSON.stringify(json);
+ }
+}
+
+module.exports = GoogleSearchResults;
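A hypothetical usage of the new structured Google tool, assuming `GOOGLE_API_KEY` and `GOOGLE_CSE_ID` are set in the environment:

const GoogleSearchResults = require('./GoogleSearch');

const search = new GoogleSearchResults();
search
  ._call({ query: 'LibreChat plugins', max_results: 5 })
  .then((json) => console.log(JSON.parse(json).items ?? []));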
diff --git a/api/app/clients/tools/structured/StableDiffusion.js b/api/app/clients/tools/structured/StableDiffusion.js
index 1fc5096730e..e891cbb398a 100644
--- a/api/app/clients/tools/structured/StableDiffusion.js
+++ b/api/app/clients/tools/structured/StableDiffusion.js
@@ -4,12 +4,28 @@ const { z } = require('zod');
const path = require('path');
const axios = require('axios');
const sharp = require('sharp');
+const { v4: uuidv4 } = require('uuid');
const { StructuredTool } = require('langchain/tools');
+const { FileContext } = require('librechat-data-provider');
+const paths = require('~/config/paths');
const { logger } = require('~/config');
class StableDiffusionAPI extends StructuredTool {
constructor(fields) {
super();
+ /** @type {string} User ID */
+ this.userId = fields.userId;
+ /** @type {Express.Request | undefined} Express Request object, only provided by ToolService */
+ this.req = fields.req;
+ /** @type {boolean} Used to initialize the Tool without necessary variables. */
+ this.override = fields.override ?? false;
+ /** @type {boolean} Necessary for output to contain all image metadata. */
+ this.returnMetadata = fields.returnMetadata ?? false;
+ if (fields.uploadImageBuffer) {
+ /** @type {uploadImageBuffer} Necessary for output to contain all image metadata. */
+ this.uploadImageBuffer = fields.uploadImageBuffer.bind(this);
+ }
+
this.name = 'stable-diffusion';
this.url = fields.SD_WEBUI_URL || this.getServerURL();
this.description_for_model = `// Generate images and visuals using text.
@@ -44,7 +60,7 @@ class StableDiffusionAPI extends StructuredTool {
getMarkdownImageUrl(imageName) {
const imageUrl = path
- .join(this.relativeImageUrl, imageName)
+ .join(this.relativePath, this.userId, imageName)
.replace(/\\/g, '/')
.replace('public/', '');
return `![generated image](/${imageUrl})`;
@@ -52,7 +68,7 @@ class StableDiffusionAPI extends StructuredTool {
getServerURL() {
const url = process.env.SD_WEBUI_URL || '';
- if (!url) {
+ if (!url && !this.override) {
throw new Error('Missing SD_WEBUI_URL environment variable.');
}
return url;
@@ -70,46 +86,67 @@ class StableDiffusionAPI extends StructuredTool {
width: 1024,
height: 1024,
};
- const response = await axios.post(`${url}/sdapi/v1/txt2img`, payload);
- const image = response.data.images[0];
- const pngPayload = { image: `data:image/png;base64,${image}` };
- const response2 = await axios.post(`${url}/sdapi/v1/png-info`, pngPayload);
- const info = response2.data.info;
+ const generationResponse = await axios.post(`${url}/sdapi/v1/txt2img`, payload);
+ const image = generationResponse.data.images[0];
+
+ /** @type {{ height: number, width: number, seed: number, infotexts: string[] }} */
+ let info = {};
+ try {
+ info = JSON.parse(generationResponse.data.info);
+ } catch (error) {
+ logger.error('[StableDiffusion] Error while getting image metadata:', error);
+ }
- // Generate unique name
- const imageName = `${Date.now()}.png`;
- this.outputPath = path.resolve(
- __dirname,
- '..',
- '..',
- '..',
- '..',
- '..',
- 'client',
- 'public',
- 'images',
- );
- const appRoot = path.resolve(__dirname, '..', '..', '..', '..', '..', 'client');
- this.relativeImageUrl = path.relative(appRoot, this.outputPath);
+ const file_id = uuidv4();
+ const imageName = `${file_id}.png`;
+ const { imageOutput: imageOutputPath, clientPath } = paths;
+ const filepath = path.join(imageOutputPath, this.userId, imageName);
+ this.relativePath = path.relative(clientPath, imageOutputPath);
- // Check if directory exists, if not create it
- if (!fs.existsSync(this.outputPath)) {
- fs.mkdirSync(this.outputPath, { recursive: true });
+ if (!fs.existsSync(path.join(imageOutputPath, this.userId))) {
+ fs.mkdirSync(path.join(imageOutputPath, this.userId), { recursive: true });
}
try {
const buffer = Buffer.from(image.split(',', 1)[0], 'base64');
+ if (this.returnMetadata && this.uploadImageBuffer && this.req) {
+ const file = await this.uploadImageBuffer({
+ req: this.req,
+ context: FileContext.image_generation,
+ resize: false,
+ metadata: {
+ buffer,
+ height: info.height,
+ width: info.width,
+ bytes: Buffer.byteLength(buffer),
+ filename: imageName,
+ type: 'image/png',
+ file_id,
+ },
+ });
+
+ const generationInfo = info.infotexts[0].split('\n').pop();
+ return {
+ ...file,
+ prompt,
+ metadata: {
+ negative_prompt,
+ seed: info.seed,
+ info: generationInfo,
+ },
+ };
+ }
+
await sharp(buffer)
.withMetadata({
iptcpng: {
- parameters: info,
+ parameters: info.infotexts[0],
},
})
- .toFile(this.outputPath + '/' + imageName);
+ .toFile(filepath);
this.result = this.getMarkdownImageUrl(imageName);
} catch (error) {
logger.error('[StableDiffusion] Error while saving the image:', error);
- // this.result = theImageUrl;
}
return this.result;
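When `returnMetadata` is set and an `uploadImageBuffer` implementation is injected, `_call` now returns a structured file record instead of a markdown string. A hypothetical wiring, mirroring what a tool service might do:

const StableDiffusionAPI = require('./StableDiffusion');

// req and uploadImageBuffer are assumed to be supplied by the caller
// (per the JSDoc above, ToolService provides both).
async function generateWithMetadata(req, uploadImageBuffer) {
  const sd = new StableDiffusionAPI({
    userId: 'user-123',
    req,
    returnMetadata: true,
    uploadImageBuffer,
  });
  const file = await sd._call({ prompt: 'a lighthouse at dusk', negative_prompt: '' });
  // => { ...uploaded file record, prompt, metadata: { negative_prompt, seed, info } }
  return file;
}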
diff --git a/api/app/clients/tools/structured/TavilySearchResults.js b/api/app/clients/tools/structured/TavilySearchResults.js
new file mode 100644
index 00000000000..3945ac1d00f
--- /dev/null
+++ b/api/app/clients/tools/structured/TavilySearchResults.js
@@ -0,0 +1,92 @@
+const { z } = require('zod');
+const { Tool } = require('@langchain/core/tools');
+const { getEnvironmentVariable } = require('@langchain/core/utils/env');
+
+class TavilySearchResults extends Tool {
+ static lc_name() {
+ return 'TavilySearchResults';
+ }
+
+ constructor(fields = {}) {
+ super(fields);
+ this.envVar = 'TAVILY_API_KEY';
+ /* Used to initialize the Tool without necessary variables. */
+ this.override = fields.override ?? false;
+ this.apiKey = fields.apiKey ?? this.getApiKey();
+
+ this.kwargs = fields?.kwargs ?? {};
+ this.name = 'tavily_search_results_json';
+ this.description =
+ 'A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events.';
+
+ this.schema = z.object({
+ query: z.string().min(1).describe('The search query string.'),
+ max_results: z
+ .number()
+ .min(1)
+ .max(10)
+ .optional()
+ .describe('The maximum number of search results to return. Defaults to 5.'),
+ search_depth: z
+ .enum(['basic', 'advanced'])
+ .optional()
+ .describe(
+          'The depth of the search, affecting result quality and response time (`basic` or `advanced`). Default is `basic` for quick results; use `advanced` for in-depth, high-quality results at the cost of a longer response time (an advanced call counts as 2 requests).',
+ ),
+ include_images: z
+ .boolean()
+ .optional()
+ .describe(
+ 'Whether to include a list of query-related images in the response. Default is False.',
+ ),
+ include_answer: z
+ .boolean()
+ .optional()
+ .describe('Whether to include answers in the search results. Default is False.'),
+ // include_raw_content: z.boolean().optional().describe('Whether to include raw content in the search results. Default is False.'),
+ // include_domains: z.array(z.string()).optional().describe('A list of domains to specifically include in the search results.'),
+ // exclude_domains: z.array(z.string()).optional().describe('A list of domains to specifically exclude from the search results.'),
+ });
+ }
+
+ getApiKey() {
+ const apiKey = getEnvironmentVariable(this.envVar);
+ if (!apiKey && !this.override) {
+ throw new Error(`Missing ${this.envVar} environment variable.`);
+ }
+ return apiKey;
+ }
+
+ async _call(input) {
+ const validationResult = this.schema.safeParse(input);
+ if (!validationResult.success) {
+ throw new Error(`Validation failed: ${JSON.stringify(validationResult.error.issues)}`);
+ }
+
+ const { query, ...rest } = validationResult.data;
+
+ const requestBody = {
+ api_key: this.apiKey,
+ query,
+ ...rest,
+ ...this.kwargs,
+ };
+
+ const response = await fetch('https://api.tavily.com/search', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify(requestBody),
+ });
+
+ const json = await response.json();
+ if (!response.ok) {
+ throw new Error(`Request failed with status ${response.status}: ${json.error}`);
+ }
+
+ return JSON.stringify(json);
+ }
+}
+
+module.exports = TavilySearchResults;
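A hypothetical usage of the Tavily tool, assuming `TAVILY_API_KEY` is set; extra request parameters can be pinned through `kwargs`:

const TavilySearchResults = require('./TavilySearchResults');

const tavily = new TavilySearchResults({ kwargs: { include_answer: true } });
tavily
  ._call({ query: 'current Node.js LTS version', search_depth: 'basic' })
  .then((json) => console.log(JSON.parse(json)));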
diff --git a/api/app/clients/tools/structured/TraversaalSearch.js b/api/app/clients/tools/structured/TraversaalSearch.js
new file mode 100644
index 00000000000..e8ceeda134f
--- /dev/null
+++ b/api/app/clients/tools/structured/TraversaalSearch.js
@@ -0,0 +1,89 @@
+const { z } = require('zod');
+const { Tool } = require('@langchain/core/tools');
+const { getEnvironmentVariable } = require('@langchain/core/utils/env');
+const { logger } = require('~/config');
+
+/**
+ * Tool for the Traversaal AI search API, Ares.
+ */
+class TraversaalSearch extends Tool {
+ static lc_name() {
+ return 'TraversaalSearch';
+ }
+ constructor(fields) {
+ super(fields);
+ this.name = 'traversaal_search';
+ this.description = `An AI search engine optimized for comprehensive, accurate, and trusted results.
+ Useful for when you need to answer questions about current events. Input should be a search query.`;
+ this.description_for_model =
+ '\'Please create a specific sentence for the AI to understand and use as a query to search the web based on the user\'s request. For example, "Find information about the highest mountains in the world." or "Show me the latest news articles about climate change and its impact on polar ice caps."\'';
+ this.schema = z.object({
+ query: z
+ .string()
+ .describe(
+ 'A properly written sentence to be interpreted by an AI to search the web according to the user\'s request.',
+ ),
+ });
+
+ this.apiKey = fields?.TRAVERSAAL_API_KEY ?? this.getApiKey();
+ }
+
+ getApiKey() {
+ const apiKey = getEnvironmentVariable('TRAVERSAAL_API_KEY');
+    if (!apiKey && !this.override) {
+ throw new Error(
+ 'No Traversaal API key found. Either set an environment variable named "TRAVERSAAL_API_KEY" or pass an API key as "apiKey".',
+ );
+ }
+ return apiKey;
+ }
+
+ // eslint-disable-next-line no-unused-vars
+ async _call({ query }, _runManager) {
+ const body = {
+ query: [query],
+ };
+ try {
+ const response = await fetch('https://api-ares.traversaal.ai/live/predict', {
+ method: 'POST',
+ headers: {
+ 'content-type': 'application/json',
+ 'x-api-key': this.apiKey,
+ },
+ body: JSON.stringify({ ...body }),
+ });
+ const json = await response.json();
+ if (!response.ok) {
+ throw new Error(
+ `Request failed with status code ${response.status}: ${json.error ?? json.message}`,
+ );
+ }
+ if (!json.data) {
+ throw new Error('Could not parse Traversaal API results. Please try again.');
+ }
+
+ const baseText = json.data?.response_text ?? '';
+ const sources = json.data?.web_url;
+ const noResponse = 'No response found in Traversaal API results';
+
+ if (!baseText && !sources) {
+ return noResponse;
+ }
+
+ const sourcesText = sources?.length ? '\n\nSources:\n - ' + sources.join('\n - ') : '';
+
+ const result = baseText + sourcesText;
+
+ if (!result) {
+ return noResponse;
+ }
+
+ return result;
+ } catch (error) {
+ logger.error('Traversaal API request failed', error);
+ return `Traversaal API request failed: ${error.message}`;
+ }
+ }
+}
+
+module.exports = TraversaalSearch;
diff --git a/api/app/clients/tools/structured/Wolfram.js b/api/app/clients/tools/structured/Wolfram.js
index 2c5c6e023a1..fc857b35cb2 100644
--- a/api/app/clients/tools/structured/Wolfram.js
+++ b/api/app/clients/tools/structured/Wolfram.js
@@ -7,6 +7,9 @@ const { logger } = require('~/config');
class WolframAlphaAPI extends StructuredTool {
constructor(fields) {
super();
+ /* Used to initialize the Tool without necessary variables. */
+ this.override = fields.override ?? false;
+
this.name = 'wolfram';
this.apiKey = fields.WOLFRAM_APP_ID || this.getAppId();
this.description_for_model = `// Access dynamic computation and curated data from WolframAlpha and Wolfram Cloud.
@@ -55,7 +58,7 @@ class WolframAlphaAPI extends StructuredTool {
getAppId() {
const appId = process.env.WOLFRAM_APP_ID || '';
- if (!appId) {
+ if (!appId && !this.override) {
throw new Error('Missing WOLFRAM_APP_ID environment variable.');
}
return appId;
diff --git a/api/app/clients/tools/structured/specs/DALLE3.spec.js b/api/app/clients/tools/structured/specs/DALLE3.spec.js
index 58771b1459e..1b28de2faf1 100644
--- a/api/app/clients/tools/structured/specs/DALLE3.spec.js
+++ b/api/app/clients/tools/structured/specs/DALLE3.spec.js
@@ -1,14 +1,11 @@
const OpenAI = require('openai');
const DALLE3 = require('../DALLE3');
-const { processFileURL } = require('~/server/services/Files/process');
const { logger } = require('~/config');
jest.mock('openai');
-jest.mock('~/server/services/Files/process', () => ({
- processFileURL: jest.fn(),
-}));
+const processFileURL = jest.fn();
jest.mock('~/server/services/Files/images', () => ({
getImageBasename: jest.fn().mockImplementation((url) => {
@@ -69,7 +66,7 @@ describe('DALLE3', () => {
jest.resetModules();
process.env = { ...originalEnv, DALLE_API_KEY: mockApiKey };
// Instantiate DALLE3 for tests that do not depend on DALLE3_SYSTEM_PROMPT
- dalle = new DALLE3();
+ dalle = new DALLE3({ processFileURL });
});
afterEach(() => {
@@ -78,7 +75,8 @@ describe('DALLE3', () => {
process.env = originalEnv;
});
- it('should throw an error if DALLE_API_KEY is missing', () => {
+ it('should throw an error if all potential API keys are missing', () => {
+ delete process.env.DALLE3_API_KEY;
delete process.env.DALLE_API_KEY;
expect(() => new DALLE3()).toThrow('Missing DALLE_API_KEY environment variable.');
});
@@ -112,7 +110,9 @@ describe('DALLE3', () => {
};
generate.mockResolvedValue(mockResponse);
- processFileURL.mockResolvedValue('http://example.com/img-test.png');
+ processFileURL.mockResolvedValue({
+ filepath: 'http://example.com/img-test.png',
+ });
const result = await dalle._call(mockData);
diff --git a/api/app/clients/tools/util/handleTools.js b/api/app/clients/tools/util/handleTools.js
index 1d9a3a00749..7ed18658711 100644
--- a/api/app/clients/tools/util/handleTools.js
+++ b/api/app/clients/tools/util/handleTools.js
@@ -6,19 +6,23 @@ const { OpenAIEmbeddings } = require('langchain/embeddings/openai');
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const {
availableTools,
+ // Basic Tools
+ CodeBrew,
+ AzureAISearch,
GoogleSearchAPI,
WolframAlphaAPI,
- StructuredWolfram,
OpenAICreateImage,
StableDiffusionAPI,
+ // Structured Tools
DALLE3,
- StructuredSD,
- AzureAISearch,
- StructuredACS,
E2BTools,
CodeSherpa,
+ StructuredSD,
+ StructuredACS,
CodeSherpaTools,
- CodeBrew,
+ TraversaalSearch,
+ StructuredWolfram,
+ TavilySearchResults,
} = require('../');
const { loadToolSuite } = require('./loadToolSuite');
const { loadSpecs } = require('./loadSpecs');
@@ -30,6 +34,14 @@ const getOpenAIKey = async (options, user) => {
return openAIApiKey || (await getUserPluginAuthValue(user, 'OPENAI_API_KEY'));
};
+/**
+ * Validates the availability and authentication of tools for a user based on environment variables or user-specific plugin authentication values.
+ * Tools without required authentication or with valid authentication are considered valid.
+ *
+ * @param {Object} user The user object for whom to validate tool access.
+ * @param {Array} tools An array of tool identifiers to validate. Defaults to an empty array.
+ * @returns {Promise<Array<string>>} A promise that resolves to an array of valid tool identifiers.
+ */
const validateTools = async (user, tools = []) => {
try {
const validToolsSet = new Set(tools);
@@ -37,16 +49,34 @@ const validateTools = async (user, tools = []) => {
validToolsSet.has(tool.pluginKey),
);
+ /**
+ * Validates the credentials for a given auth field or set of alternate auth fields for a tool.
+ * If valid admin or user authentication is found, the function returns early. Otherwise, it removes the tool from the set of valid tools.
+ *
+ * @param {string} authField The authentication field or fields (separated by "||" for alternates) to validate.
+ * @param {string} toolName The identifier of the tool being validated.
+ */
const validateCredentials = async (authField, toolName) => {
- const adminAuth = process.env[authField];
- if (adminAuth && adminAuth.length > 0) {
- return;
+ const fields = authField.split('||');
+ for (const field of fields) {
+ const adminAuth = process.env[field];
+ if (adminAuth && adminAuth.length > 0) {
+ return;
+ }
+
+ let userAuth = null;
+ try {
+ userAuth = await getUserPluginAuthValue(user, field);
+ } catch (err) {
+ if (field === fields[fields.length - 1] && !userAuth) {
+ throw err;
+ }
+ }
+ if (userAuth && userAuth.length > 0) {
+ return;
+ }
}
- const userAuth = await getUserPluginAuthValue(user, authField);
- if (userAuth && userAuth.length > 0) {
- return;
- }
validToolsSet.delete(toolName);
};
@@ -63,20 +93,55 @@ const validateTools = async (user, tools = []) => {
return Array.from(validToolsSet.values());
} catch (err) {
logger.error('[validateTools] There was a problem validating tools', err);
- throw new Error(err);
+ throw new Error('There was a problem validating tools');
}
};
-const loadToolWithAuth = async (userId, authFields, ToolConstructor, options = {}) => {
+/**
+ * Initializes a tool with authentication values for the given user, supporting alternate authentication fields.
+ * Authentication fields can have alternates separated by "||", and the first defined variable will be used.
+ *
+ * @param {string} userId The user ID for which the tool is being loaded.
+ * @param {Array} authFields Array of strings representing the authentication fields. Supports alternate fields delimited by "||".
+ * @param {typeof import('langchain/tools').Tool} ToolConstructor The constructor function for the tool to be initialized.
+ * @param {Object} options Optional parameters to be passed to the tool constructor alongside authentication values.
+ * @returns {Function} An Async function that, when called, asynchronously initializes and returns an instance of the tool with authentication.
+ */
+const loadToolWithAuth = (userId, authFields, ToolConstructor, options = {}) => {
return async function () {
let authValues = {};
- for (const authField of authFields) {
- let authValue = process.env[authField];
- if (!authValue) {
- authValue = await getUserPluginAuthValue(userId, authField);
+ /**
+ * Finds the first non-empty value for the given authentication field, supporting alternate fields.
+ * @param {string[]} fields Array of strings representing the authentication fields. Supports alternate fields delimited by "||".
+ * @returns {Promise<{ authField: string, authValue: string} | null>} An object containing the authentication field and value, or null if not found.
+ */
+ const findAuthValue = async (fields) => {
+ for (const field of fields) {
+ let value = process.env[field];
+ if (value) {
+ return { authField: field, authValue: value };
+ }
+ try {
+ value = await getUserPluginAuthValue(userId, field);
+ } catch (err) {
+ if (field === fields[fields.length - 1] && !value) {
+ throw err;
+ }
+ }
+ if (value) {
+ return { authField: field, authValue: value };
+ }
+ }
+ return null;
+ };
+
+ for (let authField of authFields) {
+ const fields = authField.split('||');
+ const result = await findAuthValue(fields);
+ if (result) {
+ authValues[result.authField] = result.authValue;
}
- authValues[authField] = authValue;
}
return new ToolConstructor({ ...options, ...authValues, userId });
@@ -90,8 +155,10 @@ const loadTools = async ({
returnMap = false,
tools = [],
options = {},
+ skipSpecs = false,
}) => {
const toolConstructors = {
+ tavily_search_results_json: TavilySearchResults,
calculator: Calculator,
google: GoogleSearchAPI,
wolfram: functions ? StructuredWolfram : WolframAlphaAPI,
@@ -99,6 +166,7 @@ const loadTools = async ({
'stable-diffusion': functions ? StructuredSD : StableDiffusionAPI,
'azure-ai-search': functions ? StructuredACS : AzureAISearch,
CodeBrew: CodeBrew,
+ traversaal_search: TraversaalSearch,
};
const openAIApiKey = await getOpenAIKey(options, user);
@@ -168,10 +236,19 @@ const loadTools = async ({
toolConstructors.codesherpa = CodeSherpa;
}
+ const imageGenOptions = {
+ req: options.req,
+ fileStrategy: options.fileStrategy,
+ processFileURL: options.processFileURL,
+ returnMetadata: options.returnMetadata,
+ uploadImageBuffer: options.uploadImageBuffer,
+ };
+
const toolOptions = {
serpapi: { location: 'Austin,Texas,United States', hl: 'en', gl: 'us' },
- dalle: { fileStrategy: options.fileStrategy },
- 'dall-e': { fileStrategy: options.fileStrategy },
+ dalle: imageGenOptions,
+ 'dall-e': imageGenOptions,
+ 'stable-diffusion': imageGenOptions,
};
const toolAuthFields = {};
@@ -194,7 +271,7 @@ const loadTools = async ({
if (toolConstructors[tool]) {
const options = toolOptions[tool] || {};
- const toolInstance = await loadToolWithAuth(
+ const toolInstance = loadToolWithAuth(
user,
toolAuthFields[tool],
toolConstructors[tool],
@@ -210,7 +287,7 @@ const loadTools = async ({
}
let specs = null;
- if (functions && remainingTools.length > 0) {
+ if (functions && remainingTools.length > 0 && skipSpecs !== true) {
specs = await loadSpecs({
llm: model,
user,
@@ -237,6 +314,9 @@ const loadTools = async ({
let result = [];
for (const tool of tools) {
const validTool = requestedTools[tool];
+ if (!validTool) {
+ continue;
+ }
const plugin = await validTool();
if (Array.isArray(plugin)) {
@@ -250,6 +330,7 @@ const loadTools = async ({
};
module.exports = {
+ loadToolWithAuth,
validateTools,
loadTools,
};
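Exporting `loadToolWithAuth` lets the new tests below exercise the alternate-field resolution directly. A sketch of the resolution order it implements:

const { loadToolWithAuth } = require('./handleTools');
const { DALLE3 } = require('../');

(async () => {
  // For 'DALLE2_API_KEY||DALLE_API_KEY', each alternate is checked first in
  // process.env and then in the user's stored plugin credentials before the
  // loader moves on to the next field.
  const init = loadToolWithAuth('user-123', ['DALLE2_API_KEY||DALLE_API_KEY'], DALLE3);
  const dalle = await init(); // constructed with whichever key resolved first
})();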
diff --git a/api/app/clients/tools/util/handleTools.test.js b/api/app/clients/tools/util/handleTools.test.js
index 40d8bc6129e..2c977714275 100644
--- a/api/app/clients/tools/util/handleTools.test.js
+++ b/api/app/clients/tools/util/handleTools.test.js
@@ -4,26 +4,33 @@ const mockUser = {
findByIdAndDelete: jest.fn(),
};
-var mockPluginService = {
+const mockPluginService = {
updateUserPluginAuth: jest.fn(),
deleteUserPluginAuth: jest.fn(),
getUserPluginAuthValue: jest.fn(),
};
-jest.mock('../../../../models/User', () => {
+jest.mock('~/models/User', () => {
return function () {
return mockUser;
};
});
-jest.mock('../../../../server/services/PluginService', () => mockPluginService);
+jest.mock('~/server/services/PluginService', () => mockPluginService);
-const User = require('../../../../models/User');
-const { validateTools, loadTools } = require('./');
-const PluginService = require('../../../../server/services/PluginService');
-const { BaseChatModel } = require('langchain/chat_models/openai');
const { Calculator } = require('langchain/tools/calculator');
-const { availableTools, OpenAICreateImage, GoogleSearchAPI, StructuredSD } = require('../');
+const { BaseChatModel } = require('langchain/chat_models/openai');
+
+const User = require('~/models/User');
+const PluginService = require('~/server/services/PluginService');
+const { validateTools, loadTools, loadToolWithAuth } = require('./handleTools');
+const {
+ availableTools,
+ OpenAICreateImage,
+ GoogleSearchAPI,
+ StructuredSD,
+ WolframAlphaAPI,
+} = require('../');
describe('Tool Handlers', () => {
let fakeUser;
@@ -44,7 +51,10 @@ describe('Tool Handlers', () => {
});
mockPluginService.updateUserPluginAuth.mockImplementation(
(userId, authField, _pluginKey, credential) => {
- userAuthValues[`${userId}-${authField}`] = credential;
+ const fields = authField.split('||');
+ fields.forEach((field) => {
+ userAuthValues[`${userId}-${field}`] = credential;
+ });
},
);
@@ -53,6 +63,7 @@ describe('Tool Handlers', () => {
username: 'fakeuser',
email: 'fakeuser@example.com',
emailVerified: false,
+ // file deepcode ignore NoHardcodedPasswords/test: fake value
password: 'fakepassword123',
avatar: '',
provider: 'local',
@@ -133,6 +144,18 @@ describe('Tool Handlers', () => {
loadTool2 = toolFunctions[sampleTools[1]];
loadTool3 = toolFunctions[sampleTools[2]];
});
+
+ let originalEnv;
+
+ beforeEach(() => {
+ originalEnv = process.env;
+ process.env = { ...originalEnv };
+ });
+
+ afterEach(() => {
+ process.env = originalEnv;
+ });
+
it('returns the expected load functions for requested tools', async () => {
expect(loadTool1).toBeDefined();
expect(loadTool2).toBeDefined();
@@ -149,6 +172,86 @@ describe('Tool Handlers', () => {
expect(authTool).toBeInstanceOf(ToolClass);
expect(tool).toBeInstanceOf(ToolClass2);
});
+
+ it('should initialize an authenticated tool with primary auth field', async () => {
+ process.env.DALLE2_API_KEY = 'mocked_api_key';
+ const initToolFunction = loadToolWithAuth(
+ 'userId',
+ ['DALLE2_API_KEY||DALLE_API_KEY'],
+ ToolClass,
+ );
+ const authTool = await initToolFunction();
+
+ expect(authTool).toBeInstanceOf(ToolClass);
+ expect(mockPluginService.getUserPluginAuthValue).not.toHaveBeenCalled();
+ });
+
+ it('should initialize an authenticated tool with alternate auth field when primary is missing', async () => {
+ delete process.env.DALLE2_API_KEY; // Ensure the primary key is not set
+ process.env.DALLE_API_KEY = 'mocked_alternate_api_key';
+ const initToolFunction = loadToolWithAuth(
+ 'userId',
+ ['DALLE2_API_KEY||DALLE_API_KEY'],
+ ToolClass,
+ );
+ const authTool = await initToolFunction();
+
+ expect(authTool).toBeInstanceOf(ToolClass);
+ expect(mockPluginService.getUserPluginAuthValue).toHaveBeenCalledTimes(1);
+ expect(mockPluginService.getUserPluginAuthValue).toHaveBeenCalledWith(
+ 'userId',
+ 'DALLE2_API_KEY',
+ );
+ });
+
+ it('should fallback to getUserPluginAuthValue when env vars are missing', async () => {
+ mockPluginService.updateUserPluginAuth('userId', 'DALLE_API_KEY', 'dalle', 'mocked_api_key');
+ const initToolFunction = loadToolWithAuth(
+ 'userId',
+ ['DALLE2_API_KEY||DALLE_API_KEY'],
+ ToolClass,
+ );
+ const authTool = await initToolFunction();
+
+ expect(authTool).toBeInstanceOf(ToolClass);
+ expect(mockPluginService.getUserPluginAuthValue).toHaveBeenCalledTimes(2);
+ });
+
+ it('should initialize an authenticated tool with singular auth field', async () => {
+ process.env.WOLFRAM_APP_ID = 'mocked_app_id';
+ const initToolFunction = loadToolWithAuth('userId', ['WOLFRAM_APP_ID'], WolframAlphaAPI);
+ const authTool = await initToolFunction();
+
+ expect(authTool).toBeInstanceOf(WolframAlphaAPI);
+ expect(mockPluginService.getUserPluginAuthValue).not.toHaveBeenCalled();
+ });
+
+ it('should initialize an authenticated tool when env var is set', async () => {
+ process.env.WOLFRAM_APP_ID = 'mocked_app_id';
+ const initToolFunction = loadToolWithAuth('userId', ['WOLFRAM_APP_ID'], WolframAlphaAPI);
+ const authTool = await initToolFunction();
+
+ expect(authTool).toBeInstanceOf(WolframAlphaAPI);
+ expect(mockPluginService.getUserPluginAuthValue).not.toHaveBeenCalledWith(
+ 'userId',
+ 'WOLFRAM_APP_ID',
+ );
+ });
+
+ it('should fallback to getUserPluginAuthValue when singular env var is missing', async () => {
+ delete process.env.WOLFRAM_APP_ID; // Ensure the environment variable is not set
+ mockPluginService.getUserPluginAuthValue.mockResolvedValue('mocked_user_auth_value');
+ const initToolFunction = loadToolWithAuth('userId', ['WOLFRAM_APP_ID'], WolframAlphaAPI);
+ const authTool = await initToolFunction();
+
+ expect(authTool).toBeInstanceOf(WolframAlphaAPI);
+ expect(mockPluginService.getUserPluginAuthValue).toHaveBeenCalledTimes(1);
+ expect(mockPluginService.getUserPluginAuthValue).toHaveBeenCalledWith(
+ 'userId',
+ 'WOLFRAM_APP_ID',
+ );
+ });
+
it('should throw an error for an unauthenticated tool', async () => {
try {
await loadTool2();
diff --git a/api/app/clients/tools/util/loadToolSuite.js b/api/app/clients/tools/util/loadToolSuite.js
index 2b4500a4f77..4392d61b9a6 100644
--- a/api/app/clients/tools/util/loadToolSuite.js
+++ b/api/app/clients/tools/util/loadToolSuite.js
@@ -1,17 +1,49 @@
-const { getUserPluginAuthValue } = require('../../../../server/services/PluginService');
+const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const { availableTools } = require('../');
+const { logger } = require('~/config');
-const loadToolSuite = async ({ pluginKey, tools, user, options }) => {
+/**
+ * Loads a suite of tools with authentication values for a given user, supporting alternate authentication fields.
+ * Authentication fields can have alternates separated by "||", and the first defined variable will be used.
+ *
+ * @param {Object} params Parameters for loading the tool suite.
+ * @param {string} params.pluginKey Key identifying the plugin whose tools are to be loaded.
+ * @param {Array} params.tools Array of tool constructor functions.
+ * @param {Object} params.user User object for whom the tools are being loaded.
+ * @param {Object} [params.options={}] Optional parameters to be passed to each tool constructor.
+ * @returns {Promise} A promise that resolves to an array of instantiated tools.
+ */
+const loadToolSuite = async ({ pluginKey, tools, user, options = {} }) => {
const authConfig = availableTools.find((tool) => tool.pluginKey === pluginKey).authConfig;
const suite = [];
const authValues = {};
+ const findAuthValue = async (authField) => {
+ const fields = authField.split('||');
+ for (const field of fields) {
+ let value = process.env[field];
+ if (value) {
+ return value;
+ }
+ try {
+ value = await getUserPluginAuthValue(user, field);
+ if (value) {
+ return value;
+ }
+ } catch (err) {
+ logger.error(`Error fetching plugin auth value for ${field}: ${err.message}`);
+ }
+ }
+ return null;
+ };
+
for (const auth of authConfig) {
- let authValue = process.env[auth.authField];
- if (!authValue) {
- authValue = await getUserPluginAuthValue(user, auth.authField);
+ const authValue = await findAuthValue(auth.authField);
+ if (authValue !== null) {
+ authValues[auth.authField] = authValue;
+ } else {
+ logger.warn(`[loadToolSuite] No auth value found for ${auth.authField}`);
}
- authValues[auth.authField] = authValue;
}
for (const tool of tools) {
diff --git a/api/cache/getLogStores.js b/api/cache/getLogStores.js
index 016c7700009..786bb1f1f74 100644
--- a/api/cache/getLogStores.js
+++ b/api/cache/getLogStores.js
@@ -1,5 +1,5 @@
const Keyv = require('keyv');
-const { CacheKeys } = require('librechat-data-provider');
+const { CacheKeys, ViolationTypes } = require('librechat-data-provider');
const { logFile, violationFile } = require('./keyvFiles');
const { math, isEnabled } = require('~/server/utils');
const keyvRedis = require('./keyvRedis');
@@ -23,6 +23,22 @@ const config = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.CONFIG_STORE });
+const tokenConfig = isEnabled(USE_REDIS) // ttl: 30 minutes
+ ? new Keyv({ store: keyvRedis, ttl: 1800000 })
+ : new Keyv({ namespace: CacheKeys.TOKEN_CONFIG, ttl: 1800000 });
+
+const genTitle = isEnabled(USE_REDIS) // ttl: 2 minutes
+ ? new Keyv({ store: keyvRedis, ttl: 120000 })
+ : new Keyv({ namespace: CacheKeys.GEN_TITLE, ttl: 120000 });
+
+const modelQueries = isEnabled(USE_REDIS)
+ ? new Keyv({ store: keyvRedis })
+ : new Keyv({ namespace: CacheKeys.MODEL_QUERIES });
+
+const abortKeys = isEnabled(USE_REDIS)
+ ? new Keyv({ store: keyvRedis })
+ : new Keyv({ namespace: CacheKeys.ABORT_KEYS, ttl: 600000 });
+
const namespaces = {
[CacheKeys.CONFIG_STORE]: config,
pending_req,
@@ -31,9 +47,17 @@ const namespaces = {
concurrent: createViolationInstance('concurrent'),
non_browser: createViolationInstance('non_browser'),
message_limit: createViolationInstance('message_limit'),
- token_balance: createViolationInstance('token_balance'),
+ token_balance: createViolationInstance(ViolationTypes.TOKEN_BALANCE),
registrations: createViolationInstance('registrations'),
+ [ViolationTypes.FILE_UPLOAD_LIMIT]: createViolationInstance(ViolationTypes.FILE_UPLOAD_LIMIT),
+ [ViolationTypes.ILLEGAL_MODEL_REQUEST]: createViolationInstance(
+ ViolationTypes.ILLEGAL_MODEL_REQUEST,
+ ),
logins: createViolationInstance('logins'),
+ [CacheKeys.ABORT_KEYS]: abortKeys,
+ [CacheKeys.TOKEN_CONFIG]: tokenConfig,
+ [CacheKeys.GEN_TITLE]: genTitle,
+ [CacheKeys.MODEL_QUERIES]: modelQueries,
};
/**
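The new namespaces lean on keyv's per-store TTL: entries silently expire after the configured milliseconds. A minimal sketch of that behavior:

const Keyv = require('keyv');

(async () => {
  const genTitle = new Keyv({ namespace: 'gen_title', ttl: 120000 }); // 2 minutes
  await genTitle.set('convo-1', 'Draft title');
  await genTitle.get('convo-1'); // 'Draft title' within the TTL, undefined after
})();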
diff --git a/api/config/parsers.js b/api/config/parsers.js
index 59685eab0bf..16c85cba4f4 100644
--- a/api/config/parsers.js
+++ b/api/config/parsers.js
@@ -33,6 +33,10 @@ function getMatchingSensitivePatterns(valueStr) {
* @returns {string} - The redacted console message.
*/
function redactMessage(str) {
+ if (!str) {
+ return '';
+ }
+
const patterns = getMatchingSensitivePatterns(str);
if (patterns.length === 0) {
diff --git a/api/config/paths.js b/api/config/paths.js
index 41e3ac5054f..165e9e6cd4f 100644
--- a/api/config/paths.js
+++ b/api/config/paths.js
@@ -1,7 +1,14 @@
const path = require('path');
module.exports = {
+ root: path.resolve(__dirname, '..', '..'),
+ uploads: path.resolve(__dirname, '..', '..', 'uploads'),
+ clientPath: path.resolve(__dirname, '..', '..', 'client'),
dist: path.resolve(__dirname, '..', '..', 'client', 'dist'),
publicPath: path.resolve(__dirname, '..', '..', 'client', 'public'),
+ fonts: path.resolve(__dirname, '..', '..', 'client', 'public', 'fonts'),
+ assets: path.resolve(__dirname, '..', '..', 'client', 'public', 'assets'),
imageOutput: path.resolve(__dirname, '..', '..', 'client', 'public', 'images'),
+ structuredTools: path.resolve(__dirname, '..', 'app', 'clients', 'tools', 'structured'),
+ pluginManifest: path.resolve(__dirname, '..', 'app', 'clients', 'tools', 'manifest.json'),
};
diff --git a/api/config/winston.js b/api/config/winston.js
index 6cba153f163..81e972fbbc3 100644
--- a/api/config/winston.js
+++ b/api/config/winston.js
@@ -5,7 +5,15 @@ const { redactFormat, redactMessage, debugTraverse } = require('./parsers');
const logDir = path.join(__dirname, '..', 'logs');
-const { NODE_ENV, DEBUG_LOGGING = true, DEBUG_CONSOLE = false } = process.env;
+const { NODE_ENV, DEBUG_LOGGING = true, DEBUG_CONSOLE = false, CONSOLE_JSON = false } = process.env;
+
+const useConsoleJson =
+ (typeof CONSOLE_JSON === 'string' && CONSOLE_JSON?.toLowerCase() === 'true') ||
+ CONSOLE_JSON === true;
+
+const useDebugConsole =
+ (typeof DEBUG_CONSOLE === 'string' && DEBUG_CONSOLE?.toLowerCase() === 'true') ||
+ DEBUG_CONSOLE === true;
const levels = {
error: 0,
@@ -33,7 +41,7 @@ const level = () => {
const fileFormat = winston.format.combine(
redactFormat(),
- winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }),
+ winston.format.timestamp({ format: () => new Date().toISOString() }),
winston.format.errors({ stack: true }),
winston.format.splat(),
// redactErrors(),
@@ -99,14 +107,20 @@ const consoleFormat = winston.format.combine(
}),
);
-if (
- (typeof DEBUG_CONSOLE === 'string' && DEBUG_CONSOLE?.toLowerCase() === 'true') ||
- DEBUG_CONSOLE === true
-) {
+if (useDebugConsole) {
transports.push(
new winston.transports.Console({
level: 'debug',
- format: winston.format.combine(consoleFormat, debugTraverse),
+ format: useConsoleJson
+ ? winston.format.combine(fileFormat, debugTraverse, winston.format.json())
+ : winston.format.combine(fileFormat, debugTraverse),
+ }),
+ );
+} else if (useConsoleJson) {
+ transports.push(
+ new winston.transports.Console({
+ level: 'info',
+ format: winston.format.combine(fileFormat, winston.format.json()),
}),
);
} else {
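The `CONSOLE_JSON` branch reduces to a console transport with a JSON format, emitting one structured object per line for log collectors such as GCP/AWS. A stripped-down sketch:

const winston = require('winston');

const logger = winston.createLogger({
  transports: [
    new winston.transports.Console({
      level: 'info',
      format: winston.format.combine(
        winston.format.timestamp({ format: () => new Date().toISOString() }),
        winston.format.json(),
      ),
    }),
  ],
});

logger.info('server started');
// => {"level":"info","message":"server started","timestamp":"2024-..."}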
diff --git a/api/models/Action.js b/api/models/Action.js
new file mode 100644
index 00000000000..5141569c103
--- /dev/null
+++ b/api/models/Action.js
@@ -0,0 +1,68 @@
+const mongoose = require('mongoose');
+const actionSchema = require('./schema/action');
+
+const Action = mongoose.model('action', actionSchema);
+
+/**
+ * Update an action with new data without overwriting existing properties,
+ * or create a new action if it doesn't exist.
+ *
+ * @param {Object} searchParams - The search parameters to find the action to update.
+ * @param {string} searchParams.action_id - The ID of the action to update.
+ * @param {string} searchParams.user - The user ID of the action's author.
+ * @param {Object} updateData - An object containing the properties to update.
+ * @returns {Promise