61 changes: 57 additions & 4 deletions .env.example
@@ -13,7 +13,7 @@ OPENAI_API_KEY=""

# ENABLE_ANTHROPIC: Set to true to enable Anthropic as a language model provider.
ENABLE_ANTHROPIC=false
# ANTHROPIC_API_KEY: Your Anthropic API key for accessing models like Claude-3.
# ANTHROPIC_API_KEY: Your Anthropic API key for accessing models like Claude-3, Claude-4, and Claude-4.5.
ANTHROPIC_API_KEY=""

# ENABLE_AZURE: Set to true to enable Azure as a language model provider.
@@ -33,9 +33,28 @@ AZURE_GPT4O_MINI_API_KEY=""
AZURE_GPT4O_MINI_API_BASE=""
AZURE_GPT4O_MINI_API_VERSION=""

# Azure GPT-5 Model Configurations
ENABLE_AZURE_GPT5=false
AZURE_GPT5_DEPLOYMENT="gpt-5"
AZURE_GPT5_API_KEY=""
AZURE_GPT5_API_BASE=""
AZURE_GPT5_API_VERSION="2025-01-01-preview"

ENABLE_AZURE_GPT5_MINI=false
AZURE_GPT5_MINI_DEPLOYMENT="gpt-5-mini"
AZURE_GPT5_MINI_API_KEY=""
AZURE_GPT5_MINI_API_BASE=""
AZURE_GPT5_MINI_API_VERSION="2025-01-01-preview"

ENABLE_AZURE_GPT5_NANO=false
AZURE_GPT5_NANO_DEPLOYMENT="gpt-5-nano"
AZURE_GPT5_NANO_API_KEY=""
AZURE_GPT5_NANO_API_BASE=""
AZURE_GPT5_NANO_API_VERSION="2025-01-01-preview"

# ENABLE_GEMINI: Set to true to enable Gemini as a language model provider.
ENABLE_GEMINI=false
# GEMINI_API_KEY: Your Gemini API key for accessing models like GPT-4.
# GEMINI_API_KEY: Your Gemini API key for accessing models like Gemini 2.5 Pro.
GEMINI_API_KEY=""

# ENABLE_NOVITA: Set to true to enable Novita AI as a language model provider.
@@ -51,7 +70,10 @@ VOLCENGINE_API_KEY=""
VOLCENGINE_API_BASE="https://ark.cn-beijing.volces.com/api/v3"

# LLM_KEY: The chosen language model to use. This should be one of the models
# provided by the enabled LLM providers (e.g., OPENAI_GPT4_TURBO, OPENAI_GPT4V, ANTHROPIC_CLAUDE3, AZURE_OPENAI_GPT4V).
# provided by the enabled LLM providers (e.g., OPENAI_GPT5, OPENAI_GPT5_MINI, OPENAI_GPT5_NANO,
# AZURE_OPENAI_GPT5, AZURE_OPENAI_GPT5_MINI, AZURE_OPENAI_GPT5_NANO, ANTHROPIC_CLAUDE4.5_SONNET,
# ANTHROPIC_CLAUDE4_SONNET, ANTHROPIC_CLAUDE3.7_SONNET, OPENAI_GPT4_TURBO,
# OPENAI_GPT4V, ANTHROPIC_CLAUDE3, AZURE_OPENAI_GPT4V).
LLM_KEY=""
# SECONDARY_LLM_KEY: A cheaper LLM provider used for small tasks such as custom selection or SVG conversion. If empty, it defaults to LLM_KEY.
SECONDARY_LLM_KEY=""
@@ -75,6 +97,9 @@ MAX_STEPS_PER_RUN=50
LOG_LEVEL=INFO
# DATABASE_STRING: Database connection string.
DATABASE_STRING="postgresql+psycopg://skyvern@localhost/skyvern"
# If you are using Windows, use this DATABASE_STRING instead.
# DATABASE_STRING="postgresql+asyncpg://skyvern@localhost/skyvern"

# PORT: Port to run the agent on.
PORT=8000

@@ -87,4 +112,32 @@ ANALYTICS_ID="anonymous"
OP_SERVICE_ACCOUNT_TOKEN=""

# Enable recording skyvern logs as artifacts
ENABLE_LOG_ARTIFACTS=false
ENABLE_LOG_ARTIFACTS=false

# =============================================================================
# SKYVERN BITWARDEN CONFIGURATION
# =============================================================================
# Your organization ID on the official Bitwarden server or Vaultwarden (if you use organizations)
SKYVERN_AUTH_BITWARDEN_ORGANIZATION_ID=your-org-id-here

# These should match the values used by the Bitwarden CLI server for consistency
SKYVERN_AUTH_BITWARDEN_MASTER_PASSWORD=your-master-password-here
SKYVERN_AUTH_BITWARDEN_CLIENT_ID=user.your-client-id-here
SKYVERN_AUTH_BITWARDEN_CLIENT_SECRET=your-client-secret-here

# The CLI server will run on localhost:8002 by default
# Optional, because by default Bitwarden is used directly
# BITWARDEN_SERVER=http://localhost
# BITWARDEN_SERVER_PORT=8002

# =============================================================================
# OPTIONAL: ADDITIONAL SKYVERN CONFIGURATION
# =============================================================================
# If you need to override the default Bitwarden server settings in Skyvern
# These will be automatically set by the Docker Compose, but you can override them here

# Maximum number of retries for Bitwarden operations
# BITWARDEN_MAX_RETRIES=3

# Timeout in seconds for Bitwarden operations
# BITWARDEN_TIMEOUT_SECONDS=60
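
The new Azure GPT-5 variables pair with the LLM_KEY names listed above. As a minimal sketch of enabling one of them (the endpoint format and placeholder values below are assumptions; substitute your own deployment, key, and resource URL):

# Hypothetical example -- replace the placeholder values with your own.
ENABLE_AZURE_GPT5=true
AZURE_GPT5_DEPLOYMENT="gpt-5"
AZURE_GPT5_API_KEY="<your-azure-openai-key>"
AZURE_GPT5_API_BASE="https://<your-resource>.openai.azure.com/"   # endpoint format is an assumption
AZURE_GPT5_API_VERSION="2025-01-01-preview"
LLM_KEY="AZURE_OPENAI_GPT5"

With this in place, LLM_KEY selects the Azure GPT-5 deployment as the primary model; SECONDARY_LLM_KEY can stay empty to reuse it.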
27 changes: 27 additions & 0 deletions .gitattributes
@@ -0,0 +1,27 @@
# Set default behavior to automatically normalize line endings
* text=auto

# Force Unix LF line endings for shell scripts
*.sh text eol=lf
bitwarden-cli-server/entrypoint.sh text eol=lf

# Force Unix LF line endings for Python files
*.py text eol=lf

# Force Unix LF line endings for Docker files
Dockerfile text eol=lf
*.dockerfile text eol=lf

# Force Unix LF line endings for YAML and config files
*.yml text eol=lf
*.yaml text eol=lf
*.json text eol=lf
*.md text eol=lf

# Binary files
*.png binary
*.jpg binary
*.jpeg binary
*.gif binary
*.ico binary
*.pdf binary
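
With `* text=auto` in place, files already committed keep their old line endings until they are renormalized. A minimal sketch of the one-time cleanup (assuming you want the entire tree re-checked against the new rules):

# Re-apply the new .gitattributes rules to files already tracked by git.
git add --renormalize .
git status    # review which files had their line endings normalized
git commit -m "Normalize line endings per .gitattributes"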
4 changes: 2 additions & 2 deletions .github/sync.yml
@@ -4,8 +4,8 @@ Skyvern-AI/skyvern-cloud:
deleteOrphaned: true
- source: pyproject.toml
dest: pyproject.toml
- source: poetry.lock
dest: poetry.lock
- source: uv.lock
dest: uv.lock
- source: setup.sh
dest: setup.sh
- source: .env.example
83 changes: 39 additions & 44 deletions .github/workflows/ci.yml
@@ -32,55 +32,50 @@ jobs:
- uses: actions/checkout@v3
# If you wanted to use multiple Python versions, you'd have to specify a matrix in the job and
# reference the matrix Python version here.
- uses: actions/setup-python@v4
- uses: actions/setup-python@v6
with:
python-version: "3.11"
# Cache the installation of Poetry itself, e.g. the next step. This prevents the workflow
# from installing Poetry every time, which can be slow. Note the use of the Poetry version
# number in the cache key, and the "-0" suffix: this allows you to invalidate the cache
# manually if/when you want to upgrade Poetry, or if something goes wrong. This could be
# mildly cleaner by using an environment variable, but I don't really care.
- name: cache poetry install
uses: actions/cache@v3
# Install uv (fast, single-file binary)
- name: Install uv
run: |
curl -LsSf https://astral.sh/uv/install.sh | sh
echo "$HOME/.local/bin" >> $GITHUB_PATH
# Cache uv's download/resolve cache to speed up CI (optional but nice)
- name: Cache uv global cache
uses: actions/cache@v4
with:
path: ~/.local
key: poetry-1.7.1
# Install Poetry. You could do this manually, or there are several actions that do this.
# `snok/install-poetry` seems to be minimal yet complete, and really just calls out to
# Poetry's default install script, which feels correct. I pin the Poetry version here
# because Poetry does occasionally change APIs between versions and I don't want my
# actions to break if it does.
#
# The key configuration value here is `virtualenvs-in-project: true`: this creates the
# venv as a `.venv` in your testing directory, which allows the next step to easily
# cache it.
- uses: snok/install-poetry@v1
with:
version: 1.7.1
virtualenvs-create: true
virtualenvs-in-project: true
# Cache your dependencies (i.e. all the stuff in your `pyproject.toml`). Note the cache
# key: if you're using multiple Python versions, or multiple OSes, you'd need to include
# them in the cache key. I'm not, so it can be simple and just depend on the poetry.lock.
- name: cache deps
id: cache-deps
uses: actions/cache@v3
path: ~/.cache/uv
key: uv-cache-${{ runner.os }}-${{ hashFiles('**/pyproject.toml', '**/uv.lock') }}
# Cache the project virtualenv (keyed by Python version + lockfile)
- name: Cache venv
id: cache-venv
uses: actions/cache@v4
with:
path: .venv
key: pydeps-${{ hashFiles('**/poetry.lock') }}
# Install dependencies. `--no-root` means "install all dependencies but not the project
# itself", which is what you want to avoid caching _your_ code. The `if` statement
# ensures this only runs on a cache miss.
- run: poetry install --no-interaction --no-root
if: steps.cache-deps.outputs.cache-hit != 'true'
# Now install _your_ project. This isn't necessary for many types of projects -- particularly
# things like Django apps don't need this. But it's a good idea since it fully-exercises the
# pyproject.toml and makes that if you add things like console-scripts at some point that
# they'll be installed and working.
- run: poetry install --no-interaction
key: venv-${{ runner.os }}-py${{ steps.setup-python.outputs.python-version || '3.11' }}-${{ hashFiles('**/uv.lock') }}
# Create/refresh the environment (installs main + dev groups)
- name: Sync deps with uv
if: steps.cache-venv.outputs.cache-hit != 'true'
run: |
uv lock
uv sync --group dev
# Ensure venv is current even on cache hit (cheap no-op if up to date)
- name: Ensure environment is up to date
if: steps.cache-venv.outputs.cache-hit == 'true'
run: |
uv sync --group dev
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version-file: .nvmrc
cache: npm
cache-dependency-path: skyvern-frontend/package-lock.json
- name: Install frontend dependencies
working-directory: skyvern-frontend
run: npm ci
# Finally, run pre-commit.
- name: Run all pre-commit hooks
uses: pre-commit/action@v3.0.0
run: uv run pre-commit run --all-files
env:
ENABLE_OPENAI: "true"
OPENAI_API_KEY: "sk-dummy"
@@ -102,14 +97,14 @@ jobs:
AZURE_GPT4O_MINI_API_VERSION: "dummy"
AWS_REGION: "us-east-1"
ENABLE_BEDROCK: "true"
run: poetry run ./run_alembic_check.sh
run: uv run ./run_alembic_check.sh
- name: trigger tests
env:
ENABLE_OPENAI: "true"
OPENAI_API_KEY: "sk-dummy"
AWS_ACCESS_KEY_ID: "dummy"
AWS_SECRET_ACCESS_KEY: "dummy"
run: poetry run pytest
run: uv run pytest
fe-lint-build:
name: Frontend Lint and Build
runs-on: ubuntu-latest
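
The uv commands the workflow now runs can be reproduced locally when debugging CI failures; a minimal sketch, assuming uv is already installed and you are in the repository root:

uv lock                          # resolve and pin dependencies into uv.lock
uv sync --group dev              # create/refresh .venv with the main + dev groups
uv run pre-commit run --all-files
uv run ./run_alembic_check.sh    # migration check; needs the dummy env vars shown above
uv run pytest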
23 changes: 12 additions & 11 deletions .github/workflows/codeflash.yaml
@@ -22,22 +22,23 @@ jobs:
with:
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5
uses: actions/setup-python@v6
with:
python-version: "3.11"
- name: Install Project Dependencies
- name: Install uv
run: |
python -m pip install --upgrade pip
pip install poetry
poetry install --all-extras
poetry add codeflash
- name: create test dir
curl -LsSf https://astral.sh/uv/install.sh | sh
echo "$HOME/.local/bin" >> $GITHUB_PATH
- name: Sync project dependencies
run: |
mkdir -p codeflash-tests
- name: Run Codeflash to optimize code
uv sync --group dev
- name: Install Codeflash into venv
run: |
poetry env use python
poetry run codeflash
uv pip install codeflash
- name: Create test dir
run: mkdir -p codeflash-tests
- name: Run Codeflash to optimize code
run: uv run codeflash
- name: remove test dir
run: |-
rm -rf codeflash-tests
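
Note that Codeflash is installed into the uv-managed venv rather than declared in pyproject.toml, so it does not touch uv.lock. A rough local equivalent (assuming the project venv already exists from `uv sync`):

uv pip install codeflash    # install into .venv without modifying uv.lock
uv run codeflash            # run the optimizer against the synced project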
30 changes: 0 additions & 30 deletions .github/workflows/n8n-ci.yml

This file was deleted.

72 changes: 29 additions & 43 deletions .github/workflows/sdk-release.yml
@@ -45,58 +45,44 @@ jobs:
uses: actions/checkout@v4
# If you wanted to use multiple Python versions, you'd have to specify a matrix in the job and
# reference the matrix Python version here.
- uses: actions/setup-python@v4
- name: Setup Python
id: setup-python
uses: actions/setup-python@v6
with:
python-version: "3.11"
# Cache the installation of Poetry itself, e.g. the next step. This prevents the workflow
# from installing Poetry every time, which can be slow. Note the use of the Poetry version
# number in the cache key, and the "-0" suffix: this allows you to invalidate the cache
# manually if/when you want to upgrade Poetry, or if something goes wrong. This could be
# mildly cleaner by using an environment variable, but I don't really care.
- name: cache poetry install
uses: actions/cache@v3
with:
path: ~/.local
key: poetry-1.7.1
# Install Poetry. You could do this manually, or there are several actions that do this.
# `snok/install-poetry` seems to be minimal yet complete, and really just calls out to
# Poetry's default install script, which feels correct. I pin the Poetry version here
# because Poetry does occasionally change APIs between versions and I don't want my
# actions to break if it does.
#
# The key configuration value here is `virtualenvs-in-project: true`: this creates the
# venv as a `.venv` in your testing directory, which allows the next step to easily
# cache it.
- uses: snok/install-poetry@v1
# Install uv via its standalone installer (fast, single-file binary).
- name: Install uv
run: |
curl -LsSf https://astral.sh/uv/install.sh | sh
echo "$HOME/.local/bin" >> $GITHUB_PATH
# Cache uv's global cache (resolver/downloads) for speed
- name: Cache uv cache
uses: actions/cache@v4
with:
version: 1.7.1
virtualenvs-create: true
virtualenvs-in-project: true
# Cache your dependencies (i.e. all the stuff in your `pyproject.toml`). Note the cache
# key: if you're using multiple Python versions, or multiple OSes, you'd need to include
# them in the cache key. I'm not, so it can be simple and just depend on the poetry.lock.
- name: cache deps
id: cache-deps
uses: actions/cache@v3
path: ~/.cache/uv
key: uv-cache-${{ runner.os }}-${{ hashFiles('**/pyproject.toml', '**/uv.lock') }}
# Cache the project venv (keyed by lockfile + Python)
- name: Cache venv
id: cache-venv
uses: actions/cache@v4
with:
path: .venv
key: pydeps-${{ hashFiles('**/poetry.lock') }}
# Install dependencies. `--no-root` means "install all dependencies but not the project
# itself", which is what you want to avoid caching _your_ code. The `if` statement
# ensures this only runs on a cache miss.
- run: poetry install --no-interaction --no-root
if: steps.cache-deps.outputs.cache-hit != 'true'
# Now install _your_ project. This isn't necessary for many types of projects -- particularly
# things like Django apps don't need this. But it's a good idea since it fully-exercises the
# pyproject.toml and makes that if you add things like console-scripts at some point that
# they'll be installed and working.
- run: poetry install --no-interaction
key: venv-${{ runner.os }}-py${{ steps.setup-python.outputs.python-version || '3.11' }}-${{ hashFiles('**/uv.lock') }}
# Create/refresh environment. We install dev deps to get twine/build.
- name: Sync dependencies
if: steps.cache-venv.outputs.cache-hit != 'true'
run: |
uv sync --group dev
- name: Ensure environment is up to date (on cache hit)
if: steps.cache-venv.outputs.cache-hit == 'true'
run: uv sync --group dev
- name: Clean dist directory
run: rm -rf dist
- name: Build Package
run: poetry build
run: uv build
- name: Publish to PyPI
env:
TWINE_USERNAME: __token__
TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
run: poetry run twine upload --repository pypi dist/*
run: uv run twine upload --repository pypi dist/*
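
The release steps map onto the same commands locally; a minimal sketch for verifying a build before tagging (assuming twine comes from the dev group, as the comment above notes, and PYPI_TOKEN is set in your shell):

uv sync --group dev              # build/publish tooling lives in the dev group
rm -rf dist
uv build                         # produces the sdist and wheel under dist/
TWINE_USERNAME=__token__ TWINE_PASSWORD="$PYPI_TOKEN" \
  uv run twine upload --repository pypi dist/*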