Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Feature/python execution #223

Open
wants to merge 17 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
17 commits
Select commit Hold shift + click to select a range
496ceca
build: update dependencies for type support
devin-ai-integration[bot] Feb 23, 2025
1a62edb
fix: add type stubs and imports for server.py
devin-ai-integration[bot] Feb 23, 2025
778e15a
fix: add type annotations for async functions in server.py
devin-ai-integration[bot] Feb 23, 2025
80a9571
fix: remove explicit AsyncStream type annotations in favor of type in…
devin-ai-integration[bot] Feb 23, 2025
ec519c6
fix: update import paths for openai and ollama types
devin-ai-integration[bot] Feb 23, 2025
0dfe7c2
fix: improve type annotations and docstrings in server.py
devin-ai-integration[bot] Feb 23, 2025
1f2430d
fix: add comprehensive null checks for model providers
devin-ai-integration[bot] Feb 23, 2025
250ac28
fix: update models return type and OpenAI import
devin-ai-integration[bot] Feb 23, 2025
140dac7
fix: update model field definitions and version handling
devin-ai-integration[bot] Feb 23, 2025
c31c889
fix: add explicit return statements in perform_migration
devin-ai-integration[bot] Feb 23, 2025
7d99519
fix: update OpenAI import in litellm.py
devin-ai-integration[bot] Feb 23, 2025
dd26dbc
fix: update AsyncStream import and type annotations in openai.py
devin-ai-integration[bot] Feb 23, 2025
f2fbe97
fix: update return type annotation for models endpoint
devin-ai-integration[bot] Feb 23, 2025
e60540d
fix: update SchemaMigration model and DB path handling
devin-ai-integration[bot] Feb 23, 2025
66ef275
fix: update OpenAI import path in litellm.py
devin-ai-integration[bot] Feb 23, 2025
344a499
Merge pull request #2 from elchan/devin/1740337268-fix-backend-config
elchan Feb 23, 2025
0302eb2
Add Python code execution functionality
Feb 23, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions backend/Dockerfile.python
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Minimal sandbox image for running untrusted code as an unprivileged user.
# NOTE(review): CodeExecutor launches stock python:3.12-slim / ubuntu:22.04
# images directly — confirm where this custom image is actually used.
FROM python:3.12-slim
WORKDIR /app

# Create non-root user so executed code never runs as root
RUN adduser --disabled-password --gecos "" executor

# Create execution directory owned by that user so it is writable after USER drops privileges
RUN mkdir /app/execution && chown executor:executor /app/execution

# Switch to non-root user for all subsequent commands and at runtime
USER executor
WORKDIR /app/execution

# Keep container running idle so code can be injected/executed on demand
CMD ["tail", "-f", "/dev/null"]
102 changes: 102 additions & 0 deletions backend/openui/code_executor.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
from typing import Dict, Any
from pydantic import BaseModel
import docker
from docker.errors import APIError, ContainerError
from . import config

class CodeExecutionRequest(BaseModel):
    """Request model for code execution.

    Attributes:
        code: Source text to execute inside the sandbox container.
        language: Interpreter to use; CodeExecutor supports "python" and "bash".
        timeout: Maximum wall-clock seconds to wait for the container to finish.
    """
    code: str
    language: str = "python"  # "python" or "bash"
    timeout: int = 30  # seconds

class CodeExecutionResponse(BaseModel):
    """Response model for code execution results.

    Attributes:
        output: Combined stdout/stderr captured from the container
            (empty string when execution never started).
        error: Human-readable error description, or None on success.
        status: "success" or "error".
    """
    output: str
    error: str | None = None
    status: str

class CodeExecutor:
    """Runs user-supplied code inside short-lived, isolated Docker containers.

    Containers are started with networking disabled and with memory/PID
    limits so hostile or runaway code cannot reach the network or exhaust
    the host. Each request uses a fresh container that is force-removed
    after execution.
    """

    # language -> (image, command builder). Adding a language means adding
    # an entry here instead of growing an if/elif chain.
    _LANGUAGES = {
        "python": ("python:3.12-slim", lambda code: ["python", "-c", code]),
        "bash": ("ubuntu:22.04", lambda code: ["bash", "-c", code]),
    }

    def __init__(self):
        """Initialize the Docker client from the environment.

        Raises:
            RuntimeError: if the Docker daemon is unreachable or the client
                cannot be constructed.
        """
        try:
            self.client = docker.from_env()
        except Exception as e:
            raise RuntimeError(f"Failed to initialize Docker client: {e}") from e

    async def execute(self, request: CodeExecutionRequest) -> CodeExecutionResponse:
        """
        Execute code in an isolated Docker container.

        Args:
            request: CodeExecutionRequest containing code and execution parameters

        Returns:
            CodeExecutionResponse with execution results. A non-zero exit
            status of the executed process yields status="error" with the
            captured output still attached.
        """
        try:
            spec = self._LANGUAGES.get(request.language)
            if spec is None:
                return CodeExecutionResponse(
                    output="",
                    error=f"Unsupported language: {request.language}",
                    status="error"
                )
            image, build_command = spec

            # Run code in an isolated container.
            # Deliberately NOT passing remove=True: combined with detach=True
            # it becomes auto-remove, which races with the logs() call below
            # (the daemon may delete the container before output is read) and
            # raises on older docker-py versions. Cleanup is explicit in the
            # finally block instead. stdout/stderr kwargs are no-ops with
            # detach=True and were dropped.
            container = self.client.containers.run(
                image,
                build_command(request.code),
                detach=True,
                network_disabled=True,
                mem_limit="512m",
                pids_limit=50
            )

            try:
                # Wait for the process to finish; wait() raises on timeout,
                # which the generic handler below turns into an error response.
                result = container.wait(timeout=request.timeout)
                exit_code = result.get("StatusCode", 0)
                # Get execution output (combined stdout + stderr)
                output = container.logs(stdout=True, stderr=True).decode()
                if exit_code != 0:
                    # Previously ignored: a crashing script was reported as
                    # success. Surface the exit status instead.
                    return CodeExecutionResponse(
                        output=output,
                        error=f"Process exited with status {exit_code}",
                        status="error"
                    )
                return CodeExecutionResponse(
                    output=output,
                    status="success"
                )
            finally:
                try:
                    container.remove(force=True)
                except APIError:
                    # Container may already be gone (NotFound subclasses
                    # APIError); best-effort cleanup only.
                    pass
        except ContainerError as e:
            # Handle container execution errors
            return CodeExecutionResponse(
                output="",
                error=str(e),
                status="error"
            )
        except APIError as e:
            # Handle Docker API errors
            return CodeExecutionResponse(
                output="",
                error=str(e),
                status="error"
            )
        except Exception as e:
            # Handle unexpected errors (including wait() timeouts, which
            # surface as requests exceptions from docker-py)
            return CodeExecutionResponse(
                output="",
                error=f"Unexpected error: {str(e)}",
                status="error"
            )
27 changes: 20 additions & 7 deletions backend/openui/db/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
)
import uuid
import datetime
from pathlib import Path
from playhouse.sqlite_ext import SqliteExtDatabase, JSONField
from playhouse.migrate import SqliteMigrator, migrate
from openui import config
Expand All @@ -34,6 +35,7 @@ class Meta:


class SchemaMigration(BaseModel):
id = BinaryUUIDField(primary_key=True)
version = CharField()


Expand Down Expand Up @@ -81,6 +83,7 @@ class Usage(BaseModel):
output_tokens = IntegerField()
day = DateField()
user = ForeignKeyField(User, backref="usage")
user_id = BinaryUUIDField() # Explicit field for foreign key

class Meta:
primary_key = CompositeKey("user", "day")
Expand Down Expand Up @@ -122,13 +125,20 @@ def alter(schema: SchemaMigration, ops: list[list], version: str) -> bool:
except OperationalError as e:
print("Migration failed", e)
return False
schema.version = version
schema.save()
# Update version through model API
schema.update(version=version).where(SchemaMigration.id == schema.id).execute()
print(f"Migrated {version}")
return version != CURRENT_VERSION


def perform_migration(schema: SchemaMigration) -> bool:
"""Perform database schema migration.

Args:
schema: Current schema migration record
Returns:
bool: True if migration was performed, False otherwise
"""
if schema.version == "2024-03-08":
version = "2024-03-12"
aaguid = CharField(null=True)
Expand All @@ -142,18 +152,21 @@ def perform_migration(schema: SchemaMigration) -> bool:
version,
)
if altered:
perform_migration(schema)
return perform_migration(schema)
return True
if schema.version == "2024-03-12":
version = "2024-05-14"
database.create_tables([Vote])
schema.version = version
schema.save()
schema.update(version=version).where(SchemaMigration.id == schema.id).execute()
if version != CURRENT_VERSION:
perform_migration(schema)
return perform_migration(schema)
return True
return False # No migration needed


def ensure_migrated():
if not config.DB.exists():
db_path = Path(config.DB)
if not db_path.exists():
database.create_tables(
[User, Credential, Session, Component, SchemaMigration, Usage, Vote]
)
Expand Down
4 changes: 2 additions & 2 deletions backend/openui/litellm.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import yaml
import os
import tempfile
import openai
from openai._client import AsyncOpenAI as OpenAI
from .logs import logger


Expand Down Expand Up @@ -98,7 +98,7 @@ def generate_config():
)

if "OPENAI_COMPATIBLE_ENDPOINT" in os.environ:
client = openai.OpenAI(
client = OpenAI(
api_key=os.getenv("OPENAI_COMPATIBLE_API_KEY"),
base_url=os.getenv("OPENAI_COMPATIBLE_ENDPOINT"),
)
Expand Down
9 changes: 4 additions & 5 deletions backend/openui/openai.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,7 @@
import json
from openai import AsyncStream
from openai.types.chat import (
ChatCompletionChunk,
)
from openai._streaming import AsyncStream
from openai.types.chat import ChatCompletionChunk
from typing import AsyncGenerator
from .db.models import Usage


Expand All @@ -11,7 +10,7 @@ async def openai_stream_generator(
input_tokens: int,
user_id: str,
multiplier: int = 1,
):
) -> AsyncGenerator[str, None]:
# async for chunk in subscription.response.aiter_bytes():
# yield chunk
output_tokens = 0
Expand Down
Loading