Merge pull request #289 from sirajperson/main
Added local LLM functionality by incorporating text-generation-webui
Committed Jun 12, 2023
2 parents d9ea6d2 + 2c7ac53 commit 6789ab6
Showing 43 changed files with 668 additions and 19 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
@@ -14,4 +14,4 @@ COPY entrypoint.sh /entrypoint.sh
COPY wait-for-it.sh /wait-for-it.sh
RUN chmod +x /entrypoint.sh /wait-for-it.sh

CMD ["/wait-for-it.sh", "super__postgres:5432","-t","60","--","/entrypoint.sh"]
CMD ["/wait-for-it.sh", "super__postgres:5432","-t","60","--","/entrypoint.sh"]
2 changes: 1 addition & 1 deletion DockerfileCelery
@@ -12,4 +12,4 @@ WORKDIR /app
COPY . .
COPY config.yaml .

CMD ["celery", "-A", "superagi.worker", "worker", "--loglevel=info"]
CMD ["celery", "-A", "superagi.worker", "worker", "--loglevel=info"]
2 changes: 1 addition & 1 deletion cli2.py
@@ -24,10 +24,10 @@ def run_npm_commands(shell=False):


def run_server(shell=False,a_name=None,a_description=None,goals=None):
    tgwui_process = Process(target=subprocess.run, args=(["python", "test.py", "--name", a_name, "--description", a_description, "--goals"] + goals,), kwargs={"shell": shell})
    api_process = Process(target=subprocess.run, args=(["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"],), kwargs={"shell": shell})
    celery_process = Process(target=subprocess.run, args=(["celery", "-A", "celery_app", "worker", "--loglevel=info"],), kwargs={"shell": shell})
    ui_process = Process(target=subprocess.run, args=(["python", "test.py", "--name", a_name, "--description", a_description, "--goals"] + goals,), kwargs={"shell": shell})

    api_process.start()
    celery_process.start()
    ui_process.start()
12 changes: 10 additions & 2 deletions config_template.yaml
@@ -4,9 +4,17 @@ PINECONE_ENVIRONMENT: YOUR_PINECONE_ENVIRONMENT

OPENAI_API_KEY: YOUR_OPEN_API_KEY

#DATABASE INFO
# For locally hosted LLMs, comment out the next line and uncomment the one after it.
# To configure a local LLM, point your browser to 127.0.0.1:7860 and use the Model tab in text-generation-webui.
OPENAI_API_BASE: https://api.openai.com/v1
#OPENAI_API_BASE: "http://super__tgwui:5001/v1"

# "gpt-3.5-turbo-0301": 4032, "gpt-4-0314": 8092, "gpt-3.5-turbo": 4032, "gpt-4": 8092, "llama":2048, "mpt-7b-storywriter":45000
MODEL_NAME: "gpt-3.5-turbo-0301"
MAX_TOOL_TOKEN_LIMIT: 800
MAX_MODEL_TOKEN_LIMIT: 4032 # set to 2048 for llama

#DATABASE INFO
# redis details
DB_NAME: super_agi_main
POSTGRES_URL: super__postgres
@@ -48,7 +56,7 @@ ENCRYPTION_KEY: secret
GOOGLE_API_KEY: YOUR_GOOGLE_API_KEY
SEARCH_ENGINE_ID: YOUR_SEARCH_ENGINE_ID

# IF YOU DONT HAVE GOOGLE SERACH KEY, USE THIS
# IF YOU DONT HAVE GOOGLE SEARCH KEY, USE THIS
SERP_API_KEY: YOUR_SERP_API_KEY

#ENTER YOUR EMAIL CREDENTIALS TO ACCESS EMAIL TOOL
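The two OPENAI_API_BASE lines above are the whole switch: SuperAGI keeps speaking the OpenAI wire protocol, and the `openai` extension of text-generation-webui answers on port 5001 instead. A quick sanity check of the local endpoint might look like this (a sketch, assuming port 5001 is published to localhost and the legacy `openai` 0.x SDK this codebase uses; the key is a placeholder, since the local backend is not expected to validate it):

```python
import openai

# Point the SDK at the local text-generation-webui endpoint instead of api.openai.com.
openai.api_base = "http://localhost:5001/v1"  # inside the compose network: http://super__tgwui:5001/v1
openai.api_key = "placeholder"  # assumption: the local backend ignores the key

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",  # tgwui serves whichever model is loaded, regardless of this name
    messages=[{"role": "user", "content": "Reply with one short sentence."}],
    max_tokens=64,
)
print(response["choices"][0]["message"]["content"])
```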
8 changes: 4 additions & 4 deletions docker-compose.yaml
@@ -24,7 +24,7 @@ services:
      - super__postgres
    networks:
      - super_network

  gui:
    build: ./gui
    ports:
@@ -39,12 +39,12 @@ services:
      - /app/.next

  super__redis:
    image: "redis:latest"
    image: "docker.io/library/redis:latest"
    networks:
      - super_network

  super__postgres:
    image: "postgres:latest"
    image: "docker.io/library/postgres:latest"
    environment:
      - POSTGRES_USER=superagi
      - POSTGRES_PASSWORD=password
@@ -61,4 +61,4 @@ networks:
    driver: bridge

volumes:
  superagi_postgres_data:
  superagi_postgres_data:
93 changes: 93 additions & 0 deletions local-llm
@@ -0,0 +1,93 @@
version: '3.8'

services:
  backend:
    volumes:
      - "./:/app"
    build: .
    ports:
      - "8001:8001"
    depends_on:
      - super__tgwui
      - super__redis
      - super__postgres
    networks:
      - super_network

  celery:
    volumes:
      - "./:/app"
    build:
      context: .
      dockerfile: DockerfileCelery
    depends_on:
      - super__tgwui
      - super__redis
      - super__postgres
    networks:
      - super_network

  gui:
    build: ./gui
    ports:
      - "3000:3000"
    environment:
      - NEXT_PUBLIC_API_BASE_URL=http://localhost:8001
    networks:
      - super_network
    volumes:
      - ./gui:/app
      - /app/node_modules
      - /app/.next

  super__tgwui:
    build:
      context: .
      dockerfile: ./tgwui/DockerfileTGWUI
    container_name: super__tgwui
    environment:
      - EXTRA_LAUNCH_ARGS="--listen --verbose --extensions openai --threads 4 --n_ctx 1600"
    ports:
      - 7860:7860  # Default web port
      - 5000:5000  # Default API port
      - 5005:5005  # Default streaming port
      - 5001:5001  # Default OpenAI API extension port
    volumes:
      - ./tgwui/config/loras:/app/loras
      - ./tgwui/config/models:/app/models
      - ./tgwui/config/presets:/app/presets
      - ./tgwui/config/prompts:/app/prompts
      - ./tgwui/config/softprompts:/app/softprompts
      - ./tgwui/config/training:/app/training
    logging:
      driver: json-file
      options:
        max-file: "3"  # file count
        max-size: '10m'
    networks:
      - super_network

  super__redis:
    image: "docker.io/library/redis:latest"
    networks:
      - super_network

  super__postgres:
    image: "docker.io/library/postgres:latest"
    environment:
      - POSTGRES_USER=superagi
      - POSTGRES_PASSWORD=password
      - POSTGRES_DB=super_agi_main
    volumes:
      - superagi_postgres_data:/var/lib/postgresql/data/
    networks:
      - super_network
    ports:
      - "5432:5432"

networks:
  super_network:
    driver: bridge

volumes:
  superagi_postgres_data:
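One operational note: `depends_on` only waits for the super__tgwui container to start, not for a model to finish loading, and the backend's wait-for-it.sh gates on postgres alone. A small readiness probe (a sketch, assuming the `openai` extension serves the standard `/v1/models` route on the published port) can confirm the model server is actually answering before agents are kicked off:

```python
import time

import requests


def wait_for_tgwui(url: str = "http://localhost:5001/v1/models", timeout: int = 300) -> dict:
    """Poll the OpenAI-compatible endpoint until text-generation-webui responds."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            resp = requests.get(url, timeout=5)
            if resp.ok:
                return resp.json()
        except requests.exceptions.ConnectionError:
            pass  # container still starting; keep polling
        time.sleep(5)
    raise TimeoutError(f"text-generation-webui not reachable at {url}")


if __name__ == "__main__":
    print(wait_for_tgwui())
```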
107 changes: 107 additions & 0 deletions local-llm-gpu
@@ -0,0 +1,107 @@
version: '3.8'

services:
  backend:
    volumes:
      - "./:/app"
    build: .
    ports:
      - "8001:8001"
    depends_on:
      - super__tgwui
      - super__redis
      - super__postgres
    networks:
      - super_network

  celery:
    volumes:
      - "./:/app"
    build:
      context: .
      dockerfile: DockerfileCelery
    depends_on:
      - super__tgwui
      - super__redis
      - super__postgres
    networks:
      - super_network

  gui:
    build: ./gui
    ports:
      - "3000:3000"
    environment:
      - NEXT_PUBLIC_API_BASE_URL=http://localhost:8001
    networks:
      - super_network
    volumes:
      - ./gui:/app
      - /app/node_modules
      - /app/.next

  super__tgwui:
    build:
      context: .
      target: llama-cublas
      dockerfile: ./tgwui/DockerfileTGWUI
      # args:
      #   - LCL_SRC_DIR=text-generation-webui  # Developers - see Dockerfile app_base
    container_name: super__tgwui
    environment:
      - EXTRA_LAUNCH_ARGS="--listen --no-mmap --verbose --extensions openai --auto-devices --n_ctx 1600 --gpu-memory 20 20 --n-gpu-layers 128 --threads 8 --model vicuna-13b-cot.ggmlv3.q8_0.bin"
    ports:
      - 7860:7860  # Default web port
      - 5000:5000  # Default API port
      - 5005:5005  # Default streaming port
      - 5001:5001  # Default OpenAI API extension port
    volumes:
      - ./tgwui/config/loras:/app/loras
      - ./tgwui/config/models:/app/models
      - ./tgwui/config/presets:/app/presets
      - ./tgwui/config/prompts:/app/prompts
      - ./tgwui/config/softprompts:/app/softprompts
      - ./tgwui/config/training:/app/training
      - ./tgwui/config/embeddings:/app/embeddings
    logging:
      driver: json-file
      options:
        max-file: "3"  # file count
        max-size: '10m'
    networks:
      - super_network
    ### Uncomment the following lines to run the container using the host machine's GPU resources
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              # count: "all"
              device_ids: ['0', '1']  # comment out "count" above if this line is uncommented
              capabilities: [gpu]

  super__redis:
    image: "docker.io/library/redis:latest"
    networks:
      - super_network

  super__postgres:
    image: "docker.io/library/postgres:latest"
    environment:
      - POSTGRES_USER=superagi
      - POSTGRES_PASSWORD=password
      - POSTGRES_DB=super_agi_main
    volumes:
      - superagi_postgres_data:/var/lib/postgresql/data/
    networks:
      - super_network
    ports:
      - "5432:5432"

networks:
  super_network:
    driver: bridge

volumes:
  superagi_postgres_data:
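Before pointing a 13B model at `--gpu-memory 20 20`, it is worth confirming the container actually sees both reserved devices. A quick check from inside the super__tgwui container (a sketch, assuming PyTorch is present in the image, which the llama-cublas build target implies):

```python
import torch

# Run inside the super__tgwui container to verify the nvidia device reservation.
print("CUDA available:", torch.cuda.is_available())
for i in range(torch.cuda.device_count()):
    props = torch.cuda.get_device_properties(i)
    print(f"GPU {i}: {props.name}, {props.total_memory / 2**30:.1f} GiB")
```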
6 changes: 6 additions & 0 deletions run.sh
@@ -6,6 +6,12 @@ if [ ! -f "config.yaml" ]; then
  exit 1
fi

if [ ! -d "tgwui/text-generation-webui" ]; then
  echo "Downloading tgwui src"
  git clone https://github.com/oobabooga/text-generation-webui
  mv text-generation-webui tgwui
fi

# Function to check if virtual environment is activated
is_venv_activated() {
  [[ -n "$VIRTUAL_ENV" ]]
Expand Down
4 changes: 2 additions & 2 deletions superagi/agent/super_agi.py
@@ -117,7 +117,7 @@ def execute(self, workflow_step: AgentWorkflowStep):
        agent_execution_id = self.agent_config["agent_execution_id"]
        task_queue = TaskQueue(str(agent_execution_id))

        token_limit = TokenCounter.token_limit(self.llm.get_model())
        token_limit = TokenCounter.token_limit()
        agent_feeds = self.fetch_agent_feeds(session, self.agent_config["agent_execution_id"], self.agent_config["agent_id"])
        current_calls = 0
        if len(agent_feeds) <= 0:
@@ -283,7 +283,7 @@ def build_agent_prompt(self, prompt: str, task_queue: TaskQueue, max_token_limit
        last_task = response["task"]
        last_task_result = response["response"]
        current_task = task_queue.get_first_task() or ""
        token_limit = TokenCounter.token_limit(self.llm.get_model()) - max_token_limit
        token_limit = TokenCounter.token_limit() - max_token_limit
        prompt = AgentPromptBuilder.replace_task_based_variables(prompt, current_task, last_task, last_task_result,
                                                                 pending_tasks, completed_tasks, token_limit)
        return prompt
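Dropping the model argument from `TokenCounter.token_limit()` matches the new `MAX_MODEL_TOKEN_LIMIT` config key: the context ceiling now comes from configuration rather than a per-model lookup, which is what lets a local llama (2048) or mpt-7b-storywriter (45000) advertise its own window. The corresponding `TokenCounter` change is not visible in this view; it presumably reduces to something like this hypothetical sketch:

```python
# Hypothetical sketch only -- the actual TokenCounter change is outside this diff.
from superagi.config.config import get_config


class TokenCounter:
    @staticmethod
    def token_limit() -> int:
        # Fall back to a GPT-3.5-sized window if the key is missing or unparsable.
        try:
            return int(get_config("MAX_MODEL_TOKEN_LIMIT"))
        except (TypeError, ValueError):
            return 4032
```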
2 changes: 1 addition & 1 deletion superagi/helper/json_cleaner.py
@@ -51,7 +51,7 @@ def extract_json_section(cls, input_str: str = ""):

    @classmethod
    def remove_escape_sequences(cls, string):
        return string.encode('utf-8').decode('unicode_escape').encode('raw_unicode_escape').decode('utf-8')
        return string.encode('utf-8').decode('unicode_escape').encode('raw_unicode_escape')

    @classmethod
    def add_quotes_to_property_names(cls, json_string: str) -> str:
2 changes: 1 addition & 1 deletion superagi/jobs/agent_executor.py
@@ -132,7 +132,7 @@ def execute_next_action(self, agent_execution_id):
            memory = VectorFactory.get_vector_storage("PineCone", "super-agent-index1",
                                                      OpenAiEmbedding(model_api_key))
        except:
            print("Unable to setup the pincone connection...")
            print("Unable to setup the pinecone connection...")
            memory = None

        user_tools = session.query(Tool).filter(Tool.id.in_(parsed_config["tools"])).all()
5 changes: 3 additions & 2 deletions superagi/llms/openai.py
@@ -8,7 +8,7 @@


class OpenAi(BaseLlm):
    def __init__(self, api_key, image_model=None, model="gpt-4", temperature=0.6, max_tokens=4032, top_p=1,
    def __init__(self, api_key, image_model=None, model="gpt-4", temperature=0.6, max_tokens=get_config("MAX_MODEL_TOKEN_LIMIT"), top_p=1,
                 frequency_penalty=0,
                 presence_penalty=0, number_of_results=1):
        self.model = model
@@ -21,14 +21,15 @@ def __init__(self, api_key, image_model=None, model="gpt-4", temperature=0.6, ma
        self.api_key = api_key
        self.image_model = image_model
        openai.api_key = api_key
        openai.api_base = get_config("OPENAI_API_BASE", "https://api.openai.com/v1")

    def get_model(self):
        return self.model

    def get_image_model(self):
        return self.image_model

    def chat_completion(self, messages, max_tokens=4032):
    def chat_completion(self, messages, max_tokens=get_config("MAX_MODEL_TOKEN_LIMIT")):
        try:
            # openai.api_key = get_config("OPENAI_API_KEY")
            response = openai.ChatCompletion.create(
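Two details in this file do the heavy lifting: `openai.api_base` reroutes every SDK call to whatever `OPENAI_API_BASE` names, and `max_tokens=get_config("MAX_MODEL_TOKEN_LIMIT")` replaces the hard-coded 4032. Note that a Python default argument is evaluated once at import time, so a missing key silently becomes `max_tokens=None` for every call. A more defensive variant (a sketch, not the committed code) resolves the value at call time and coerces it to an int:

```python
from superagi.config.config import get_config

DEFAULT_MAX_TOKENS = 4032  # GPT-3.5-sized fallback (assumption)


def resolve_max_tokens(max_tokens=None) -> int:
    """Resolve the completion budget at call time instead of import time."""
    if max_tokens is None:
        # get_config may return an int (YAML number), a str, or None (missing key).
        max_tokens = get_config("MAX_MODEL_TOKEN_LIMIT") or DEFAULT_MAX_TOKENS
    return int(max_tokens)
```

Calling `resolve_max_tokens()` inside `chat_completion` would also pick up config edits without re-importing the module.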
8 changes: 4 additions & 4 deletions superagi/tools/email/send_email_attachment.py
@@ -17,11 +17,11 @@ class SendEmailAttachmentInput(BaseModel):
    to: str = Field(..., description="Email Address of the Receiver, default email address is 'example@example.com'")
    subject: str = Field(..., description="Subject of the Email to be sent")
    body: str = Field(..., description="Email Body to be sent")
    filename: str = Field(..., description="Name of the file to be sent as an Attachement with Email")
    filename: str = Field(..., description="Name of the file to be sent as an Attachment with Email")


class SendEmailAttachmentTool(BaseTool):
    name: str = "Send Email with Attachement"
    name: str = "Send Email with Attachment"
    args_schema: Type[BaseModel] = SendEmailAttachmentInput
    description: str = "Send an Email with a file attached to it"

@@ -32,9 +32,9 @@ def _execute(self, to: str, subject: str, body: str, filename: str) -> str:
        base_path = base_path + filename
        attachmentpath = base_path
        attachment = os.path.basename(attachmentpath)
        return self.send_email_with_attachement(to, subject, body, attachmentpath, attachment)
        return self.send_email_with_attachment(to, subject, body, attachmentpath, attachment)

    def send_email_with_attachement(self, to, subject, body, attachment_path, attachment) -> str:
    def send_email_with_attachment(self, to, subject, body, attachment_path, attachment) -> str:
        email_sender = get_config('EMAIL_ADDRESS')
        email_password = get_config('EMAIL_PASSWORD')
        if email_sender == "" or email_sender.isspace():