Skip to content

Commit

Permalink
Add dev container and clean up repo
Browse files Browse the repository at this point in the history
  • Loading branch information
Enias Cailliau committed May 16, 2023
1 parent 3a93ae3 commit 09e2adc
Show file tree
Hide file tree
Showing 12 changed files with 342 additions and 211 deletions.
25 changes: 25 additions & 0 deletions .devcontainer/devcontainer.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/python
{
"name": "Python 3",
// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
// Note: Steamship currently requires Python 3.8, so the image tag is pinned to 3.8.
"image": "mcr.microsoft.com/devcontainers/python:0-3.8",

// Features to add to the dev container. More info: https://containers.dev/features.
// "features": {},

// Configure tool-specific properties.
"customizations": {
// Configure properties specific to VS Code.
"vscode": {
"settings": {},
// Spell checker helps keep docs/comments clean; add more extensions as needed.
"extensions": [
"streetsidesoftware.code-spell-checker"
]
}
},

// Use 'postCreateCommand' to run commands after the container is created.
// Installs the project's Python dependencies into the container on first build.
"postCreateCommand": "pip3 install -r requirements.txt"
}
16 changes: 16 additions & 0 deletions .vscode/launch.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
{
    // Launch configuration: run main.py with the project sources on PYTHONPATH.
    // Schema version for VS Code launch configurations is "0.2.0".
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Run",
            "type": "python",
            "request": "launch",
            // ${workspaceFolder} replaces the deprecated ${workspaceRoot} variable.
            "program": "${workspaceFolder}/main.py",
            "console": "integratedTerminal",
            "justMyCode": true,
            "env": {
                "PYTHONPATH": "${workspaceFolder}/src"
            }
        }
    ]
}
26 changes: 9 additions & 17 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,25 +1,17 @@
# Tutorial: Telegram chatbot with LangChain
# Tutorial: Deploying Multi-Modal LangChain Agents

This project contains the necessary scaffolding to deploy LangChain conversation agents with memory and connect them to Telegram.
This project contains the necessary scaffolding to deploy LangChain conversation agents with memory and connect them to
Telegram.

These 4 steps should get you online. If not, shoot me a message on [Discord](https://steamship.com/discord). Happy to help you out.
These 4 steps should get you online. If not, shoot me a message on [Discord](https://steamship.com/discord). Happy to
help you out.

Let's go:

Let's go:
> Step 1: Just copy and paste your LangChain agent into `src/chatbot/get_agent`
> Step 1: Just copy and paste your LangChain conversation LLMChain into `src/chatbot/get_chatbot`

> Step 2: Pip-install the latest `steamship_langchain`: `pip install --upgrade steamship_langchain`
> Step 2: Add your telegram bot access token under `BOT_TOKEN` in `src/chatbot.py`. More info [here](docs/register-telegram-bot.md)


> Step 3: Pip install the latest `steamship_langchain`: `pip install --upgrade steamship_langchain`

> Step 4: Run `python deploy.py`

## Variations

Examples of this package:
* Gym Bro with long-term memory: https://github.com/steamship-packages/langchain-telegram-chatbot/tree/ec/gym-bro
> Step 3: Run `python deploy.py`
2 changes: 1 addition & 1 deletion deploy.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
package_handle=manifest.handle,
version=manifest.version,
instance_handle=f"{manifest.handle}-{manifest.version.replace('.', '-')}",
config={"bot_token": "5629695237:AAFwmYgYRIV1tyPSBEhdYhuQMPVFu_dliAA"},
config={"bot_token": "6140681319:AAFqNDOs68qROhCxUO8qOhR8V0IEr5k5vb8"},
)

bot.wait_for_init()
Expand Down
78 changes: 78 additions & 0 deletions main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
import logging
import sys
from functools import partial
from typing import List

from steamship.experimental.transports.chat import ChatMessage

from api import LangChainTelegramChatbot

sys.path.insert(0, "src")
from steamship import Steamship, SteamshipError
from steamship.cli.ship_spinner import ship_spinner
from termcolor import colored


def show_results(response_messages: List[ChatMessage]):
    """Pretty-print the agent's responses: image URLs for media, raw text otherwise."""
    print(colored("\nResults: ", "blue", attrs=["bold"]))
    for msg in response_messages:
        # Messages carrying image data expose a public URL instead of text.
        is_image = bool(msg.mime_type) and msg.mime_type.startswith("image")
        print(msg.url if is_image else msg.text, end="\n\n")


class LoggingDisabled:
    """Context manager that turns off logging within context."""

    def __enter__(self):
        # Raise the global disable threshold so nothing at or below CRITICAL is emitted.
        logging.disable(logging.CRITICAL)

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # NOTSET removes the global threshold, restoring normal logging behavior.
        logging.disable(logging.NOTSET)


def main():
    """Interactive local REPL for exercising the Telegram chatbot agent.

    Creates a temporary Steamship workspace, instantiates the bot with a dummy
    token, and loops forever reading prompts from stdin until CTRL+C.
    """
    # Instantiating a client up front validates Steamship credentials before we
    # bother creating a temporary workspace.
    Steamship()

    with Steamship.temporary_workspace() as client:
        run = partial(
            run_agent,
            agent=LangChainTelegramChatbot(client=client, config={"bot_token": "test"}),
        )
        # NOTE: f-prefixes removed from placeholder-free strings (ruff F541).
        print("Starting Agent...")
        print(
            "If you make code changes, you will need to restart this client. Press CTRL+C to exit at any time.\n"
        )

        count = 1
        while True:
            print(f"----- Agent Run {count} -----")
            prompt = input(colored("Prompt: ", "blue"))
            run(prompt=prompt)
            count += 1


def run_agent(agent, prompt: str, as_api: bool = False) -> None:
    """Send `prompt` to the agent and print the resulting chat messages."""
    incoming = ChatMessage(text=prompt, chat_id="123")
    if agent.is_verbose_logging_enabled():
        # Verbose mode already streams progress; just run the agent.
        response = agent.create_response(incoming_message=incoming)
    else:
        # Otherwise show a spinner so the user knows work is in progress.
        print("Running: ", end="")
        with ship_spinner():
            response = agent.create_response(incoming_message=incoming)

    show_results(response)


if __name__ == "__main__":
    # when running locally, we can use print statements to capture logs / info.
    # as a result, we will disable python logging to run. this will keep the output cleaner.
    with LoggingDisabled():
        try:
            main()
        except SteamshipError as e:
            # Known platform/client failures get a short, colored message
            # instead of a full traceback.
            print(colored("Aborting! ", "red"), end="")
            print(f"There was an error encountered when running: {e}")
3 changes: 2 additions & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
steamship_langchain==0.0.20rc2
pytimeparse
steamship==2.16.10rc1
steamship==2.16.10rc1
termcolor
93 changes: 93 additions & 0 deletions src/agent/base.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
"""Define your LangChain chatbot."""
import re
from abc import abstractmethod
from typing import List, Optional

from langchain.agents import AgentExecutor
from steamship import Block
from steamship.experimental.package_starters.telegram_bot import TelegramBot
from steamship.experimental.transports.chat import ChatMessage
from steamship.invocable import post

from agent.utils import is_valid_uuid, make_image_public, UUID_PATTERN


class LangChainAgentBot(TelegramBot):
    """Telegram bot base class that delegates conversation handling to a LangChain agent.

    Subclasses supply the agent via `get_agent`; this class handles message
    routing, the "/start" reset command, and converting agent output (text
    and block UUIDs) into ChatMessage objects.
    """

    @abstractmethod
    def get_agent(self, chat_id: str) -> AgentExecutor:
        """Return the AgentExecutor powering the conversation for `chat_id`."""
        raise NotImplementedError()

    def is_verbose_logging_enabled(self):
        """Whether the agent logs verbosely; subclasses may override."""
        return True

    @post("send_message")
    def send_message(self, message: str, chat_id: str) -> str:
        """Send a message to Telegram.
        Note: This is a private endpoint that requires authentication."""
        outgoing = ChatMessage(text=message, chat_id=chat_id)
        self.telegram_transport.send([outgoing])
        return "ok"

    def _invoke_later(self, delay_ms: int, message: str, chat_id: str):
        # Schedule a delayed call to our own send_message endpoint.
        self.invoke_later(
            "send_message",
            delay_ms=delay_ms,
            arguments={"message": message, "chat_id": chat_id},
        )

    def create_response(
        self, incoming_message: ChatMessage
    ) -> Optional[List[ChatMessage]]:
        """Use the LLM to prepare the next response by appending the user input to the file and then generating."""
        chat_id = incoming_message.get_chat_id()

        # "/start" resets the conversation instead of invoking the agent.
        if incoming_message.text == "/start":
            return [ChatMessage(text="New conversation started.", chat_id=chat_id)]

        raw_output = self.get_agent(chat_id=chat_id).run(input=incoming_message.text)
        # Split the output on embedded block UUIDs, then strip any leading
        # punctuation/whitespace left over from the split on each piece.
        pieces = [re.sub(r"^\W+", "", piece) for piece in UUID_PATTERN.split(raw_output)]
        return self.agent_output_to_chat_messages(chat_id=chat_id, agent_output=pieces)

    def agent_output_to_chat_messages(
        self, chat_id: str, agent_output: List[str]
    ) -> List[ChatMessage]:
        """Transform the output of the Multi-Modal Agent into a list of ChatMessage objects.
        The response of a Multi-Modal Agent contains one or more:
        - parseable UUIDs, representing a block containing binary data, or:
        - Text
        This method inspects each string and creates a ChatMessage of the appropriate type.
        """
        messages = []
        for part in agent_output:
            if not is_valid_uuid(part):
                # Plain text: wrap it directly in a ChatMessage.
                messages.append(
                    ChatMessage(client=self.client, chat_id=chat_id, text=part)
                )
                continue

            # UUID: fetch the referenced block and expose it via a public URL.
            block = Block.get(self.client, _id=part)
            chat_message = ChatMessage.from_block(block, chat_id=chat_id)
            chat_message.url = make_image_public(self.client, block)
            messages.append(chat_message)

        return messages
60 changes: 0 additions & 60 deletions src/agent/get_agent.py

This file was deleted.

49 changes: 0 additions & 49 deletions src/agent/parser.py

This file was deleted.

Loading

0 comments on commit 09e2adc

Please sign in to comment.