Commit cea436a: "open source"

Initial commit (0 parents), 18 files changed, +463 −0 lines

.gitignore (+5)

*.env
*.langchain.db
.vscode
.DS_STORE
__pycache__

README.md (+35)

# Chainlit Cookbook

Welcome to the Chainlit Cookbook repository! Here you'll find a collection of example projects demonstrating how to use Chainlit to create amazing chatbot UIs with ease. Each folder in this repository represents a separate demo project.

## 🚀 Getting Started

To run a demo, follow these steps:

1. Clone this repository:
   ```
   git clone https://github.com/Chainlit/cookbook.git chainlit-cookbook
   ```
2. Navigate to the desired demo folder:
   ```
   cd chainlit-cookbook/demo-folder-name
   ```
3. Install the required dependencies:
   ```
   pip install -r requirements.txt
   ```
4. Create a `.env` file based on the provided `.env.example` file:
   ```
   cp .env.example .env
   ```
   Modify the `.env` file as needed to include any necessary API keys or configuration settings.
5. Run the Chainlit app in watch mode:
   ```
   chainlit run app.py -w
   ```

Your demo chatbot UI should now be up and running in your browser!
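Curious what the `app.py` you just launched might contain? Here is a minimal sketch of a Chainlit app. It is hypothetical rather than one of the demos in this repository, and it assumes Chainlit's `@cl.on_message` hook (hook names may vary across versions):

```
# Hypothetical minimal app.py, not one of the demo folders.
import chainlit as cl


@cl.on_message  # runs each time the user sends a chat message
def main(message: str):
    # A real demo would call an LLM or an agent here; this just echoes.
    cl.send_message(content=f"You said: {message}")
```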
## 💁 Contributing

We'd love to see more demos showcasing the power of Chainlit. If you have an idea for a demo or want to contribute one, please feel free to open an issue or create a pull request. Your contributions are highly appreciated!

image-gen/.chainlit/config.toml (+27)

[project]
# Name of the app and chatbot.
name = "Img Gen"

# If true (default), the app will be available to anonymous users (once deployed).
# If false, users will need to authenticate and be part of the project to use the app.
public = true

# The project ID (found on https://cloud.chainlit.io).
# If provided, all the message data will be stored in the cloud.
# The project ID is required when public is set to false.
#id = ""

# Whether to enable telemetry (default: true). No personal data is collected.
enable_telemetry = true

# List of environment variables to be provided by each user to use the app.
user_env = []

# Hide the chain of thought details from the user in the UI.
hide_cot = false

# Link to your github repo. This will add a github button in the UI's header.
github = "https://github.com/Chainlit/demos"

# Limit the number of requests per user.
#request_limit = "10 per day"

image-gen/.env.example (+2)

OPENAI_API_KEY=YOUR_OPENAI_API_KEY
STABILITY_KEY=YOUR_STABILITY_KEY

image-gen/app.py (+65)

import chainlit as cl
from chainlit.action import Action
from tools import generate_image_tool, edit_image_tool
from langchain.agents import initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.agents.structured_chat.prompt import SUFFIX


@cl.action_callback("Create variation")
def create_variant(action: Action):
    agent = cl.user_session.get("agent")
    agent_input = f"Create a variation of {action.value}"
    cl.send_message(f"Creating a variation of `{action.value}`.")
    run(agent, agent_input)


@cl.langchain_rename
def rename(orig_author):
    mapping = {
        "LLMChain": "Assistant",
    }
    return mapping.get(orig_author, orig_author)


@cl.langchain_factory
def main():
    llm = ChatOpenAI(temperature=0, streaming=True)
    tools = [generate_image_tool, edit_image_tool]
    memory = ConversationBufferMemory(memory_key="chat_history")
    _SUFFIX = "Chat history:\n{chat_history}\n\n" + SUFFIX

    agent_executor = initialize_agent(
        tools=tools,
        llm=llm,
        agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        memory=memory,
        agent_kwargs={
            "suffix": _SUFFIX,
            "input_variables": ["input", "agent_scratchpad", "chat_history"],
        },
    )

    return agent_executor


@cl.langchain_run
def run(agent_executor, action_input):
    cl.user_session.set("generated_image", None)
    res = agent_executor.run(input=action_input)

    elements = []
    actions = []

    generated_image_name = cl.user_session.get("generated_image")
    generated_image = cl.user_session.get(generated_image_name)
    if generated_image:
        elements = [
            cl.LocalImage(
                content=generated_image, name=generated_image_name, display="inline"
            )
        ]
        actions = [cl.Action(name="Create variation", value=generated_image_name)]

    cl.send_message(content=res, elements=elements, actions=actions)

image-gen/chainlit.md (+19)

# 🎨 Chainlit Image Gen demo

Welcome to our creative image generation app built with [Chainlit](https://chainlit.io), [LangChain](https://python.langchain.com/en/latest/index.html), and [Stability AI](https://stability.ai/)! 🌟 This app allows you to create and edit unique images simply by chatting with it. Talk about having an artistic conversation! 🎨🗨️

## 🎯 How it Works

It's super simple! Just input your desired image description, and our app will generate a custom image based on your request. You can even edit the image by asking follow-up questions or requesting modifications. Cool, isn't it? 😎

Try asking:
```
create an image of an astronaut riding a horse on Mars, HD, dramatic lighting
```

You can then ask for modifications:
```
change the pose of the horse
```

Or create new images!

image-gen/requirements.txt (+4)

chainlit
langchain
pillow
stability_sdk==0.8.0

image-gen/tools.py (+104)

import os
import io
import chainlit as cl
from stability_sdk import client
import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation
from PIL import Image
from langchain.tools import Tool, StructuredTool

os.environ["STABILITY_HOST"] = "grpc.stability.ai:443"


def get_image_name():
    image_count = cl.user_session.get("image_count")
    if image_count is None:
        image_count = 0
    else:
        image_count += 1

    cl.user_session.set("image_count", image_count)

    return f"image-{image_count}"


def _generate_image(prompt: str, init_image=None):
    # Set up our connection to the API.
    stability_api = client.StabilityInference(
        key=os.environ["STABILITY_KEY"],  # API Key reference.
        verbose=True,  # Print debug messages.
        engine="stable-diffusion-xl-beta-v2-2-2",  # Set the engine to use for generation.
        # Available engines: stable-diffusion-v1 stable-diffusion-v1-5 stable-diffusion-512-v2-0 stable-diffusion-768-v2-0
        # stable-diffusion-512-v2-1 stable-diffusion-768-v2-1 stable-diffusion-xl-beta-v2-2-2 stable-inpainting-v1-0 stable-inpainting-512-v2-0
    )

    start_schedule = 0.8 if init_image else 1

    # Set up our initial generation parameters.
    answers = stability_api.generate(
        prompt=prompt,
        init_image=init_image,
        start_schedule=start_schedule,
        seed=992446758,  # If a seed is provided, the resulting generated image will be deterministic.
        # What this means is that as long as all generation parameters remain the same, you can always recall the same image simply by generating it again.
        # Note: This isn't quite the case for CLIP Guided generations, which we tackle in the CLIP Guidance documentation.
        steps=30,  # Number of inference steps performed on image generation. Defaults to 30.
        cfg_scale=8.0,  # Influences how strongly your generation is guided to match your prompt.
        # Setting this value higher increases the strength in which it tries to match your prompt.
        # Defaults to 7.0 if not specified.
        width=512,  # Generation width, defaults to 512 if not included.
        height=512,  # Generation height, defaults to 512 if not included.
        samples=1,  # Number of images to generate, defaults to 1 if not included.
        sampler=generation.SAMPLER_K_DPMPP_2M  # Choose which sampler we want to denoise our generation with.
        # Defaults to k_dpmpp_2m if not specified. Clip Guidance only supports ancestral samplers.
        # (Available Samplers: ddim, plms, k_euler, k_euler_ancestral, k_heun, k_dpm_2, k_dpm_2_ancestral, k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m, k_dpmpp_sde)
    )

    # Print a warning to the console if the adult content classifier is tripped.
    # If the classifier is not tripped, save the generated image.
    for resp in answers:
        for artifact in resp.artifacts:
            if artifact.finish_reason == generation.FILTER:
                raise ValueError(
                    "Your request activated the API's safety filters and could not be processed. "
                    "Please modify the prompt and try again."
                )
            if artifact.type == generation.ARTIFACT_IMAGE:
                name = get_image_name()
                cl.user_session.set(name, artifact.binary)
                cl.user_session.set("generated_image", name)
                return name
            else:
                raise ValueError(
                    f"Your request did not generate an image. Please modify the prompt and try again. Finish reason: {artifact.finish_reason}"
                )


def generate_image(prompt: str):
    image_name = _generate_image(prompt)
    return f"Here is {image_name}."


def edit_image(init_image_name: str, prompt: str):
    init_image_bytes = cl.user_session.get(init_image_name)
    if init_image_bytes is None:
        raise ValueError(f"Could not find image `{init_image_name}`.")

    init_image = Image.open(io.BytesIO(init_image_bytes))
    image_name = _generate_image(prompt, init_image)

    return f"Here is {image_name} based on {init_image_name}."


generate_image_tool = Tool.from_function(
    func=generate_image,
    name="GenerateImage",
    description="Useful to create an image from a text prompt.",
    return_direct=True,
)

edit_image_tool = StructuredTool.from_function(
    func=edit_image,
    name="EditImage",
    description="Useful to edit an image with a prompt. Works well with commands such as 'replace', 'add', 'change', 'remove'.",
    return_direct=True,
)
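A note on the two wrappers above: `Tool.from_function` exposes a single string input, while `StructuredTool.from_function` infers a multi-field schema from `edit_image`'s signature, which is what lets the agent pass both an image name and a prompt. A hedged usage sketch (the image name `image-0` is hypothetical and presumes a Chainlit user session already populated by `get_image_name`):

```
# Sketch only, not part of this commit.
from tools import generate_image_tool, edit_image_tool

# Single-input tool: the agent hands over one string.
generate_image_tool.run("a lighthouse at dusk")

# Structured tool: the agent hands over a dict matching edit_image's parameters.
edit_image_tool.run({"init_image_name": "image-0", "prompt": "make the sky purple"})
```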
langchain-aiplugins/.chainlit/config.toml (+23)

[project]
# Name of the app and chatbot.
name = "Klarna plugin"

# If true (default), the app will be available to anonymous users (once deployed).
# If false, users will need to authenticate and be part of the project to use the app.
public = true

# The project ID (found on https://cloud.chainlit.io).
# If provided, all the message data will be stored in the cloud.
# The project ID is required when public is set to false.
#id = ""

# List of environment variables to be provided by each user to use the app.
user_env = []

# Hide the chain of thought details from the user in the UI.
hide_cot = false

# Limit the number of requests per user.
request_limit = "20 per day"

github = "https://github.com/Chainlit/demos"

langchain-aiplugins/.env.example (+1)

OPENAI_API_KEY=YOUR_OPENAI_API_KEY

langchain-aiplugins/app.py (+19)

from langchain.chat_models import ChatOpenAI
from langchain.agents import load_tools, initialize_agent
from langchain.agents import AgentType
from langchain.tools import AIPluginTool
from chainlit import langchain_factory


@langchain_factory
def load():
    tool = AIPluginTool.from_plugin_url(
        "https://www.klarna.com/.well-known/ai-plugin.json"
    )
    llm = ChatOpenAI(temperature=0, streaming=True)
    tools = load_tools(["requests_all"])
    tools += [tool]

    return initialize_agent(
        tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
    )
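For context on how this agent uses the plugin: `AIPluginTool` does not call Klarna's API itself. Running the tool returns the plugin's usage guide and OpenAPI spec as text, and the agent then relies on the `requests_all` tools to hit the actual endpoints. A small sketch of that first step (hedged; this reflects the behavior we'd expect from langchain 0.0.157):

```
# Sketch only: inspect what the plugin tool feeds back to the agent.
from langchain.tools import AIPluginTool

tool = AIPluginTool.from_plugin_url(
    "https://www.klarna.com/.well-known/ai-plugin.json"
)
print(tool.run(""))  # prints the plugin's usage guide plus its OpenAPI spec
```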

langchain-aiplugins/chainlit.md (+9)

# Welcome to Chainlit! 🚀🤖

This is a demo using LangChain and the Klarna plugin!

## Test it

Try typing:
```
what t shirts are available in klarna?
```

langchain-aiplugins/requirements.txt (+2)

langchain==0.0.157
chainlit

pdf-qa/.chainlit/config.toml (+23)

[project]
# Name of the app and chatbot.
name = "PDF QA"

# If true (default), the app will be available to anonymous users (once deployed).
# If false, users will need to authenticate and be part of the project to use the app.
public = true

# The project ID (found on https://cloud.chainlit.io).
# If provided, all the message data will be stored in the cloud.
# The project ID is required when public is set to false.
#id = ""

# List of environment variables to be provided by each user to use the app.
user_env = []

# Hide the chain of thought details from the user in the UI.
hide_cot = false

# Limit the number of requests per user.
request_limit = "20 per day"

github = "https://github.com/Chainlit/demos"

pdf-qa/.env.example (+3)

PINECONE_API_KEY=YOUR_PINECONE_API_KEY
PINECONE_ENV=YOUR_PINECONE_ENV
OPENAI_API_KEY=YOUR_OPENAI_API_KEY
