7 changes: 6 additions & 1 deletion .env.example
@@ -3,4 +3,9 @@ DISCORD_BOT_TOKEN=x
DISCORD_CLIENT_ID=x

ALLOWED_SERVER_IDS=1
SERVER_TO_MODERATION_CHANNEL=1:1

OPENAI_API_URL=https://api.openai.com/v1/chat/completions
OPENAI_MODEL=gpt-3.5-turbo

SYSTEM_MESSAGE="You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible. Knowledge cutoff: {knowledge_cutoff} Current date: {current_date}"
KNOWLEDGE_CUTOFF="2021-09"
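A minimal sketch of how these new variables might be read on the Python side. `OPENAI_API_KEY`, `OPENAI_API_URL`, and `OPENAI_MODEL` are imported from `src/constants.py` in the `completion.py` changes below; the exact loading code (and the handling of `SYSTEM_MESSAGE` / `KNOWLEDGE_CUTOFF`) is not part of this diff, so the details here are assumptions:

```python
# Hedged sketch (not part of this diff): how src/constants.py might load the
# new variables with python-dotenv, mirroring the existing .env-based setup.
import os

from dotenv import load_dotenv

load_dotenv()

OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
OPENAI_API_URL = os.environ.get(
    "OPENAI_API_URL", "https://api.openai.com/v1/chat/completions"
)
OPENAI_MODEL = os.environ.get("OPENAI_MODEL", "gpt-3.5-turbo")
SYSTEM_MESSAGE = os.environ["SYSTEM_MESSAGE"]
KNOWLEDGE_CUTOFF = os.environ.get("KNOWLEDGE_CUTOFF", "2021-09")
```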
1 change: 0 additions & 1 deletion .github/ISSUE_TEMPLATE/config.yml

This file was deleted.

10 changes: 4 additions & 6 deletions README.md
@@ -16,9 +16,7 @@ Thank you!
---
# GPT Discord Bot

Example Discord bot written in Python that uses the [completions API](https://beta.openai.com/docs/api-reference/completions) to have conversations with the `text-davinci-003` model, and the [moderations API](https://beta.openai.com/docs/api-reference/moderations) to filter the messages.

**THIS IS NOT CHATGPT.**
Example Discord bot written in Python that uses the [completions API](https://beta.openai.com/docs/api-reference/completions) to have conversations with the `gpt-3.5-turbo` or `gpt-4` models.

This bot uses the [OpenAI Python Library](https://github.com/openai/openai-python) and [discord.py](https://discordpy.readthedocs.io/).

@@ -53,9 +51,9 @@ This bot uses the [OpenAI Python Library](https://github.com/openai/openai-python)

# Optional configuration

1. If you want moderation messages, create and copy the channel id for each server that you want the moderation messages to send to in `SERVER_TO_MODERATION_CHANNEL`. This should be of the format: `server_id:channel_id,server_id_2:channel_id_2`
1. If you want to change the personality of the bot, go to `src/config.yaml` and edit the instructions
1. If you want to change the moderation settings for which messages get flagged or blocked, edit the values in `src/constants.py`. A lower value means less chance of it triggering.
- If you want to change the model used, you can do so in `OPENAI_MODEL`. Currently only `gpt-3.5-turbo` and `gpt-4` work with the present codebase.

- If you want to change the behavior/personality of the bot, change the system prompt in `SYSTEM_MESSAGE`, with optional variables enclosed in `{`curly braces`}`. Currently the only variables available are `current_date` and `knowledge_cutoff`, with the latter being equivalent to the environment variable of the same name. The former is always in ISO 8601 format.
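As an illustration of the variable substitution described above, a sketch of how the template might be filled at runtime; the actual formatting code lives outside this diff, and the helper name below is hypothetical:

```python
# Hypothetical helper: fill the optional template variables in SYSTEM_MESSAGE.
# current_date is rendered in ISO 8601 (e.g. "2023-03-02"); knowledge_cutoff
# is taken from the KNOWLEDGE_CUTOFF environment variable.
from datetime import date


def render_system_message(system_message: str, knowledge_cutoff: str) -> str:
    return system_message.format(
        knowledge_cutoff=knowledge_cutoff,
        current_date=date.today().isoformat(),
    )
```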

# FAQ

4 changes: 2 additions & 2 deletions requirements.txt
@@ -1,5 +1,5 @@
discord.py==2.1.*
python-dotenv==0.21.*
openai==0.25.*
openai==0.27.*
PyYAML==6.0
dacite==1.6.*
dacite==1.6.*
1 change: 0 additions & 1 deletion src/__init__.py
@@ -1 +0,0 @@

45 changes: 2 additions & 43 deletions src/base.py
@@ -1,7 +1,5 @@
from dataclasses import dataclass
from typing import Optional, List

SEPARATOR_TOKEN = "<|endoftext|>"
from typing import Optional


@dataclass(frozen=True)
@@ -10,44 +8,5 @@ class Message:
text: Optional[str] = None

def render(self):
result = self.user + ":"
if self.text is not None:
result += " " + self.text
result = {"role": self.user, "content": self.text}
return result


@dataclass
class Conversation:
messages: List[Message]

def prepend(self, message: Message):
self.messages.insert(0, message)
return self

def render(self):
return f"\n{SEPARATOR_TOKEN}".join(
[message.render() for message in self.messages]
)


@dataclass(frozen=True)
class Config:
name: str
instructions: str
example_conversations: List[Conversation]


@dataclass(frozen=True)
class Prompt:
header: Message
examples: List[Conversation]
convo: Conversation

def render(self):
return f"\n{SEPARATOR_TOKEN}".join(
[self.header.render()]
+ [Message("System", "Example conversations:").render()]
+ [conversation.render() for conversation in self.examples]
+ [Message("System", "Current conversation:").render()]
+ [self.convo.render()],
)
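For context on the rewrite above: the new `Message.render()` returns a dict in the chat-completions message format instead of a `user: text` string, which is why the old prompt/separator machinery can be deleted. A small illustration, assuming the `user` field holds the chat role:

```python
# With the new render(), a Message maps directly onto one entry of the
# "messages" array sent to the chat completions endpoint.
Message(user="user", text="Hello!").render()
# -> {"role": "user", "content": "Hello!"}
```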
146 changes: 33 additions & 113 deletions src/completion.py
@@ -1,32 +1,18 @@
import io
from enum import Enum
from dataclasses import dataclass
import openai
from src.moderation import moderate_message
from typing import Optional, List
from src.constants import (
BOT_INSTRUCTIONS,
BOT_NAME,
EXAMPLE_CONVOS,
)
import discord
from src.base import Message, Prompt, Conversation
from src.utils import split_into_shorter_messages, close_thread, logger
from src.moderation import (
send_moderation_flagged_message,
send_moderation_blocked_message,
)

MY_BOT_NAME = BOT_NAME
MY_BOT_EXAMPLE_CONVOS = EXAMPLE_CONVOS
import aiohttp
from src.base import Message
from src.utils import split_into_shorter_messages, logger, close_thread
from src.constants import OPENAI_API_KEY, OPENAI_API_URL, OPENAI_MODEL, MAX_CHARS_PER_REPLY_MSG


class CompletionResult(Enum):
OK = 0
TOO_LONG = 1
INVALID_REQUEST = 2
OTHER_ERROR = 3
MODERATION_FLAGGED = 4
MODERATION_BLOCKED = 5
ERROR = 2


@dataclass
@@ -37,76 +23,42 @@ class CompletionData:


async def generate_completion_response(
messages: List[Message], user: str
messages: List[Message],
) -> CompletionData:
try:
prompt = Prompt(
header=Message(
"System", f"Instructions for {MY_BOT_NAME}: {BOT_INSTRUCTIONS}"
),
examples=MY_BOT_EXAMPLE_CONVOS,
convo=Conversation(messages + [Message(MY_BOT_NAME)]),
)
rendered = prompt.render()
response = openai.Completion.create(
engine="text-davinci-003",
prompt=rendered,
temperature=1.0,
top_p=0.9,
max_tokens=512,
stop=["<|endoftext|>"],
)
reply = response.choices[0].text.strip()
if reply:
flagged_str, blocked_str = moderate_message(
message=(rendered + reply)[-500:], user=user
)
if len(blocked_str) > 0:
return CompletionData(
status=CompletionResult.MODERATION_BLOCKED,
reply_text=reply,
status_text=f"from_response:{blocked_str}",
)

if len(flagged_str) > 0:
return CompletionData(
status=CompletionResult.MODERATION_FLAGGED,
reply_text=reply,
status_text=f"from_response:{flagged_str}",
)

return CompletionData(
status=CompletionResult.OK, reply_text=reply, status_text=None
)
except openai.error.InvalidRequestError as e:
if "This model's maximum context length" in e.user_message:
return CompletionData(
status=CompletionResult.TOO_LONG, reply_text=None, status_text=str(e)
)
else:
logger.exception(e)
return CompletionData(
status=CompletionResult.INVALID_REQUEST,
reply_text=None,
status_text=str(e),
)
async with aiohttp.ClientSession() as session:
messages = [message.render() for message in messages]
async with session.post(
url=OPENAI_API_URL,
json={
'model': OPENAI_MODEL,
'messages': messages
},
headers={'Content-Type': 'application/json'},
auth=aiohttp.BasicAuth("", OPENAI_API_KEY)
) as r:
if r.status == 200:
js = await r.json()
reply = js['choices'][0]['message']['content']
return CompletionData(status=CompletionResult.OK, reply_text=reply, status_text=None)
else:
return CompletionData(status=CompletionResult.ERROR, reply_text=None, status_text=str(r))
except Exception as e:
logger.exception(e)
return CompletionData(
status=CompletionResult.OTHER_ERROR, reply_text=None, status_text=str(e)
status=CompletionResult.ERROR, reply_text=None, status_text=str(e)
)


async def process_response(
user: str, thread: discord.Thread, response_data: CompletionData
thread: discord.Thread, response_data: CompletionData
):
status = response_data.status
reply_text = response_data.reply_text
status_text = response_data.status_text
if status is CompletionResult.OK or status is CompletionResult.MODERATION_FLAGGED:
sent_message = None
if status is CompletionResult.OK:
if not reply_text:
sent_message = await thread.send(
await thread.send(
embed=discord.Embed(
description=f"**Invalid response** - empty response",
color=discord.Color.yellow(),
@@ -115,45 +67,13 @@ async def process_response(
else:
shorter_response = split_into_shorter_messages(reply_text)
for r in shorter_response:
sent_message = await thread.send(r)
if status is CompletionResult.MODERATION_FLAGGED:
await send_moderation_flagged_message(
guild=thread.guild,
user=user,
flagged_str=status_text,
message=reply_text,
url=sent_message.jump_url if sent_message else "no url",
)

await thread.send(
embed=discord.Embed(
description=f"⚠️ **This conversation has been flagged by moderation.**",
color=discord.Color.yellow(),
)
)
elif status is CompletionResult.MODERATION_BLOCKED:
await send_moderation_blocked_message(
guild=thread.guild,
user=user,
blocked_str=status_text,
message=reply_text,
)

await thread.send(
embed=discord.Embed(
description=f"❌ **The response has been blocked by moderation.**",
color=discord.Color.red(),
)
)
if len(r) > MAX_CHARS_PER_REPLY_MSG:
file = discord.File(io.StringIO(r), f'message.txt')
await thread.send(file=file)
else:
await thread.send(r)
elif status is CompletionResult.TOO_LONG:
await close_thread(thread)
elif status is CompletionResult.INVALID_REQUEST:
await thread.send(
embed=discord.Embed(
description=f"**Invalid request** - {status_text}",
color=discord.Color.yellow(),
)
)
else:
await thread.send(
embed=discord.Embed(
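A rough sketch of how the reworked completion flow might be driven from the bot's message handler. The handler lives in `src/main.py`, which is not part of this diff, so the surrounding function and its arguments are assumptions; only the two calls match the signatures shown above:

```python
# Hedged sketch (not part of this diff): wiring the new completion flow
# into a Discord thread handler.
from src.base import Message
from src.completion import generate_completion_response, process_response


async def reply_in_thread(thread, channel_messages):
    # channel_messages is assumed to already be a list of src.base.Message
    # objects whose `user` field carries the chat role ("system"/"user"/"assistant").
    response_data = await generate_completion_response(messages=channel_messages)
    await process_response(thread=thread, response_data=response_data)
```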
26 changes: 0 additions & 26 deletions src/config.yaml

This file was deleted.
