Skip to content
This repository has been archived by the owner on Nov 4, 2023. It is now read-only.

Commit

Permalink
FROM development TO master (#6)
Browse files Browse the repository at this point in the history
* adds npm cache

* Has chat stream route and http chat route
  • Loading branch information
ryaneggz authored Aug 15, 2023
1 parent 5bafe70 commit 44daefc
Show file tree
Hide file tree
Showing 14 changed files with 279 additions and 27 deletions.
1 change: 1 addition & 0 deletions .example.env
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
OPENAI_API_KEY=
2 changes: 1 addition & 1 deletion .github/workflows/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@v3

- name: Set up Python 3.10
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: 3.9
Expand Down
10 changes: 2 additions & 8 deletions .github/workflows/deploy.yaml
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
name: Deploy to Vercel

on:
push:
branches:
Expand All @@ -11,14 +13,6 @@ jobs:
- name: Checkout code
uses: actions/checkout@v3

- name: Cache global npm modules
uses: actions/cache@v3
with:
path: /usr/local/lib/node_modules
key: npm-global-modules-${{ runner.os }}-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
npm-global-modules-${{ runner.os }}-
- name: Install Node.js
uses: actions/setup-node@v3
with:
Expand Down
19 changes: 19 additions & 0 deletions .vscode/launch.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "Python: FastAPI",
"type": "python",
"request": "launch",
"module": "uvicorn",
"args": [
"server.api:app",
"--log-level",
"debug"
],
"jinja": true,
"justMyCode": true,
"envFile": "${workspaceFolder}/.env.local"
}
]
}
43 changes: 43 additions & 0 deletions scripts/build.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
#!/bin/bash
# Build and push the Docker image, tagged either with a semantic release
# version (which is also created and pushed as a git tag) or with the
# short SHA of HEAD for development builds.

echo "Is this a release? (yes/no)"
read -r answer
if [ "$answer" == "yes" ]; then
    echo "Release confirmed."
    echo ""
    ## prompt user for semantic tag
    echo "Please enter the semantic version: "
    read -r version
    echo ""
    TAG=$version
    echo "You entered $TAG"
    # -F literal, -x whole line: a plain `grep -q $TAG` would let an
    # existing tag "1.0.1" block a new tag "1.0".
    if git tag | grep -Fqx "$TAG"; then
        echo "Tag $TAG already exists!"
        exit 1
    fi
    git tag "$TAG"
    git push origin "$TAG"
elif [ "$answer" == "no" ]; then
    TAG=$(git rev-parse HEAD | cut -c1-8)
    echo "Creating development build.."
else
    # Without a valid answer TAG would be empty and the image tag malformed.
    echo "Invalid input. Please enter 'yes' or 'no'."
    exit 1
fi

## Go to root of project
cd "$(dirname "$0")/.."

IMAGE_URL="promptengineersai/chat-stream-full-stack:$TAG"

docker build -t "$IMAGE_URL" .
docker push "$IMAGE_URL"

## Print details
echo ""
echo ""
echo "----------------------------------------------------"
echo ">> Version: $TAG"
echo ">> Image: $IMAGE_URL"
echo "----------------------------------------------------"
24 changes: 24 additions & 0 deletions scripts/changelog.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
#!/bin/bash
# Append the current branch (with today's date) under the first
# "### Added" heading of Changelog.md, unless it is already listed.

###################################################################
### Add branch to changelog
###################################################################
# Get the current branch name
branch=$(git rev-parse --abbrev-ref HEAD)

# Get the current date (YYYY-MM-DD)
date=$(date +%Y-%m-%d)

# Find the first occurrence of ### Added
line_number=$(grep -n "### Added" Changelog.md | head -1 | cut -d: -f1)

# Without this guard an empty line_number makes sed fail with an
# invalid address like "a\ - branch".
if [ -z "$line_number" ]; then
    echo "No '### Added' section found in Changelog.md."
    exit 1
fi

# Check if the branch has already been added to the Changelog
if grep -q " $branch " Changelog.md; then
    echo "Branch $branch has already been added to the Changelog."
else
    # Add the branch name and date below the "### Added" heading.
    # NOTE: `sed -i` without a suffix argument is GNU sed; on macOS/BSD
    # this would need `sed -i '' ...`.
    sed -i "${line_number}a\ - $branch ($date)" Changelog.md

    # Stage the Changelog change for the next commit
    git add Changelog.md
fi
25 changes: 25 additions & 0 deletions scripts/dev.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
#!/bin/bash
# Start the FastAPI server locally, exporting environment variables from
# either the production or the local env file.

echo "Connect to prod? (yes/no)"
read -r answer
if [ "$answer" == "yes" ]; then
    echo "Start Prod Env locally"
    echo ""
    echo ""
    ENV_FILE=.env.production
elif [ "$answer" == "no" ]; then
    ENV_FILE=.env.local
    echo "Starting Dev Server.."
else
    # ENV_FILE would be unset below and `source` would fail confusingly,
    # so fail fast here instead.
    echo "Invalid input. Please enter 'yes' or 'no'."
    exit 1
fi

### Set Environment Variables
set -a  # automatically export all variables
source "$ENV_FILE"
set +a

# Start the server with logging debug mode
uvicorn server.api:app \
    --log-level debug \
    --reload
17 changes: 17 additions & 0 deletions scripts/test.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
#!/bin/bash
# Run the pytest suite. With no arguments runs everything under tests/;
# otherwise forwards the given paths/options straight to pytest.

ENV_FILE=.env.test

### Set Environment Variables
set -a  # automatically export all variables
source "$ENV_FILE"
set +a

if [ $# -eq 0 ]
then
    echo ">> Running all test cases"
    python3 -m pytest -s tests
else
    echo ">> Running single test case"
    # Quote "$@" so test paths containing spaces survive word splitting.
    python3 -m pytest -s "$@"
fi
Binary file modified server/__pycache__/api.cpython-310.pyc
Binary file not shown.
63 changes: 49 additions & 14 deletions server/api.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,20 @@
"App Entrypoint"
import logging
import openai
import json
import os

from fastapi import FastAPI, Request, HTTPException
from fastapi import FastAPI, Request, Response, HTTPException
from fastapi.responses import StreamingResponse
from fastapi.templating import Jinja2Templates

from server.models.request import Message
from server.models.response import ResponseStatus, ResponseChat
from server.models.request import ReqBodyChat
from server.models.response import ResponseStatus, ResponseChat, ResponseChatStream
from server.services.message_service import send_openai_message, send_openai_message_stream
from server.utils import logger

app = FastAPI(title="🤖 Prompt Engineers AI - Serverless Chat")
os.environ.get('OPENAI_API_KEY')
openai.api_key = os.environ.get('OPENAI_API_KEY')
templates = Jinja2Templates(directory="static")
logger = logging.getLogger("uvicorn.error")

Expand Down Expand Up @@ -41,20 +45,51 @@ async def get_application_version():

#######################################################################
### API Endpoints
#######################################################################
@app.post("/chat", tags=["Chat"], response_model=ResponseChat)
async def chat(body: ReqBodyChat):
    """Single-shot (non-streaming) chat completion.

    Forwards the request body to the OpenAI message service and returns
    the completion as a JSON payload under the 'chat' key.

    Raises:
        HTTPException: re-raised unchanged if the service raised one;
            any other failure is wrapped as a 500.
    """
    try:
        result = send_openai_message(
            model=body.model,
            messages=body.messages,
            temperature=body.temperature
        )
        logger.debug('[POST /chat] Response: %s', str(result))
        data = json.dumps({
            'chat': result
        })
        return Response(
            content=data,
            media_type='application/json',
            status_code=200
        )
    except HTTPException as err:
        # Re-raise FastAPI errors untouched so their status codes survive.
        logger.error(err.detail)
        raise
    except Exception as err:
        logger.error(err)
        raise HTTPException(
            status_code=500,
            detail=f"An unexpected error occurred. {str(err)}"
        ) from err


@app.post("/chat/stream", tags=["Chat"], response_model=ResponseChatStream)
def chat_stream(body: ReqBodyChat):
    """Stream a chat completion as server-sent events.

    Delegates to send_openai_message_stream, which yields one SSE frame
    per token followed by a terminating 'end' frame; FastAPI forwards
    the frames to the client as a text/event-stream response.
    """
    # Guard against a null messages field in the request body.
    messages = body.messages or []
    logger.debug('[POST /chat/stream] Query: %s', str(body))
    return StreamingResponse(
        send_openai_message_stream(
            messages,
            body.model,
            body.temperature,
            True  # stream=True: ask the service for a chunked response
        ),
        media_type="text/event-stream"
    )

8 changes: 4 additions & 4 deletions server/models/request.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,11 @@
#################################################
## ChatGPT
#################################################
class Message(BaseModel): # pylint: disable=too-few-public-methods
class ReqBodyChat(BaseModel): # pylint: disable=too-few-public-methods
"""A message to send to the chatbot."""
model: Optional[str] = None
messages: Optional[Any] = None
temperature: Optional[float or int] = None
model: str = 'gpt-3.5-turbo'
messages: Any = []
temperature: float or int = 0.0

class Config: # pylint: disable=too-few-public-methods
"""A message to send to the chatbot."""
Expand Down
6 changes: 6 additions & 0 deletions server/models/response.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,3 +39,9 @@ class Config: # pylint: disable=too-few-public-methods
}
}
}


class ResponseChatStream(BaseModel):
    """Response model for one /chat/stream server-sent-event chunk."""
    # Role of the emitter; the stream helpers always send 'assistant'.
    sender: str = Field(default='assistant')
    # Token text carried by this chunk.
    message: str = Field(default='Dialog started.')
    # 'stream' while tokens flow; the final frame uses type 'end'.
    type: str = Field(default='stream')
48 changes: 48 additions & 0 deletions server/services/message_service.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
import openai
from typing import AsyncIterable

from fastapi import HTTPException, Response

from server.utils import logger, token_stream, end_stream


#######################################################
## Open AI Chat GPT
#######################################################
def send_openai_message(
    messages,
    model: str,
    temperature: float = 0.0,  # `float or int` evaluated to just `float`
    stream: bool = False,
):
    """Send a chat request to OpenAI and return the completion.

    This is a plain synchronous call (the old `-> AsyncIterable[str]`
    annotation was wrong: nothing here is async and nothing yields).

    Args:
        messages: Chat history in OpenAI's [{'role', 'content'}] format.
        model: Name of the chat model to use.
        temperature: Sampling temperature.
        stream: When True, OpenAI returns an iterator of chunks instead
            of a single completion object.

    Returns:
        The raw return value of openai.ChatCompletion.create.
    """
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=temperature,
        stream=stream
    )
    return response

#######################################################
## Open AI Chat GPT
#######################################################
async def send_openai_message_stream(
    messages,
    model: str,
    temperature: float = 0.0,  # `float or int` evaluated to just `float`
    stream: bool = True,
) -> AsyncIterable[str]:
    """Send a message to the chatbot and yield SSE-formatted tokens.

    Yields one server-sent-event frame (see utils.token_stream) per
    streamed token, then a terminating 'end' frame (utils.end_stream).
    """
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=temperature,
        stream=stream
    )
    logger.debug('[POST /chat/stream] Stream: %s', str(response))
    # NOTE(review): this iterates the synchronous OpenAI stream inside an
    # async generator, blocking the event loop between chunks — consider
    # the library's async variant if this becomes a bottleneck.
    for chunk in response:
        ## Would also consider gathering data here
        token = chunk['choices'][0]['delta'].get('content', '')
        yield token_stream(token)
    yield end_stream()
40 changes: 40 additions & 0 deletions server/utils/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
import json
import logging

logger = logging.getLogger("uvicorn.error")


def _sse_frame(payload: dict) -> str:
    """Format a payload as a single server-sent-events data frame."""
    return f"data: {json.dumps(payload)}\n\n"


def token_stream(token: str):
    """ Use server-sent-events to stream the response"""
    frame = {
        'sender': 'assistant',
        'message': token,
        'type': 'stream'
    }
    logger.debug('[POST /chat] Stream: %s', str(frame))
    return _sse_frame(frame)


def end_stream():
    """Send the end of the stream"""
    closing_frame = {
        'sender': 'assistant',
        'message': "",
        'type': 'end'
    }
    logger.debug('[POST /chat] End: %s', str(closing_frame))
    return _sse_frame(closing_frame)

def retrieve_system_message(messages):
    """Retrieve the system message"""
    # Return the content of the first system-role entry, or None when
    # the conversation has no system message.
    for entry in messages:
        if entry['role'] == 'system':
            return entry['content']
    return None

def retrieve_chat_messages(messages):
    """Retrieve the chat messages"""
    # Keep only the dialog turns, dropping system (and any other) roles.
    dialog_roles = ("user", "assistant")
    return [entry["content"] for entry in messages if entry["role"] in dialog_roles]

0 comments on commit 44daefc

Please sign in to comment.