Skip to content

Commit

Permalink
handle server errors gracefully
Browse files Browse the repository at this point in the history
...and not with 500

Signed-off-by: Tomas Tomecek <ttomecek@redhat.com>
  • Loading branch information
TomasTomecek committed Aug 14, 2024
1 parent b8d701d commit 3f2c5ee
Showing 1 changed file with 30 additions and 8 deletions.
38 changes: 30 additions & 8 deletions logdetective/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import os
import json

from fastapi import FastAPI
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

import requests
Expand Down Expand Up @@ -35,7 +35,19 @@ async def analyze_log(build_log: BuildLog):

LOG.info("Getting summary")

log = requests.get(build_log.url, timeout=int(LOG_SOURCE_REQUEST_TIMEOUT)).text
# Download the raw build log; translate any transport problem into an
# HTTP 400 for the API caller instead of letting it surface as a 500.
try:
    log_request = requests.get(
        build_log.url,
        timeout=int(LOG_SOURCE_REQUEST_TIMEOUT),
    )
except requests.RequestException as ex:
    raise HTTPException(
        status_code=400,
        detail=f"We couldn't obtain the logs: {ex}",
    ) from ex

# A non-2xx answer from the log host is likewise the caller's problem.
if not log_request.ok:
    detail = ("Something went wrong while getting the logs: "
              f"[{log_request.status_code}] {log_request.text}")
    raise HTTPException(status_code=400, detail=detail)

log = log_request.text
log_summary = extractor(log)

ratio = len(log_summary) / len(log.split('\n'))
Expand All @@ -47,11 +59,21 @@ async def analyze_log(build_log: BuildLog):
"prompt": PROMPT_TEMPLATE.format(log_summary),
"max_tokens": "0"}

# Expects llama-cpp server to run on LLM_CPP_SERVER_ADDRESS:LLM_CPP_SERVER_PORT
response = requests.post(
f"{LLM_CPP_SERVER_ADDRESS}:{LLM_CPP_SERVER_PORT}/v1/completions",
headers={"Content-Type":"application/json"},
data=json.dumps(data),
timeout=int(LLM_CPP_SERVER_TIMEOUT))
try:
# Expects llama-cpp server to run on LLM_CPP_SERVER_ADDRESS:LLM_CPP_SERVER_PORT
response = requests.post(
f"{LLM_CPP_SERVER_ADDRESS}:{LLM_CPP_SERVER_PORT}/v1/completions",
headers={"Content-Type":"application/json"},
data=json.dumps(data),
timeout=int(LLM_CPP_SERVER_TIMEOUT))
except requests.RequestException as ex:
raise HTTPException(
status_code=400,
detail=f"Llama-cpp query failed: {ex}") from ex

if not log_request.ok:
raise HTTPException(
status_code=400,
detail="Something went wrong while getting a response from the llama server: "
f"[{log_request.status_code}] {log_request.text}")
return response.text

0 comments on commit 3f2c5ee

Please sign in to comment.