Skip to content

started mongoDB implementation #5

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions backend/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ certifi==2024.2.2
charset-normalizer==3.3.2
click==8.1.7
dataclasses-json==0.6.4
dnspython==2.2.1
fastapi==0.110.0
frozenlist==1.4.1
gitdb==4.0.11
Expand All @@ -27,6 +28,7 @@ orjson==3.9.15
packaging==23.2
pydantic==2.6.4
pydantic_core==2.16.3
pymongo==3.12.0
PyYAML==6.0.1
requests==2.31.0
setuptools==69.0.3
Expand Down
21 changes: 20 additions & 1 deletion backend/src/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@
import requests
from langchain_community.llms import Ollama
import os
from pymongo import MongoClient
from pymongo.server_api import ServerApi

app = FastAPI()
origins = ["*"]
Expand All @@ -16,8 +18,13 @@
allow_headers=["*"],
)

mongo_client = MongoClient(os.environ["MONGODB_ATLAS_URI"], server_api=ServerApi('1'))
db = mongo_client.Cluster0
chat_history_collection = db["chat-history.input-and-output"]

class LLMInput(BaseModel):
    """Request payload shared by the /generate/ and /save_history/ endpoints.

    `answer` defaults to "" so that /generate/ callers — which send only
    `message` — do not start failing validation (HTTP 422) now that the
    field exists for /save_history/.
    """
    message: str  # user prompt forwarded to the LLM
    answer: str = ""  # LLM response; supplied only by /save_history/ callers

@app.get("/")
def read_root():
Expand All @@ -36,6 +43,18 @@ async def download_model(model):

@app.post("/generate/")
async def llm_generate(input: LLMInput):
    """Forward the user's message to the Ollama "mistral" model and return its reply.

    Returns the raw generated text (a plain string), not a JSON object.
    """
    print(f"Received message: {input.message}")
    # Base URL comes from the environment so the same image works inside and
    # outside docker-compose.
    llm = Ollama(model="mistral", base_url=os.environ["OLLAMA_API_URL"])
    # Single return — the scraped diff showed this line duplicated (old/new
    # context artifact); only one invocation is intended.
    return llm.invoke(input.message)

@app.post("/save_history/")
async def save_history(input: LLMInput):
    """Persist one message/answer exchange to the Mongo chat-history collection."""
    print(f"Received message: {input.message}, answer: {input.answer}")
    document = {"message": input.message, "answer": input.answer}
    chat_history_collection.insert_one(document)
    return {"status": "History saved"}

@app.get("/get_history/")
def get_history():
    """Return every stored chat exchange, with Mongo's internal _id projected out."""
    cursor = chat_history_collection.find({}, {'_id': 0})
    return [document for document in cursor]
43 changes: 43 additions & 0 deletions backend/src/playground-1.mongodb.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
/* global use, db */
// MongoDB Playground
// To disable this template go to Settings | MongoDB | Use Default Template For Playground.
// Make sure you are connected to enable completions and to be able to run a playground.
// Use Ctrl+Space inside a snippet or a string literal to trigger completions.
// The result of the last command run in a playground is shown on the results panel.
// By default the first 20 documents will be returned with a cursor.
// Use 'console.log()' to print to the debug output.
// For more documentation on playgrounds please refer to
// https://www.mongodb.com/docs/mongodb-vscode/playgrounds/

// Select the database to use.
use('mongodbVSCodePlaygroundDB');

// Insert a few documents into the sales collection.
db.getCollection('sales').insertMany([
{ 'item': 'abc', 'price': 10, 'quantity': 2, 'date': new Date('2014-03-01T08:00:00Z') },
{ 'item': 'jkl', 'price': 20, 'quantity': 1, 'date': new Date('2014-03-01T09:00:00Z') },
{ 'item': 'xyz', 'price': 5, 'quantity': 10, 'date': new Date('2014-03-15T09:00:00Z') },
{ 'item': 'xyz', 'price': 5, 'quantity': 20, 'date': new Date('2014-04-04T11:21:39.736Z') },
{ 'item': 'abc', 'price': 10, 'quantity': 10, 'date': new Date('2014-04-04T21:23:13.331Z') },
{ 'item': 'def', 'price': 7.5, 'quantity': 5, 'date': new Date('2015-06-04T05:08:13Z') },
{ 'item': 'def', 'price': 7.5, 'quantity': 10, 'date': new Date('2015-09-10T08:43:00Z') },
{ 'item': 'abc', 'price': 10, 'quantity': 5, 'date': new Date('2016-02-06T20:20:13Z') },
]);

// Run a find command to count items sold on April 4th, 2014.
const salesOnApril4th = db.getCollection('sales').find({
date: { $gte: new Date('2014-04-04'), $lt: new Date('2014-04-05') }
}).count();

// Print a message to the output window.
// Fixed: the template's message claimed a 2014 yearly total, but the query
// above counts only sales on April 4th, 2014.
console.log(`${salesOnApril4th} sales occurred on April 4th, 2014.`);

// Here we run an aggregation and open a cursor to the results.
// Use '.toArray()' to exhaust the cursor to return the whole result set.
// You can use '.hasNext()/.next()' to iterate through the cursor page by page.
db.getCollection('sales').aggregate([
// Find all of the sales that occurred in 2014.
{ $match: { date: { $gte: new Date('2014-01-01'), $lt: new Date('2015-01-01') } } },
// Group the total sales for each product.
{ $group: { _id: '$item', totalSaleAmount: { $sum: { $multiply: [ '$price', '$quantity' ] } } } }
]);
3 changes: 2 additions & 1 deletion docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,8 @@ services:
ports:
- "8000:8000"
environment:
OLLAMA_API_URL: http://host.docker.internal:11434
OLLAMA_API_URL: http://ollama:11434
MONGODB_ATLAS_URI: ${MONGODB_ATLAS_URI} # secret moved out of version control — supply via a .env file; the previously committed Atlas username/password must be rotated

llm-service:
image: ollama/ollama
Expand Down
25 changes: 24 additions & 1 deletion frontend/src/routes/+page.svelte
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,14 @@
let answer = '';
let isLoading = false;

let chatHistory = [];

// Load the persisted chat history from the backend.
// Fixed: 'backend-scaffold' is a docker-compose service name that the
// browser cannot resolve; every other request in this component targets
// http://localhost:8000, so this one must too.
async function fetchChatHistory() {
const response = await fetch('http://localhost:8000/get_history/');
chatHistory = await response.json();
}

async function sendMessageToLLMModel() {
isLoading = true;
const myHeaders = new Headers();
Expand All @@ -22,14 +30,23 @@
try {
const response = await fetch('http://localhost:8000/generate/', requestOptions);
const data = await response.json();
answer = data; // Assuming the backend responds with the generated text directly
answer = data.answer; // Assuming the backend responds with the generated text directly

await fetch('http://localhost:8000/save_history/', {
method: "POST",
headers: myHeaders,
body: JSON.stringify({ message, "answer": answer }),
redirect: "follow"
});
} catch (error) {
console.error('Error:', error);
answer = 'An error occurred while fetching the data.';
} finally {
isLoading = false; // Set loading to false when the request is complete
}
}


</script>

<main class="mainGridContainer">
Expand All @@ -42,6 +59,12 @@
<p>Tony Loehr</p>
<p>Clement Chang</p>
<p>Ambro Quach</p>

<ul>
{#each chatHistory as history}
<li>{history.message} - {history.answer}</li>
{/each}
</ul>
</div>

<div class="chatApp">
Expand Down