-
Notifications
You must be signed in to change notification settings - Fork 2.3k
/
llamaindex.py
56 lines (41 loc) · 1.82 KB
/
llamaindex.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
"""
Import necessary modules
pip install llama-index-core llama-index-readers-file llama-index-embeddings-openai phidata
"""
from pathlib import Path
from shutil import rmtree
import httpx
from phi.agent import Agent
from phi.knowledge.llamaindex import LlamaIndexKnowledgeBase
from llama_index.core import (
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.node_parser import SentenceSplitter
# --- Download the Paul Graham essay into a clean local data directory ---

# Resolve <repo>/wip/data/paul_graham relative to this file's location.
data_dir = Path(__file__).parent.parent.parent.joinpath("wip", "data", "paul_graham")

# Start from a fresh, empty directory so stale files never end up in the index.
if data_dir.is_dir():
    rmtree(path=data_dir, ignore_errors=True)
data_dir.mkdir(parents=True, exist_ok=True)

url = "https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt"
file_path = data_dir.joinpath("paul_graham_essay.txt")

response = httpx.get(url)
# Fail fast on a bad download. The original only printed "Failed to download
# the file" and fell through, letting the indexing step below run against an
# empty directory and fail with a confusing, unrelated error.
response.raise_for_status()
file_path.write_bytes(response.content)
print(f"File downloaded and saved as {file_path}")
# --- Build a LlamaIndex retriever and wire it into a phidata Agent ---

# Load the downloaded essay and split it into nodes with SentenceSplitter
# (chunk_size=1024).
loaded_docs = SimpleDirectoryReader(str(data_dir)).load_data()
chunker = SentenceSplitter(chunk_size=1024)
doc_nodes = chunker.get_nodes_from_documents(loaded_docs)

# Index the nodes in a vector store backed by the default storage context.
vector_index = VectorStoreIndex(
    nodes=doc_nodes,
    storage_context=StorageContext.from_defaults(),
)
vector_retriever = VectorIndexRetriever(vector_index)

# Expose the retriever to the agent as a searchable knowledge base.
knowledge_base = LlamaIndexKnowledgeBase(retriever=vector_retriever)
agent = Agent(
    knowledge_base=knowledge_base,
    search_knowledge=True,
    debug_mode=True,
    show_tool_calls=True,
)

# Ask a question grounded in the indexed essay; render the answer as markdown.
agent.print_response("Explain what this text means: low end eats the high end", markdown=True)