diff --git a/.github/workflows/add-markdown-examples-to-docs.yml b/.github/workflows/add-markdown-examples-to-docs.yml
new file mode 100644
index 00000000..fc26fb8b
--- /dev/null
+++ b/.github/workflows/add-markdown-examples-to-docs.yml
@@ -0,0 +1,77 @@
+name: Add Notebook Examples to Docs
+
+on:
+ push:
+ branches:
+ - main
+ paths:
+ - 'examples/**'
+ - 'docs/v1/examples/**'
+
+permissions:
+ contents: write
+ pull-requests: write
+
+jobs:
+ add-notebook-examples-to-docs:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.x'
+
+ - name: Install dependencies
+ run: |
+ pip install jupyter nbconvert
+
+ - name: Convert notebooks to markdown and add to docs
+ run: |
+ set -x # Enable debug mode
+ for file in docs/v1/examples/*.mdx; do
+ echo "Processing file: $file"
+ source_file=$(grep -oP '(?<=\{/\* SOURCE_FILE: ).*(?= \*/\})' "$file" || true)
+ if [[ -z "$source_file" ]]; then
+ echo "Error: No source file found in $file, skipping..." >&2
+ continue
+ fi
+ echo "Source file: $source_file"
+ if [[ -f "$source_file" ]]; then
+ echo "Converting notebook to markdown"
+ jupyter nbconvert --to markdown "$source_file" || { echo "Error: Failed to convert $source_file" >&2; continue; }
+ markdown_file="${source_file%.ipynb}.md"
+ echo "Appending markdown to $file"
+ echo -e "\n\n" >> "$file"
+ cat "$markdown_file" >> "$file" || { echo "Error: Failed to append markdown to $file" >&2; continue; }
+ rm "$markdown_file" || { echo "Error: Failed to remove $markdown_file" >&2; continue; }
+ else
+ echo "Error: Source file not found: $source_file" >&2
+ fi
+ done
+
+ - name: Commit changes
+ run: |
+ git config --local user.email "action@github.com"
+ git config --local user.name "GitHub Action"
+ git add docs/v1/examples/*.mdx
+ git diff --quiet && git diff --staged --quiet || git commit -m "GitHub Action: Update examples in docs from notebooks"
+
+ - name: Create Pull Request
+ uses: peter-evans/create-pull-request@v5
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ commit-message: Update examples in docs from notebooks
+ title: 'Update examples in docs from notebooks'
+ body: |
+ This PR updates the examples in the docs from the corresponding notebooks.
+ Please review the changes before merging.
+ branch: update-docs-examples
+ base: main
+# - name: Push changes
+# uses: ad-m/github-push-action@master
+# with:
+# github_token: ${{ secrets.GITHUB_TOKEN }}
+# branch: main
\ No newline at end of file
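
The workflow above keys on each docs page declaring its source notebook in an MDX comment of the form {/* SOURCE_FILE: examples/openai-gpt.ipynb */}, which the grep step extracts before converting and appending the notebook. A rough Python equivalent of that extraction, for illustration only (the workflow itself uses grep with a PCRE lookbehind):

import re
from pathlib import Path
from typing import Optional

def source_notebook(mdx_path: str) -> Optional[str]:
    # Return the notebook path declared in a page's SOURCE_FILE marker, if any.
    text = Path(mdx_path).read_text()
    match = re.search(r"\{/\* SOURCE_FILE: (.*?) \*/\}", text)
    return match.group(1) if match else None
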
diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml
index c11602bf..f627c1ee 100644
--- a/.github/workflows/codecov.yml
+++ b/.github/workflows/codecov.yml
@@ -2,9 +2,15 @@ name: Codecov
on:
push:
- branches: [ main ]
+ branches:
+ - main
+ paths:
+ - 'agentops/**'
pull_request:
- branches: [ main ]
+ branches:
+ - main
+ paths:
+ - 'agentops/**'
jobs:
test:
diff --git a/.github/workflows/python-testing.yml b/.github/workflows/python-testing.yml
index e06a2b69..2ecc3471 100644
--- a/.github/workflows/python-testing.yml
+++ b/.github/workflows/python-testing.yml
@@ -4,9 +4,15 @@ on:
push:
branches:
- main
+ paths:
+ - 'agentops/**'
+ - 'tests/**'
pull_request:
branches:
- main
+ paths:
+ - 'agentops/**'
+ - 'tests/**'
jobs:
build:
diff --git a/.github/workflows/tach-check.yml b/.github/workflows/tach-check.yml
index 2bbbd15e..32208e6a 100644
--- a/.github/workflows/tach-check.yml
+++ b/.github/workflows/tach-check.yml
@@ -1,7 +1,11 @@
-
name: Tach Check
-on: [pull_request]
+on:
+ pull_request:
+ paths:
+ - 'agentops/**'
+ - 'tests/**'
+ - 'examples/**'
jobs:
tach-check:
diff --git a/.github/workflows/test-notebooks.yml b/.github/workflows/test-notebooks.yml
index 406a7351..303437e0 100644
--- a/.github/workflows/test-notebooks.yml
+++ b/.github/workflows/test-notebooks.yml
@@ -1,14 +1,7 @@
name: Test Notebooks
on:
- push:
- branches:
- - main
- paths:
- - "agentops/**"
- - "examples/**"
- - "tests/**"
- - ".github/workflows/test-notebooks.yml"
- pull_request_target:
+ pull_request:
+ types: [closed]
branches:
- main
paths:
@@ -43,13 +36,17 @@ jobs:
echo "GROQ_API_KEY=${{ secrets.GROQ_API_KEY }}" >> .env
echo "MULTION_API_KEY=${{ secrets.MULTION_API_KEY }}" >> .env
echo "SERPER_API_KEY=${{ secrets.SERPER_API_KEY }}" >> .env
+ - name: Install AgentOps from main branch and remove agentops install from notebooks
+ run: |
+ pip install git+https://github.com/AgentOps-AI/agentops.git@main
+ find . -name '*.ipynb' -exec sed -i '/^%pip install.*agentops/d' {} +
- name: Run notebooks and check for errors
run: |
mkdir -p logs
exit_code=0
exclude_notebooks=(
- "./examples/crew/job_posting.ipynb",
+ "./examples/crewai_examples/job_posting.ipynb",
"./examples/demos/agentchat_agentops.ipynb"
)
diff --git a/agentops/__init__.py b/agentops/__init__.py
index 19983548..5a054f75 100755
--- a/agentops/__init__.py
+++ b/agentops/__init__.py
@@ -9,6 +9,8 @@
from .log_config import logger
from .session import Session
import threading
+from importlib.metadata import version as get_version
+from packaging import version
try:
from .partners.langchain_callback_handler import (
@@ -23,7 +25,12 @@
Client().add_default_tags(["autogen"])
if "crewai" in sys.modules:
- Client().configure(instrument_llm_calls=False)
+ crew_version = version.parse(get_version("crewai"))
+
+ # crewai < 0.56.0 uses langchain; later versions use litellm, and the default is to instrument
+ if crew_version < version.parse("0.56.0"):
+ Client().configure(instrument_llm_calls=False)
+
Client().add_default_tags(["crewai"])
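
The crewai branch above reduces to a version gate: releases older than 0.56.0 route calls through langchain, so LLM-call instrumentation is disabled, while newer releases use litellm and keep the default. A minimal standalone sketch of that check (assuming packaging is installed and crewai may or may not be):

from importlib.metadata import PackageNotFoundError, version as get_version
from packaging import version

def should_instrument_llm_calls() -> bool:
    # Mirrors the gate above: only crewai releases older than 0.56.0 disable instrumentation.
    try:
        crew_version = version.parse(get_version("crewai"))
    except PackageNotFoundError:
        return True  # crewai not installed; keep the default behaviour
    return crew_version >= version.parse("0.56.0")
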
diff --git a/agentops/client.py b/agentops/client.py
index f2fc4dea..50da1ed5 100644
--- a/agentops/client.py
+++ b/agentops/client.py
@@ -38,6 +38,7 @@ def __init__(self):
self._llm_tracker: Optional[LlmTracker] = None
self._sessions: List[Session] = active_sessions
self._config = Configuration()
+ self._pre_init_queue = {"agents": []}
self.configure(
api_key=os.environ.get("AGENTOPS_API_KEY"),
@@ -106,6 +107,13 @@ def initialize(self) -> Union[Session, None]:
if self._config.auto_start_session:
session = self.start_session()
+ if session:
+ for agent_args in self._pre_init_queue["agents"]:
+ session.create_agent(
+ name=agent_args["name"], agent_id=agent_args["agent_id"]
+ )
+ self._pre_init_queue["agents"] = []
+
return session
def _initialize_partner_framework(self) -> None:
@@ -234,6 +242,13 @@ def start_session(
config=self._config,
)
+ if self._pre_init_queue["agents"] and len(self._pre_init_queue["agents"]) > 0:
+ for agent_args in self._pre_init_queue["agents"]:
+ session.create_agent(
+ name=agent_args["name"], agent_id=agent_args["agent_id"]
+ )
+ self._pre_init_queue["agents"] = []
+
if not session.is_running:
return logger.error("Failed to start session")
@@ -294,7 +309,9 @@ def create_agent(
# if no session passed, assume single session
session = self._safe_get_session()
if session is None:
- return
+ self._pre_init_queue["agents"].append(
+ {"name": name, "agent_id": agent_id}
+ )
session.create_agent(name=name, agent_id=agent_id)
return agent_id
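
Taken together, the client changes buffer agents that are registered before any session exists and replay them once initialize() or start_session() produces one. A minimal sketch of the buffering pattern (illustrative only; it mirrors the names in the diff but is not the full Client implementation):

class PreInitQueueSketch:
    def __init__(self):
        self._pre_init_queue = {"agents": []}
        self._session = None

    def create_agent(self, name, agent_id):
        if self._session is None:
            # No session yet: remember the agent so it can be created later
            self._pre_init_queue["agents"].append({"name": name, "agent_id": agent_id})
        else:
            self._session.create_agent(name=name, agent_id=agent_id)
        return agent_id

    def start_session(self, session):
        self._session = session
        # Flush agents that were registered before the session existed
        for agent_args in self._pre_init_queue["agents"]:
            session.create_agent(name=agent_args["name"], agent_id=agent_args["agent_id"])
        self._pre_init_queue["agents"] = []
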
diff --git a/agentops/decorators.py b/agentops/decorators.py
index c1930b44..d291c416 100644
--- a/agentops/decorators.py
+++ b/agentops/decorators.py
@@ -326,12 +326,6 @@ def new_init(self, *args, **kwargs):
original_init(self, *args, **kwargs)
- if not Client().is_initialized:
- Client().add_pre_init_warning(
- f"Failed to track an agent {name} because agentops.init() was not "
- + "called before initializing the agent with the @track_agent decorator."
- )
-
self.agent_ops_agent_id = str(uuid4())
session = kwargs.get("session", None)
@@ -345,12 +339,10 @@ def new_init(self, *args, **kwargs):
)
except AttributeError as e:
Client().add_pre_init_warning(
- f"Failed to track an agent {name} because agentops.init() was not "
- + "called before initializing the agent with the @track_agent decorator."
+ f"Failed to track an agent {name} with the @track_agent decorator."
)
logger.warning(
- "Failed to track an agent. This often happens if agentops.init() was not "
- "called before initializing an agent with the @track_agent decorator."
+ "Failed to track an agent with the @track_agent decorator."
)
original_init(self, *args, **kwargs)
diff --git a/agentops/llms/__init__.py b/agentops/llms/__init__.py
index e3e6f7cf..8c7ba5f4 100644
--- a/agentops/llms/__init__.py
+++ b/agentops/llms/__init__.py
@@ -43,7 +43,6 @@ class LlmTracker:
def __init__(self, client):
self.client = client
- self.completion = ""
def override_api(self):
"""
diff --git a/agentops/llms/anthropic.py b/agentops/llms/anthropic.py
index 322d2181..e0e78891 100644
--- a/agentops/llms/anthropic.py
+++ b/agentops/llms/anthropic.py
@@ -31,27 +31,27 @@ def handle_response(
from anthropic.resources import AsyncMessages
from anthropic.types import Message
- self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
+ llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
if session is not None:
- self.llm_event.session_id = session.session_id
+ llm_event.session_id = session.session_id
def handle_stream_chunk(chunk: Message):
try:
# We take the first chunk and accumulate the deltas from all subsequent chunks to build one full chat completion
if chunk.type == "message_start":
- self.llm_event.returns = chunk
- self.llm_event.agent_id = check_call_stack_for_agent_id()
- self.llm_event.model = kwargs["model"]
- self.llm_event.prompt = kwargs["messages"]
- self.llm_event.prompt_tokens = chunk.message.usage.input_tokens
- self.llm_event.completion = {
+ llm_event.returns = chunk
+ llm_event.agent_id = check_call_stack_for_agent_id()
+ llm_event.model = kwargs["model"]
+ llm_event.prompt = kwargs["messages"]
+ llm_event.prompt_tokens = chunk.message.usage.input_tokens
+ llm_event.completion = {
"role": chunk.message.role,
"content": "", # Always returned as [] in this instance type
}
elif chunk.type == "content_block_start":
if chunk.content_block.type == "text":
- self.llm_event.completion["content"] += chunk.content_block.text
+ llm_event.completion["content"] += chunk.content_block.text
elif chunk.content_block.type == "tool_use":
self.tool_id = chunk.content_block.id
@@ -62,7 +62,7 @@ def handle_stream_chunk(chunk: Message):
elif chunk.type == "content_block_delta":
if chunk.delta.type == "text_delta":
- self.llm_event.completion["content"] += chunk.delta.text
+ llm_event.completion["content"] += chunk.delta.text
elif chunk.delta.type == "input_json_delta":
self.tool_event[self.tool_id].logs[
@@ -73,15 +73,15 @@ def handle_stream_chunk(chunk: Message):
pass
elif chunk.type == "message_delta":
- self.llm_event.completion_tokens = chunk.usage.output_tokens
+ llm_event.completion_tokens = chunk.usage.output_tokens
elif chunk.type == "message_stop":
- self.llm_event.end_timestamp = get_ISO_time()
- self._safe_record(session, self.llm_event)
+ llm_event.end_timestamp = get_ISO_time()
+ self._safe_record(session, llm_event)
except Exception as e:
self._safe_record(
- session, ErrorEvent(trigger_event=self.llm_event, exception=e)
+ session, ErrorEvent(trigger_event=llm_event, exception=e)
)
kwargs_str = pprint.pformat(kwargs)
@@ -124,23 +124,21 @@ async def async_generator():
# Handle object responses
try:
- self.llm_event.returns = response.model_dump()
- self.llm_event.agent_id = check_call_stack_for_agent_id()
- self.llm_event.prompt = kwargs["messages"]
- self.llm_event.prompt_tokens = response.usage.input_tokens
- self.llm_event.completion = {
+ llm_event.returns = response.model_dump()
+ llm_event.agent_id = check_call_stack_for_agent_id()
+ llm_event.prompt = kwargs["messages"]
+ llm_event.prompt_tokens = response.usage.input_tokens
+ llm_event.completion = {
"role": "assistant",
"content": response.content[0].text,
}
- self.llm_event.completion_tokens = response.usage.output_tokens
- self.llm_event.model = response.model
- self.llm_event.end_timestamp = get_ISO_time()
+ llm_event.completion_tokens = response.usage.output_tokens
+ llm_event.model = response.model
+ llm_event.end_timestamp = get_ISO_time()
- self._safe_record(session, self.llm_event)
+ self._safe_record(session, llm_event)
except Exception as e:
- self._safe_record(
- session, ErrorEvent(trigger_event=self.llm_event, exception=e)
- )
+ self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e))
kwargs_str = pprint.pformat(kwargs)
response = pprint.pformat(response)
logger.warning(
diff --git a/agentops/llms/cohere.py b/agentops/llms/cohere.py
index 68658761..ad8f93a5 100644
--- a/agentops/llms/cohere.py
+++ b/agentops/llms/cohere.py
@@ -52,9 +52,9 @@ def handle_response(
# from cohere.types.chat import ChatGenerationChunk
# NOTE: Cohere only returns one message and its role will be CHATBOT which we are coercing to "assistant"
- self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
+ llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
if session is not None:
- self.llm_event.session_id = session.session_id
+ llm_event.session_id = session.session_id
self.action_events = {}
@@ -62,22 +62,22 @@ def handle_stream_chunk(chunk, session: Optional[Session] = None):
# We take the first chunk and accumulate the deltas from all subsequent chunks to build one full chat completion
if isinstance(chunk, StreamedChatResponse_StreamStart):
- self.llm_event.returns = chunk
- self.llm_event.agent_id = check_call_stack_for_agent_id()
- self.llm_event.model = kwargs.get("model", "command-r-plus")
- self.llm_event.prompt = kwargs["message"]
- self.llm_event.completion = ""
+ llm_event.returns = chunk
+ llm_event.agent_id = check_call_stack_for_agent_id()
+ llm_event.model = kwargs.get("model", "command-r-plus")
+ llm_event.prompt = kwargs["message"]
+ llm_event.completion = ""
return
try:
if isinstance(chunk, StreamedChatResponse_StreamEnd):
# StreamedChatResponse_TextGeneration = LLMEvent
- self.llm_event.completion = {
+ llm_event.completion = {
"role": "assistant",
"content": chunk.response.text,
}
- self.llm_event.end_timestamp = get_ISO_time()
- self._safe_record(session, self.llm_event)
+ llm_event.end_timestamp = get_ISO_time()
+ self._safe_record(session, llm_event)
# StreamedChatResponse_SearchResults = ActionEvent
search_results = chunk.response.search_results
@@ -115,7 +115,7 @@ def handle_stream_chunk(chunk, session: Optional[Session] = None):
self._safe_record(session, action_event)
elif isinstance(chunk, StreamedChatResponse_TextGeneration):
- self.llm_event.completion += chunk.text
+ llm_event.completion += chunk.text
elif isinstance(chunk, StreamedChatResponse_ToolCallsGeneration):
pass
elif isinstance(chunk, StreamedChatResponse_CitationGeneration):
@@ -139,7 +139,7 @@ def handle_stream_chunk(chunk, session: Optional[Session] = None):
except Exception as e:
self._safe_record(
- session, ErrorEvent(trigger_event=self.llm_event, exception=e)
+ session, ErrorEvent(trigger_event=llm_event, exception=e)
)
kwargs_str = pprint.pformat(kwargs)
@@ -175,15 +175,15 @@ def generator():
# Not enough to record StreamedChatResponse_ToolCallsGeneration because the tool may have not gotten called
try:
- self.llm_event.returns = response
- self.llm_event.agent_id = check_call_stack_for_agent_id()
- self.llm_event.prompt = []
+ llm_event.returns = response
+ llm_event.agent_id = check_call_stack_for_agent_id()
+ llm_event.prompt = []
if response.chat_history:
role_map = {"USER": "user", "CHATBOT": "assistant", "SYSTEM": "system"}
for i in range(len(response.chat_history) - 1):
message = response.chat_history[i]
- self.llm_event.prompt.append(
+ llm_event.prompt.append(
{
"role": role_map.get(message.role, message.role),
"content": message.message,
@@ -191,19 +191,17 @@ def generator():
)
last_message = response.chat_history[-1]
- self.llm_event.completion = {
+ llm_event.completion = {
"role": role_map.get(last_message.role, last_message.role),
"content": last_message.message,
}
- self.llm_event.prompt_tokens = response.meta.tokens.input_tokens
- self.llm_event.completion_tokens = response.meta.tokens.output_tokens
- self.llm_event.model = kwargs.get("model", "command-r-plus")
+ llm_event.prompt_tokens = int(response.meta.tokens.input_tokens)
+ llm_event.completion_tokens = int(response.meta.tokens.output_tokens)
+ llm_event.model = kwargs.get("model", "command-r-plus")
- self._safe_record(session, self.llm_event)
+ self._safe_record(session, llm_event)
except Exception as e:
- self._safe_record(
- session, ErrorEvent(trigger_event=self.llm_event, exception=e)
- )
+ self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e))
kwargs_str = pprint.pformat(kwargs)
response = pprint.pformat(response)
logger.warning(
diff --git a/agentops/llms/groq.py b/agentops/llms/groq.py
index 7d5f6800..ca869638 100644
--- a/agentops/llms/groq.py
+++ b/agentops/llms/groq.py
@@ -37,21 +37,21 @@ def handle_response(
from groq.resources.chat import AsyncCompletions
from groq.types.chat import ChatCompletionChunk
- self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
+ llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
if session is not None:
- self.llm_event.session_id = session.session_id
+ llm_event.session_id = session.session_id
def handle_stream_chunk(chunk: ChatCompletionChunk):
# NOTE: prompt/completion usage not returned in response when streaming
# We take the first ChatCompletionChunk and accumulate the deltas from all subsequent chunks to build one full chat completion
- if self.llm_event.returns == None:
- self.llm_event.returns = chunk
+ if llm_event.returns == None:
+ llm_event.returns = chunk
try:
- accumulated_delta = self.llm_event.returns.choices[0].delta
- self.llm_event.agent_id = check_call_stack_for_agent_id()
- self.llm_event.model = chunk.model
- self.llm_event.prompt = kwargs["messages"]
+ accumulated_delta = llm_event.returns.choices[0].delta
+ llm_event.agent_id = check_call_stack_for_agent_id()
+ llm_event.model = chunk.model
+ llm_event.prompt = kwargs["messages"]
# NOTE: We assume for completion only choices[0] is relevant
choice = chunk.choices[0]
@@ -70,21 +70,19 @@ def handle_stream_chunk(chunk: ChatCompletionChunk):
if choice.finish_reason:
# Streaming is done. Record LLMEvent
- self.llm_event.returns.choices[0].finish_reason = (
- choice.finish_reason
- )
- self.llm_event.completion = {
+ llm_event.returns.choices[0].finish_reason = choice.finish_reason
+ llm_event.completion = {
"role": accumulated_delta.role,
"content": accumulated_delta.content,
"function_call": accumulated_delta.function_call,
"tool_calls": accumulated_delta.tool_calls,
}
- self.llm_event.end_timestamp = get_ISO_time()
+ llm_event.end_timestamp = get_ISO_time()
- self._safe_record(session, self.llm_event)
+ self._safe_record(session, llm_event)
except Exception as e:
self._safe_record(
- session, ErrorEvent(trigger_event=self.llm_event, exception=e)
+ session, ErrorEvent(trigger_event=llm_event, exception=e)
)
kwargs_str = pprint.pformat(kwargs)
@@ -127,19 +125,17 @@ async def async_generator():
# v1.0.0+ responses are objects
try:
- self.llm_event.returns = response.model_dump()
- self.llm_event.agent_id = check_call_stack_for_agent_id()
- self.llm_event.prompt = kwargs["messages"]
- self.llm_event.prompt_tokens = response.usage.prompt_tokens
- self.llm_event.completion = response.choices[0].message.model_dump()
- self.llm_event.completion_tokens = response.usage.completion_tokens
- self.llm_event.model = response.model
-
- self._safe_record(session, self.llm_event)
+ llm_event.returns = response.model_dump()
+ llm_event.agent_id = check_call_stack_for_agent_id()
+ llm_event.prompt = kwargs["messages"]
+ llm_event.prompt_tokens = response.usage.prompt_tokens
+ llm_event.completion = response.choices[0].message.model_dump()
+ llm_event.completion_tokens = response.usage.completion_tokens
+ llm_event.model = response.model
+
+ self._safe_record(session, llm_event)
except Exception as e:
- self._safe_record(
- session, ErrorEvent(trigger_event=self.llm_event, exception=e)
- )
+ self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e))
kwargs_str = pprint.pformat(kwargs)
response = pprint.pformat(response)
diff --git a/agentops/llms/litellm.py b/agentops/llms/litellm.py
index 053c4251..30b4c25a 100644
--- a/agentops/llms/litellm.py
+++ b/agentops/llms/litellm.py
@@ -49,21 +49,21 @@ def handle_response(
from openai.types.chat import ChatCompletionChunk
from litellm.utils import CustomStreamWrapper
- self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
+ llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
if session is not None:
- self.llm_event.session_id = session.session_id
+ llm_event.session_id = session.session_id
def handle_stream_chunk(chunk: ChatCompletionChunk):
# NOTE: prompt/completion usage not returned in response when streaming
# We take the first ChatCompletionChunk and accumulate the deltas from all subsequent chunks to build one full chat completion
- if self.llm_event.returns == None:
- self.llm_event.returns = chunk
+ if llm_event.returns == None:
+ llm_event.returns = chunk
try:
- accumulated_delta = self.llm_event.returns.choices[0].delta
- self.llm_event.agent_id = check_call_stack_for_agent_id()
- self.llm_event.model = chunk.model
- self.llm_event.prompt = kwargs["messages"]
+ accumulated_delta = llm_event.returns.choices[0].delta
+ llm_event.agent_id = check_call_stack_for_agent_id()
+ llm_event.model = chunk.model
+ llm_event.prompt = kwargs["messages"]
# NOTE: We assume for completion only choices[0] is relevant
choice = chunk.choices[0]
@@ -82,21 +82,19 @@ def handle_stream_chunk(chunk: ChatCompletionChunk):
if choice.finish_reason:
# Streaming is done. Record LLMEvent
- self.llm_event.returns.choices[0].finish_reason = (
- choice.finish_reason
- )
- self.llm_event.completion = {
+ llm_event.returns.choices[0].finish_reason = choice.finish_reason
+ llm_event.completion = {
"role": accumulated_delta.role,
"content": accumulated_delta.content,
"function_call": accumulated_delta.function_call,
"tool_calls": accumulated_delta.tool_calls,
}
- self.llm_event.end_timestamp = get_ISO_time()
+ llm_event.end_timestamp = get_ISO_time()
- self._safe_record(session, self.llm_event)
+ self._safe_record(session, llm_event)
except Exception as e:
self._safe_record(
- session, ErrorEvent(trigger_event=self.llm_event, exception=e)
+ session, ErrorEvent(trigger_event=llm_event, exception=e)
)
kwargs_str = pprint.pformat(kwargs)
@@ -149,19 +147,17 @@ async def async_generator():
# v1.0.0+ responses are objects
try:
- self.llm_event.returns = response
- self.llm_event.agent_id = check_call_stack_for_agent_id()
- self.llm_event.prompt = kwargs["messages"]
- self.llm_event.prompt_tokens = response.usage.prompt_tokens
- self.llm_event.completion = response.choices[0].message.model_dump()
- self.llm_event.completion_tokens = response.usage.completion_tokens
- self.llm_event.model = response.model
-
- self._safe_record(session, self.llm_event)
+ llm_event.returns = response
+ llm_event.agent_id = check_call_stack_for_agent_id()
+ llm_event.prompt = kwargs["messages"]
+ llm_event.prompt_tokens = response.usage.prompt_tokens
+ llm_event.completion = response.choices[0].message.model_dump()
+ llm_event.completion_tokens = response.usage.completion_tokens
+ llm_event.model = response.model
+
+ self._safe_record(session, llm_event)
except Exception as e:
- self._safe_record(
- session, ErrorEvent(trigger_event=self.llm_event, exception=e)
- )
+ self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e))
kwargs_str = pprint.pformat(kwargs)
response = pprint.pformat(response)
diff --git a/agentops/llms/ollama.py b/agentops/llms/ollama.py
index bdcb2190..e5779283 100644
--- a/agentops/llms/ollama.py
+++ b/agentops/llms/ollama.py
@@ -19,25 +19,25 @@ class OllamaProvider(InstrumentedProvider):
def handle_response(
self, response, kwargs, init_timestamp, session: Optional[Session] = None
) -> dict:
- self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
+ llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
def handle_stream_chunk(chunk: dict):
message = chunk.get("message", {"role": None, "content": ""})
if chunk.get("done"):
- self.llm_event.completion["content"] += message.get("content")
- self.llm_event.end_timestamp = get_ISO_time()
- self.llm_event.model = f'ollama/{chunk.get("model")}'
- self.llm_event.returns = chunk
- self.llm_event.returns["message"] = self.llm_event.completion
- self.llm_event.prompt = kwargs["messages"]
- self.llm_event.agent_id = check_call_stack_for_agent_id()
- self.client.record(self.llm_event)
-
- if self.llm_event.completion is None:
- self.llm_event.completion = message
+ llm_event.completion["content"] += message.get("content")
+ llm_event.end_timestamp = get_ISO_time()
+ llm_event.model = f'ollama/{chunk.get("model")}'
+ llm_event.returns = chunk
+ llm_event.returns["message"] = llm_event.completion
+ llm_event.prompt = kwargs["messages"]
+ llm_event.agent_id = check_call_stack_for_agent_id()
+ self.client.record(llm_event)
+
+ if llm_event.completion is None:
+ llm_event.completion = message
else:
- self.llm_event.completion["content"] += message.get("content")
+ llm_event.completion["content"] += message.get("content")
if inspect.isgenerator(response):
@@ -48,15 +48,15 @@ def generator():
return generator()
- self.llm_event.end_timestamp = get_ISO_time()
+ llm_event.end_timestamp = get_ISO_time()
- self.llm_event.model = f'ollama/{response["model"]}'
- self.llm_event.returns = response
- self.llm_event.agent_id = check_call_stack_for_agent_id()
- self.llm_event.prompt = kwargs["messages"]
- self.llm_event.completion = response["message"]
+ llm_event.model = f'ollama/{response["model"]}'
+ llm_event.returns = response
+ llm_event.agent_id = check_call_stack_for_agent_id()
+ llm_event.prompt = kwargs["messages"]
+ llm_event.completion = response["message"]
- self._safe_record(session, self.llm_event)
+ self._safe_record(session, llm_event)
return response
def override(self):
@@ -65,7 +65,7 @@ def override(self):
self._override_chat_async_client()
def undo_override(self):
- if original_func is not None:
+ if original_func is not None and original_func != {}:
import ollama
ollama.chat = original_func["ollama.chat"]
diff --git a/agentops/llms/openai.py b/agentops/llms/openai.py
index 0fd31a1d..c99523d7 100644
--- a/agentops/llms/openai.py
+++ b/agentops/llms/openai.py
@@ -30,21 +30,21 @@ def handle_response(
from openai.resources import AsyncCompletions
from openai.types.chat import ChatCompletionChunk
- self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
+ llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
if session is not None:
- self.llm_event.session_id = session.session_id
+ llm_event.session_id = session.session_id
def handle_stream_chunk(chunk: ChatCompletionChunk):
# NOTE: prompt/completion usage not returned in response when streaming
# We take the first ChatCompletionChunk and accumulate the deltas from all subsequent chunks to build one full chat completion
- if self.llm_event.returns == None:
- self.llm_event.returns = chunk
+ if llm_event.returns == None:
+ llm_event.returns = chunk
try:
- accumulated_delta = self.llm_event.returns.choices[0].delta
- self.llm_event.agent_id = check_call_stack_for_agent_id()
- self.llm_event.model = chunk.model
- self.llm_event.prompt = kwargs["messages"]
+ accumulated_delta = llm_event.returns.choices[0].delta
+ llm_event.agent_id = check_call_stack_for_agent_id()
+ llm_event.model = chunk.model
+ llm_event.prompt = kwargs["messages"]
# NOTE: We assume for completion only choices[0] is relevant
choice = chunk.choices[0]
@@ -63,21 +63,19 @@ def handle_stream_chunk(chunk: ChatCompletionChunk):
if choice.finish_reason:
# Streaming is done. Record LLMEvent
- self.llm_event.returns.choices[0].finish_reason = (
- choice.finish_reason
- )
- self.llm_event.completion = {
+ llm_event.returns.choices[0].finish_reason = choice.finish_reason
+ llm_event.completion = {
"role": accumulated_delta.role,
"content": accumulated_delta.content,
"function_call": accumulated_delta.function_call,
"tool_calls": accumulated_delta.tool_calls,
}
- self.llm_event.end_timestamp = get_ISO_time()
+ llm_event.end_timestamp = get_ISO_time()
- self._safe_record(session, self.llm_event)
+ self._safe_record(session, llm_event)
except Exception as e:
self._safe_record(
- session, ErrorEvent(trigger_event=self.llm_event, exception=e)
+ session, ErrorEvent(trigger_event=llm_event, exception=e)
)
kwargs_str = pprint.pformat(kwargs)
@@ -120,19 +118,17 @@ async def async_generator():
# v1.0.0+ responses are objects
try:
- self.llm_event.returns = response
- self.llm_event.agent_id = check_call_stack_for_agent_id()
- self.llm_event.prompt = kwargs["messages"]
- self.llm_event.prompt_tokens = response.usage.prompt_tokens
- self.llm_event.completion = response.choices[0].message.model_dump()
- self.llm_event.completion_tokens = response.usage.completion_tokens
- self.llm_event.model = response.model
-
- self._safe_record(session, self.llm_event)
+ llm_event.returns = response
+ llm_event.agent_id = check_call_stack_for_agent_id()
+ llm_event.prompt = kwargs["messages"]
+ llm_event.prompt_tokens = response.usage.prompt_tokens
+ llm_event.completion = response.choices[0].message.model_dump()
+ llm_event.completion_tokens = response.usage.completion_tokens
+ llm_event.model = response.model
+
+ self._safe_record(session, llm_event)
except Exception as e:
- self._safe_record(
- session, ErrorEvent(trigger_event=self.llm_event, exception=e)
- )
+ self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e))
kwargs_str = pprint.pformat(kwargs)
response = pprint.pformat(response)
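
The provider changes above (Anthropic, Cohere, Groq, LiteLLM, Ollama, OpenAI) all apply the same refactor: the event is built in a local llm_event variable that the stream handler closes over, rather than a shared self.llm_event on the provider instance, so interleaved requests can no longer overwrite each other's partial state. A minimal sketch of the pattern, with a plain dict standing in for LLMEvent:

def handle_response(chunks, kwargs):
    llm_event = {"params": kwargs, "completion": ""}  # local to this call, not provider state

    def handle_stream_chunk(chunk: str):
        llm_event["completion"] += chunk  # mutates only this call's event

    for chunk in chunks:
        handle_stream_chunk(chunk)
    return llm_event

first = handle_response(["Hello", ", world"], {"model": "claude-3"})
second = handle_response(["Hi"], {"model": "claude-3"})
assert first["completion"] == "Hello, world"
assert second["completion"] == "Hi"
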
diff --git a/agentops/time_travel.py b/agentops/time_travel.py
index 14e8b2af..6c1c7588 100644
--- a/agentops/time_travel.py
+++ b/agentops/time_travel.py
@@ -5,6 +5,8 @@
from .exceptions import ApiServerException
from .singleton import singleton
+ttd_prepend_string = "🖇️ Agentops: ⏰ Time Travel |"
+
@singleton
class TimeTravel:
@@ -47,9 +49,9 @@ def fetch_time_travel_id(ttd_id):
set_time_travel_active_state(True)
except ApiServerException as e:
- manage_time_travel_state(activated=False, error=e)
+ print(f"{ttd_prepend_string} Error - {e}")
except Exception as e:
- manage_time_travel_state(activated=False, error=e)
+ print(f"{ttd_prepend_string} Error - {e}")
def fetch_completion_override_from_time_travel_cache(kwargs):
@@ -64,14 +66,14 @@ def fetch_completion_override_from_time_travel_cache(kwargs):
def find_cache_hit(prompt_messages, completion_overrides):
if not isinstance(prompt_messages, (list, tuple)):
print(
- "Time Travel Error - unexpected type for prompt_messages. Expected 'list' or 'tuple'. Got ",
+ f"{ttd_prepend_string} Error - unexpected type for prompt_messages. Expected 'list' or 'tuple'. Got ",
type(prompt_messages),
)
return None
if not isinstance(completion_overrides, dict):
print(
- "Time Travel Error - unexpected type for completion_overrides. Expected 'dict'. Got ",
+ f"{ttd_prepend_string} Error - unexpected type for completion_overrides. Expected 'dict'. Got ",
type(completion_overrides),
)
return None
@@ -80,7 +82,7 @@ def find_cache_hit(prompt_messages, completion_overrides):
completion_override_dict = eval(key)
if not isinstance(completion_override_dict, dict):
print(
- "Time Travel Error - unexpected type for completion_override_dict. Expected 'dict'. Got ",
+ f"{ttd_prepend_string} Error - unexpected type for completion_override_dict. Expected 'dict'. Got ",
type(completion_override_dict),
)
continue
@@ -88,7 +90,7 @@ def find_cache_hit(prompt_messages, completion_overrides):
cached_messages = completion_override_dict.get("messages")
if not isinstance(cached_messages, list):
print(
- "Time Travel Error - unexpected type for cached_messages. Expected 'list'. Got ",
+ f"{ttd_prepend_string} Error - unexpected type for cached_messages. Expected 'list'. Got ",
type(cached_messages),
)
continue
@@ -105,10 +107,12 @@ def find_cache_hit(prompt_messages, completion_overrides):
return value
except (SyntaxError, ValueError, TypeError) as e:
print(
- f"Time Travel Error - Error processing completion_overrides item: {e}"
+ f"{ttd_prepend_string} Error - Error processing completion_overrides item: {e}"
)
except Exception as e:
- print(f"Time Travel Error - Unexpected error in find_cache_hit: {e}")
+ print(
+ f"{ttd_prepend_string} Error - Unexpected error in find_cache_hit: {e}"
+ )
return None
@@ -120,14 +124,10 @@ def check_time_travel_active():
try:
with open(config_file_path, "r") as config_file:
config = yaml.safe_load(config_file)
- if config.get("Time_Travel_Debugging_Active", True):
- manage_time_travel_state(activated=True)
- return True
+ return config.get("Time_Travel_Debugging_Active", False)
except FileNotFoundError:
return False
- return False
-
def set_time_travel_active_state(is_active: bool):
config_path = ".agentops_time_travel.yaml"
@@ -144,30 +144,11 @@ def set_time_travel_active_state(is_active: bool):
yaml.dump(config, config_file)
except:
print(
- f"🖇 AgentOps: Unable to write to {config_path}. Time Travel not activated"
+ f"{ttd_prepend_string} Error - Unable to write to {config_path}. Time Travel not activated"
)
return
- if is_active:
- manage_time_travel_state(activated=True)
- print("🖇 AgentOps: Time Travel Activated")
- else:
- manage_time_travel_state(activated=False)
- print("🖇 AgentOps: Time Travel Deactivated")
-
-
-def add_time_travel_terminal_indicator():
- print(f"🖇️ ⏰ | ", end="")
-
-
-def reset_terminal():
- print("\033[0m", end="")
-
-
-def manage_time_travel_state(activated=False, error=None):
- if activated:
- add_time_travel_terminal_indicator()
+ if is_active:
+ print(f"{ttd_prepend_string} Activated")
else:
- reset_terminal()
- if error is not None:
- print(f"🖇 Deactivating Time Travel. Error with configuration: {error}")
+ print(f"{ttd_prepend_string} Deactivated")
diff --git a/docs/snippets/add-env-tooltip.mdx b/docs/snippets/add-env-tooltip.mdx
index 240a4019..a3b1c6a0 100644
--- a/docs/snippets/add-env-tooltip.mdx
+++ b/docs/snippets/add-env-tooltip.mdx
@@ -1,3 +1,3 @@
- Set your API Key as an `.env` variable for easy access.
+ Set your API key as an `.env` variable for easy access.
\ No newline at end of file
diff --git a/docs/snippets/github-stars.mdx b/docs/snippets/github-stars.mdx
index e31a312c..1e73b3cc 100644
--- a/docs/snippets/github-stars.mdx
+++ b/docs/snippets/github-stars.mdx
@@ -1 +1 @@
-Look useful? [Star us on Github](https://github.com/AgentOps-AI/agentops)! (you may be our 2,000th 😊)
\ No newline at end of file
+Look useful? [Star us on GitHub](https://github.com/AgentOps-AI/agentops)! (you may be our 2,000th 😊)
\ No newline at end of file
diff --git a/docs/v0/recording-events.mdx b/docs/v0/recording-events.mdx
index c831a5a2..ff5eccbe 100644
--- a/docs/v0/recording-events.mdx
+++ b/docs/v0/recording-events.mdx
@@ -18,7 +18,7 @@ def sample_function(...):
...
```
-The the decorator will record the function's parameters, returns, and the time duration. We suggest using this on functions that take a long time and contain nested functions. For example, if you decorate a function that makes several openai calls, then each openai call will show in the replay graph as a child of the decorated function.
+The decorator will record the function's parameters, returns, and the time duration. We suggest using this on functions that take a long time and contain nested functions. For example, if you decorate a function that makes several openai calls, then each openai call will show in the replay graph as a child of the decorated function.
record_action:
@@ -36,7 +36,7 @@ ao_client.record(Event("event_type1"))
```
In AgentOps, each session is associated with a number of "Events". Events have
-must have an "event_type" which is any abitrary string of your choice. It might be something
+must have an "event_type" which is any arbitrary string of your choice. It might be something
like "OpenAI Call". Events can also have other information such as the parameters of the operation,
the returned data, alongside tags, etc.
diff --git a/docs/v1/concepts/decorators.mdx b/docs/v1/concepts/decorators.mdx
index 49425e7a..01f372e3 100644
--- a/docs/v1/concepts/decorators.mdx
+++ b/docs/v1/concepts/decorators.mdx
@@ -17,13 +17,13 @@ If your implementation uses Classes to denote Agents, this decorator enables aut
Learn more about tracking agents [here](/v1/usage/tracking-agents).
## `@record_action()`
-Sometimes your agent system will use functions that are important to track as [`Actions`](/v1/concepts/events/#actionevent).
+Sometimes, your agent system uses functions that are important to track as [`Actions`](/v1/concepts/events/#actionevent).
Adding this decorator above any function will allow every instance of that function call to be tracked and displayed
in your [Session](v1/concepts/sessions) Drill-Down on the dashboard.
## `@record_tool()`
-Some functions are used as Tools. If you're not using an agent framework that records [`ToolEvents`](/v1/concepts/events/#toolevent) with AgentOps automatically, this decorator will record `ToolEvents` when the function is called.
+Some functions are used as Tools. If you are not using an agent framework that records [`ToolEvents`](/v1/concepts/events/#toolevent) with AgentOps automatically, this decorator will record `ToolEvents` when the function is called.
Adding this decorator above any function will allow every instance of that function call to be tracked and displayed
in your [Session](v1/concepts/sessions) Drill-Down on the dashboard.
diff --git a/docs/v1/concepts/sessions.mdx b/docs/v1/concepts/sessions.mdx
index 25f95f88..1da20fb3 100644
--- a/docs/v1/concepts/sessions.mdx
+++ b/docs/v1/concepts/sessions.mdx
@@ -58,7 +58,7 @@ Calling `agentops.init(auto_start_session=False)` will initialize the AgentOps S
To start a session later, call `agentops.start_session()` [(reference)](/v1/usage/sdk-reference/#start-session)
-Both `agentops.init()` and `agentops.start_session()` works as a factory pattern and returns a `Session` object. The above methods can all be called on this session object.
+Both `agentops.init()` and `agentops.start_session()` work as a factory pattern and return a `Session` object. The above methods can all be called on this session object.
## Ending a Session
If a process ends without any call to agentops, it will show in the dashboard as `Indeterminate`.
@@ -71,7 +71,7 @@ with an existing session_id.
`agentops.init(inherited_session_id=)`
`agentops.start_session(inherited_session_id=)`
-You can retrieve the current session_id by assigning the returned value from `init()` or `start_session()`
+You can retrieve the current `session_id` by assigning the returned value from `init()` or `start_session()`
```python python
@@ -90,8 +90,7 @@ Both processes will now contribute data to the same session.
## The AgentOps SDK Client
_More info for the curious_
-Under the hood, `agentops.init()` sets up a `Client` object with various configuration options like your API key, worker thread options
-for when to send out batches of events, etc. Whenever you start a new session, these configuration options will automatically
+Under the hood, `agentops.init()` creates a `Client` object with various configuration options. Whenever you start a new session, these configuration options will automatically
be applied. You can also apply different configuration options when you start a new session by passing in a
[Configuration](/v1/usage/sdk-reference/#configuration) object.
diff --git a/docs/v1/examples/langchain.mdx b/docs/v1/examples/langchain.mdx
index bd121bb0..5e6c73a4 100644
--- a/docs/v1/examples/langchain.mdx
+++ b/docs/v1/examples/langchain.mdx
@@ -1,9 +1,8 @@
---
-title: 'Langchain Example'
-description: 'Using the Langchain Callback Handler'
+title: 'LangChain Example'
+description: 'Using the LangChain Callback Handler'
mode: "wide"
---
_View Notebook on Github_
-
-
\ No newline at end of file
+{/* SOURCE_FILE: examples/langchain_examples.ipynb */}
\ No newline at end of file
diff --git a/docs/v1/examples/multi_agent.mdx b/docs/v1/examples/multi_agent.mdx
index 6ceb1deb..40133490 100644
--- a/docs/v1/examples/multi_agent.mdx
+++ b/docs/v1/examples/multi_agent.mdx
@@ -5,5 +5,4 @@ mode: "wide"
---
_View Notebook on Github_
-
-
+{/* SOURCE_FILE: examples/multi_agent_example.ipynb */}
\ No newline at end of file
diff --git a/docs/v1/examples/multi_session.mdx b/docs/v1/examples/multi_session.mdx
index 976727eb..794a7865 100644
--- a/docs/v1/examples/multi_session.mdx
+++ b/docs/v1/examples/multi_session.mdx
@@ -5,5 +5,4 @@ mode: "wide"
---
_View Notebook on Github_
-
-
\ No newline at end of file
+{/* SOURCE_FILE: examples/multi_session_llm.ipynb */}
\ No newline at end of file
diff --git a/docs/v1/examples/multion.mdx b/docs/v1/examples/multion.mdx
index f7a90a82..6b1cbefe 100644
--- a/docs/v1/examples/multion.mdx
+++ b/docs/v1/examples/multion.mdx
@@ -4,11 +4,6 @@ description: 'Tracking Multion usage with AgentOps'
mode: "wide"
---
-
-
-
-
-
-
-
-
\ No newline at end of file
+_View All Notebooks on GitHub_
+
+{/* SOURCE_FILE: examples/multion_examples/Autonomous_web_browsing.ipynb */}
diff --git a/docs/v1/examples/recording_events.mdx b/docs/v1/examples/recording_events.mdx
index 23773521..7d08f7f7 100644
--- a/docs/v1/examples/recording_events.mdx
+++ b/docs/v1/examples/recording_events.mdx
@@ -5,5 +5,4 @@ mode: "wide"
---
_View Notebook on Github_
-
-
\ No newline at end of file
+{/* SOURCE_FILE: examples/recording-events.ipynb */}
\ No newline at end of file
diff --git a/docs/v1/examples/simple_agent.mdx b/docs/v1/examples/simple_agent.mdx
index c5f599cf..07a41318 100644
--- a/docs/v1/examples/simple_agent.mdx
+++ b/docs/v1/examples/simple_agent.mdx
@@ -5,5 +5,4 @@ mode: "wide"
---
_View Notebook on Github_
-
-
+{/* SOURCE_FILE: examples/openai-gpt.ipynb */}
diff --git a/docs/v1/integrations/cohere.mdx b/docs/v1/integrations/cohere.mdx
index f80cad05..35771a8a 100644
--- a/docs/v1/integrations/cohere.mdx
+++ b/docs/v1/integrations/cohere.mdx
@@ -7,7 +7,7 @@ import CodeTooltip from '/snippets/add-code-tooltip.mdx'
import EnvTooltip from '/snippets/add-env-tooltip.mdx'
-This is a living integration. Should you need any added functionality message us on [Discord](https://discord.gg/UgJyyxx7uc)!
+This is a living integration. Should you need any added functionality, message us on [Discord](https://discord.gg/UgJyyxx7uc)!
@@ -41,7 +41,7 @@ This is a living integration. Should you need any added functionality message us
- Requires cohere>=5.4.0
+ Requires `cohere>=5.4.0`
@@ -54,7 +54,7 @@ This is a living integration. Should you need any added functionality message us
- Execute your program and visit [app.agentops.ai/drilldown](https://app.agentops.ai/drilldown) to observe your Agents! 🕵️
+ Execute your program and visit [app.agentops.ai/drilldown](https://app.agentops.ai/drilldown) to observe your Agent! 🕵️
After your run, AgentOps prints a clickable url to console linking directly to your session in the Dashboard
diff --git a/docs/v1/integrations/langchain.mdx b/docs/v1/integrations/langchain.mdx
index 575091f3..1b310691 100644
--- a/docs/v1/integrations/langchain.mdx
+++ b/docs/v1/integrations/langchain.mdx
@@ -1,16 +1,16 @@
---
-title: Langchain
-description: "AgentOps provides first class support for Lanchain applications"
+title: LangChain
+description: "AgentOps provides first class support for LangChain applications"
---
import EnvTooltip from '/snippets/add-env-tooltip.mdx'
-AgentOps works seamlessly with applications built using Langchain.
+AgentOps works seamlessly with applications built using LangChain.
-## Adding AgentOps to Langchain applications
+## Adding AgentOps to LangChain applications
-
+
```bash pip
pip install agentops
@@ -24,7 +24,7 @@ AgentOps works seamlessly with applications built using Langchain.
[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 2,000th 😊)
- Import the following Langchain and AgentOps dependencies
+ Import the following LangChain and AgentOps dependencies
```python python
import os
@@ -34,15 +34,15 @@ AgentOps works seamlessly with applications built using Langchain.
```
-
+
- Note that you don't need to set up a separate agentops.init() call, as the Langchain callback handler will automatically initialize the AgentOps client for you.
+ Note that you don't need to set up a separate agentops.init() call, as the LangChain callback handler will automatically initialize the AgentOps client for you.
- Set up your Langchain agent with the AgentOps callback handler and AgentOps will automatically record your Langchain sessions.
+ Set up your LangChain agent with the AgentOps callback handler, and AgentOps will automatically record your LangChain sessions.
```python python
- handler = LangchainCallbackHandler(api_key=AGENTOPS_API_KEY, tags=['Langchain Example'])
+ handler = LangchainCallbackHandler(api_key=AGENTOPS_API_KEY, tags=['LangChain Example'])
@@ -70,9 +70,9 @@ AgentOps works seamlessly with applications built using Langchain.
- Execute your program and visit [app.agentops.ai/drilldown](https://app.agentops.ai/drilldown) to observe your Langchain Agent! 🕵️
+ Execute your program and visit [app.agentops.ai/drilldown](https://app.agentops.ai/drilldown) to observe your LangChain Agent! 🕵️
- After your run, AgentOps prints a clickable url to console linking directly to your session in the Dashboard
+ After your run, AgentOps prints a clickable URL to the console linking directly to your session in the Dashboard
{/* Intentionally blank div for newline */}
@@ -90,7 +90,7 @@ AgentOps works seamlessly with applications built using Langchain.
from langchain.agents import initialize_agent, AgentType
from agentops.langchain_callback_handler import LangchainCallbackHandler
- handler = LangchainCallbackHandler(api_key=AGENTOPS_API_KEY, tags=['Langchain Example'])
+ handler = LangchainCallbackHandler(api_key=AGENTOPS_API_KEY, tags=['LangChain Example'])
llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY,
callbacks=[handler],
diff --git a/docs/v1/integrations/litellm.mdx b/docs/v1/integrations/litellm.mdx
index e82916c5..ed851b84 100644
--- a/docs/v1/integrations/litellm.mdx
+++ b/docs/v1/integrations/litellm.mdx
@@ -1,27 +1,27 @@
---
title: LiteLLM
description: "Call the latest models using the OpenAI format including:
-Llama, Mistral, Claude, Gemini, Gemma, Dall-E, Whisper"
+Llama, Mistral, Claude, Gemini, Gemma, DALL-E, Whisper"
---
## LiteLLM
From [LiteLLM's docs](https://docs.litellm.ai/docs/):
-Call 100+ LLMs using the same Input/Output Format
+Call 400+ LLMs using the same input/output format
- Translate inputs to provider's `completion`, `embedding`, and `image_generation` endpoints
-- Consistent output. Text responses will always be available at `['choices'][0]['message']['content']`
+- Consistent output. Text responses will always be available at `['choices'][0]['message']['content']`.
- Retry/fallback logic across multiple deployments (e.g. Azure/OpenAI)
- Track spend & set budgets per project
-LiteLLM also supports many [providers](https://docs.litellm.ai/docs/providers)
+LiteLLM also supports many [providers](https://docs.litellm.ai/docs/providers).
## Using AgentOps with LiteLLM
-### Requires litellm>=1.3.1
+### Requires `litellm>=1.3.1`
-AgentOps requires you to make a minor adjustment to how you call LiteLLM.
+AgentOps requires a minor adjustment to how you call LiteLLM.
```python python
# Do not use LiteLLM like this
diff --git a/docs/v1/introduction.mdx b/docs/v1/introduction.mdx
index e7992adc..db5367b4 100644
--- a/docs/v1/introduction.mdx
+++ b/docs/v1/introduction.mdx
@@ -32,7 +32,7 @@ And we do it all in just two lines of code...
## The AgentOps Dashboard
-With just two lines of code, you can free yourself from the chains of the terminal and instead visualize your agents' behavior
+With just two lines of code, you can free yourself from the chains of the terminal and, instead, visualize your agents' behavior
in your AgentOps Dashboard. After setting up AgentOps, each execution of your program is recorded as a session and the above
data is automatically recorded for you.
@@ -53,7 +53,7 @@ Find any past sessions from your Session Drawer.
Most powerful of all is the Session Waterfall. On the left, a time visualization of all your LLM calls, Action events, Tool calls, and Errors.
-On the right, specific details about the event you've selected on the waterfall. For instance the exact prompt and completion for a given LLM call.
+On the right, specific details about the event you've selected on the waterfall. For instance, the exact prompt and completion for a given LLM call.
Most of which has been automatically recorded for you.
diff --git a/docs/v1/quickstart.mdx b/docs/v1/quickstart.mdx
index 3bc5e242..5df0c48c 100644
--- a/docs/v1/quickstart.mdx
+++ b/docs/v1/quickstart.mdx
@@ -32,7 +32,7 @@ import EnvTooltip from '/snippets/add-env-tooltip.mdx'
Execute your program and visit [app.agentops.ai/drilldown](https://app.agentops.ai/drilldown) to observe your Agent! 🕵️
- After your run, AgentOps prints a clickable url to console linking directly to your session in the Dashboard
+ After your run, AgentOps prints a clickable URL to the console linking directly to your session in the Dashboard
{/* Intentionally blank div for newline */}
@@ -52,7 +52,7 @@ import EnvTooltip from '/snippets/add-env-tooltip.mdx'
will see these function calls alongside your LLM calls from instantiating the AgentOps client.
```python python
# (record specific functions)
- @agentops.record_action('sample function being record')
+ @agentops.record_action('sample function being recorded')
def sample_function(...):
...
```
@@ -70,7 +70,7 @@ import EnvTooltip from '/snippets/add-env-tooltip.mdx'
- Finally, you should end your session by calling `.end_session()` with whether your session
+ Finally, you should end your session by calling `.end_session()` indicating whether your session
was successful or not `(Success|Fail)`. We suggest setting session state depending on how
your agent exits or whether your agent succeeded or not. You can also specify a end state reason,
such as user interrupted, ran to completion, or unhandled exception.
diff --git a/docs/v1/usage/langchain-callback-handler.mdx b/docs/v1/usage/langchain-callback-handler.mdx
index 9b36644d..8337a287 100644
--- a/docs/v1/usage/langchain-callback-handler.mdx
+++ b/docs/v1/usage/langchain-callback-handler.mdx
@@ -1,12 +1,12 @@
---
-title: 'Langchain Callback Handler'
-description: 'How to use AgentOps with Langchain'
+title: 'LangChain Callback Handler'
+description: 'How to use AgentOps with LangChain'
---
-By default, AgentOps is compatible with agents using Langchain with our LLM Instrumentor as long as they're using
+By default, AgentOps is compatible with agents using LangChain with our LLM Instrumentor as long as they're using
supported models.
-As an alternative to instrumenting, the Langchain Callback Handler is available.
+As an alternative to instrumenting, the LangChain Callback Handler is available.
## Constructor
@@ -41,7 +41,7 @@ properly include `instrument_llm_calls=False`. In this case, call
### Implement Callback Handler
-Initialize the handler with its constructor and pass it into the callbacks array from Langchain.
+Initialize the handler with its constructor and pass it into the callbacks array from LangChain.
```python
from agentops.langchain_callback_handler import LangchainCallbackHandler
ChatOpenAI(callbacks=[LangchainCallbackHandler()])
@@ -63,9 +63,9 @@ response = chain.invoke({"animal": "bears"})
## Why use the handler?
-If your project uses Langchain for Agents, Events and Tools, it may be easier to use the callback Handler for observability.
+If your project uses LangChain for Agents, Events and Tools, it may be easier to use the callback Handler for observability.
-If your project uses models with Langchain that are not yet supported by AgentOps, they can be supported by the Handler.
+If your project uses models with LangChain that are not yet supported by AgentOps, they can be supported by the Handler.
diff --git a/docs/v1/usage/multiple-sessions.mdx b/docs/v1/usage/multiple-sessions.mdx
index 38c29aac..f84c18a6 100644
--- a/docs/v1/usage/multiple-sessions.mdx
+++ b/docs/v1/usage/multiple-sessions.mdx
@@ -159,7 +159,7 @@ session.record(Event(...))
# Assigning LLM Calls
When we have multiple active sessions, it's impossible for AgentOps to know which session a particular LLM call belongs to without a little help.
-To track an LLM Call, use [`session.patch()`](/v1/concepts/sessions#patch)
+To track an LLM call, use [`session.patch()`](/v1/concepts/sessions#patch)
```python
import agentops
diff --git a/docs/v1/usage/recording-events.mdx b/docs/v1/usage/recording-events.mdx
index f7fec071..361188ec 100644
--- a/docs/v1/usage/recording-events.mdx
+++ b/docs/v1/usage/recording-events.mdx
@@ -16,12 +16,12 @@ and record an event for your function.
```python python
from agentops import record_action
-@record_action('sample function being record')
+@record_action('sample function being recorded')
def sample_function(...):
...
```
-The decorator will record the function's parameters, returns, and the time duration. We suggest using this on functions that take a long time and contain nested functions. For example, if you decorate a function that makes several openai calls, then each openai call will show in the replay graph as a child of the decorated function.
+The decorator will record the function's parameters, return values, and the time duration. We suggest using this on functions that take a long time and contain nested functions. For example, if you decorate a function that makes several OpenAI calls, then each OpenAI call will show in the replay graph as a child of the decorated function.
## `@record_tool` Decorator
@@ -37,12 +37,12 @@ def sample_tool(...):
...
```
-The decorator will record the function's parameters, returns, and the time duration. We suggest using this on functions that take a long time and contain nested functions. For example, if you decorate a function that makes several openai calls, then each openai call will show in the replay graph as a child of the decorated function.
+The decorator will record the function's parameters, return values, and the time duration. We suggest using this on functions that take a long time and contain nested functions. For example, if you decorate a function that makes several OpenAI calls, then each OpenAI call will show in the replay graph as a child of the decorated function.
## `record()` Method
-From this point, simply call the .record() method in the AgentOps client:
+From this point, simply call the `.record()` method in the AgentOps client:
Record any child of the [Event type](/v1/concepts/events) or ErrorEvent.
diff --git a/docs/v1/usage/sdk-reference.mdx b/docs/v1/usage/sdk-reference.mdx
index 647ba101..876aee69 100644
--- a/docs/v1/usage/sdk-reference.mdx
+++ b/docs/v1/usage/sdk-reference.mdx
@@ -51,7 +51,7 @@ Start a new [Session](/v1/concepts/sessions) for recording events.
### `end_session()`
-End the current session with the AgentOps service.
+Ends the current session with the AgentOps service.
**Parameters**:
@@ -113,8 +113,8 @@ Set the parent API key which has visibility over projects it is a parent of.
### `stop_instrumenting()`
-Stop instrumenting LLM calls. This is typically used by agent frameworks (i.e. [CrewAI](/v1/integrations/crewai),
-[autogen](/v1/integrations/autogen)) to stop using the AgentOps auto instrumentation of LLM libraries like OpenAI. This
+Stops instrumenting LLM calls. This is typically used by agent frameworks (e.g., [CrewAI](/v1/integrations/crewai) and
+[autogen](/v1/integrations/autogen)) to stop using AgentOps' auto-instrumentation of LLM libraries such as OpenAI. This
allows these frameworks to use their own instrumenting or callback handler.
@@ -151,7 +151,7 @@ Stores the configuration settings for AgentOps clients.
[Reference](/v1/usage/langchain-callback-handler)
This callback handler is intended to be used as an option in place of AgentOps auto-instrumenting. This is only useful
-when using Langchain as your LLM calling library.
+when using LangChain as your LLM calling library.
diff --git a/docs/v1/usage/tracking-llm-calls.mdx b/docs/v1/usage/tracking-llm-calls.mdx
index 9cb93ab8..b448fd18 100644
--- a/docs/v1/usage/tracking-llm-calls.mdx
+++ b/docs/v1/usage/tracking-llm-calls.mdx
@@ -14,7 +14,7 @@ Try these steps:
1. Make sure you have the latest version of the AgentOps SDK installed. We are constantly updating it to support new LLM libraries and releases.
2. Make sure you are calling `agentops.init()` *after* importing the LLM module but *before* you are calling the LLM method.
3. Make sure the `instrument_llm_calls` parameter of `agentops.init()` is set to `True` (default).
-4. Make sure if you have more than one concurrent session, to patch the LLM call as described [here](/v1/usage/multiple-sssions).
+4. Make sure that if you have more than one concurrent session, you patch the LLM call as described [here](/v1/usage/multiple-sssions).
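For step 2 above, the ordering looks like this (a minimal sketch; the placeholder keys and model name are illustrative):

```python
from openai import OpenAI  # 1) import the LLM library first
import agentops

agentops.init(api_key="<AGENTOPS_API_KEY>")  # 2) then initialize AgentOps

client = OpenAI()  # 3) only now create the client and make calls
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
```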
Still not working? Please let us know! You can find us on [Discord](https://discord.gg/DR2abmETjZ),
[GitHub](https://github.com/AgentOps-AI/agentops),
@@ -32,7 +32,7 @@ To get started, just follow the quick start guide.
To stop tracking LLM calls after running `agentops.init()`, you can call `agentops.stop_instrumenting()`.
-This function reverts the changes made to your LLM Provider's module, removing AgentOps instrumentation.
+This function reverts the changes made to your LLM provider's module, removing AgentOps instrumentation.
_Special consideration for Cohere: Calling `stop_instrumenting()` has no effect on previously instantiated Cohere clients. You must create a new Cohere client after calling this function._
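A sketch of what that means in practice for Cohere (client construction details are illustrative and assume the Cohere API key is set in the environment):

```python
import cohere
import agentops

agentops.init(api_key="<AGENTOPS_API_KEY>")

co_tracked = cohere.Client()   # instrumented: its calls are recorded by AgentOps

agentops.stop_instrumenting()  # reverts AgentOps' patches on the provider modules

# The client created above keeps its instrumentation, so create a new one
# if you want subsequent Cohere calls to go untracked.
co_untracked = cohere.Client()
```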
diff --git a/examples/anthropic-sdk/anthropic_example.ipynb b/examples/anthropic_examples/anthropic_example.ipynb
similarity index 100%
rename from examples/anthropic-sdk/anthropic_example.ipynb
rename to examples/anthropic_examples/anthropic_example.ipynb
diff --git a/examples/autogen/AgentChat.ipynb b/examples/autogen_examples/AgentChat.ipynb
similarity index 99%
rename from examples/autogen/AgentChat.ipynb
rename to examples/autogen_examples/AgentChat.ipynb
index 4de700ab..85fd4991 100644
--- a/examples/autogen/AgentChat.ipynb
+++ b/examples/autogen_examples/AgentChat.ipynb
@@ -146,6 +146,7 @@
"except StdinNotImplementedError:\n",
" # This is only necessary for AgentOps testing automation which is headless and will not have user input\n",
" print(\"Stdin not implemented. Skipping initiate_chat\")\n",
+ " agentops.end_session(\"Indeterminate\")\n",
"\n",
"# Close your AgentOps session to indicate that it completed.\n",
"agentops.end_session(\"Success\")\n",
diff --git a/examples/autogen/MathAgent.ipynb b/examples/autogen_examples/MathAgent.ipynb
similarity index 99%
rename from examples/autogen/MathAgent.ipynb
rename to examples/autogen_examples/MathAgent.ipynb
index bf542594..13bf58a8 100644
--- a/examples/autogen/MathAgent.ipynb
+++ b/examples/autogen_examples/MathAgent.ipynb
@@ -195,6 +195,7 @@
"except StdinNotImplementedError:\n",
" # This is only necessary for AgentOps testing automation which is headless and will not have user input\n",
" print(\"Stdin not implemented. Skipping initiate_chat\")\n",
+ " agentops.end_session(\"Indeterminate\")\n",
"\n",
"agentops.end_session(\"Success\")"
]
diff --git a/examples/cohere-sdk/cohere_example.ipynb b/examples/cohere_examples/cohere_example.ipynb
similarity index 100%
rename from examples/cohere-sdk/cohere_example.ipynb
rename to examples/cohere_examples/cohere_example.ipynb
diff --git a/examples/crew/README.md b/examples/crewai_examples/README.md
similarity index 100%
rename from examples/crew/README.md
rename to examples/crewai_examples/README.md
diff --git a/examples/crew/job_posting.ipynb b/examples/crewai_examples/job_posting.ipynb
similarity index 95%
rename from examples/crew/job_posting.ipynb
rename to examples/crewai_examples/job_posting.ipynb
index eb3faaaa..4c118ac4 100644
--- a/examples/crew/job_posting.ipynb
+++ b/examples/crewai_examples/job_posting.ipynb
@@ -41,7 +41,10 @@
"from crewai_tools.tools import WebsiteSearchTool, SerperDevTool, FileReadTool\n",
"import agentops\n",
"import os\n",
- "from dotenv import load_dotenv"
+ "from dotenv import load_dotenv\n",
+ "from IPython.core.error import (\n",
+ " StdinNotImplementedError,\n",
+ ") # only needed by AgentOps testing automation"
]
},
{
@@ -244,8 +247,13 @@
" ],\n",
")\n",
"\n",
- "# Kick off the process\n",
- "result = crew.kickoff()\n",
+ "try:\n",
+ " # Kick off the process\n",
+ " result = crew.kickoff()\n",
+ "except StdinNotImplementedError:\n",
+ " # This is only necessary for AgentOps testing automation which is headless and will not have user input\n",
+ " print(\"Stdin not implemented. Skipping kickoff()\")\n",
+ " agentops.end_session(\"Indeterminate\")\n",
"\n",
"print(\"Job Posting Creation Process Completed.\")\n",
"print(\"Final Job Posting:\")\n",
diff --git a/examples/crew/markdown_validator.ipynb b/examples/crewai_examples/markdown_validator.ipynb
similarity index 100%
rename from examples/crew/markdown_validator.ipynb
rename to examples/crewai_examples/markdown_validator.ipynb
diff --git a/examples/demos/agentchat_agentops.ipynb b/examples/demos/agentchat_agentops.ipynb
index 7b0d7b18..2aa7a84e 100644
--- a/examples/demos/agentchat_agentops.ipynb
+++ b/examples/demos/agentchat_agentops.ipynb
@@ -196,6 +196,7 @@
"except StdinNotImplementedError:\n",
" # This is only necessary for AgentOps testing automation which is headless and will not have user input\n",
" print(\"Stdin not implemented. Skipping initiate_chat\")\n",
+ " agentops.end_session(\"Indeterminate\")\n",
"\n",
"# Close your AgentOps session to indicate that it completed.\n",
"agentops.end_session(\"Success\")"
diff --git a/examples/langchain/langchain_examples.ipynb b/examples/langchain_examples/langchain_examples.ipynb
similarity index 98%
rename from examples/langchain/langchain_examples.ipynb
rename to examples/langchain_examples/langchain_examples.ipynb
index 361064c6..0e22a013 100644
--- a/examples/langchain/langchain_examples.ipynb
+++ b/examples/langchain_examples/langchain_examples.ipynb
@@ -25,6 +25,7 @@
"outputs": [],
"source": [
"%pip install langchain==0.2.9\n",
+ "%pip install langchain_openai\n",
"%pip install -U agentops\n",
"%pip install -U python-dotenv"
]
@@ -148,6 +149,10 @@
},
"outputs": [],
"source": [
+ "agentops_handler = AgentOpsLangchainCallbackHandler(\n",
+ " api_key=AGENTOPS_API_KEY, default_tags=[\"Langchain Example\"]\n",
+ ")\n",
+ "\n",
"llm = ChatOpenAI(\n",
" openai_api_key=OPENAI_API_KEY, callbacks=[agentops_handler], model=\"gpt-3.5-turbo\"\n",
")\n",
@@ -721,7 +726,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.12.5"
+ "version": "3.12.0"
}
},
"nbformat": 4,
diff --git a/examples/litellm-sdk/litellm_example.ipynb b/examples/litellm_examples/litellm_example.ipynb
similarity index 100%
rename from examples/litellm-sdk/litellm_example.ipynb
rename to examples/litellm_examples/litellm_example.ipynb
diff --git a/examples/multion/Autonomous_web_browsing.ipynb b/examples/multion_examples/Autonomous_web_browsing.ipynb
similarity index 100%
rename from examples/multion/Autonomous_web_browsing.ipynb
rename to examples/multion_examples/Autonomous_web_browsing.ipynb
diff --git a/examples/multion/Sample_browsing_agent.ipynb b/examples/multion_examples/Sample_browsing_agent.ipynb
similarity index 100%
rename from examples/multion/Sample_browsing_agent.ipynb
rename to examples/multion_examples/Sample_browsing_agent.ipynb
diff --git a/examples/multion/Step_by_step_web_browsing.ipynb b/examples/multion_examples/Step_by_step_web_browsing.ipynb
similarity index 100%
rename from examples/multion/Step_by_step_web_browsing.ipynb
rename to examples/multion_examples/Step_by_step_web_browsing.ipynb
diff --git a/examples/multion/Webpage_data_retrieval.ipynb b/examples/multion_examples/Webpage_data_retrieval.ipynb
similarity index 100%
rename from examples/multion/Webpage_data_retrieval.ipynb
rename to examples/multion_examples/Webpage_data_retrieval.ipynb
diff --git a/pyproject.toml b/pyproject.toml
index 9f366157..f3d7b9fa 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "agentops"
-version = "0.3.10"
+version = "0.3.11"
authors = [
{ name="Alex Reibman", email="areibman@gmail.com" },
{ name="Shawn Qiu", email="siyangqiu@gmail.com" },
diff --git a/tests/test_pre_init.py b/tests/test_pre_init.py
new file mode 100644
index 00000000..f87219ac
--- /dev/null
+++ b/tests/test_pre_init.py
@@ -0,0 +1,57 @@
+import pytest
+import requests_mock
+import time
+import agentops
+from agentops import record_action, track_agent
+from datetime import datetime
+from agentops.singleton import clear_singletons
+import contextlib
+
+jwts = ["some_jwt", "some_jwt2", "some_jwt3"]
+
+
+@pytest.fixture(autouse=True)
+def setup_teardown():
+ clear_singletons()
+ yield
+ agentops.end_all_sessions() # teardown part
+
+
+@contextlib.contextmanager
+@pytest.fixture(autouse=True)
+def mock_req():
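+ # intercept AgentOps API traffic so the tests never hit the real backend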
+ with requests_mock.Mocker() as m:
+ url = "https://api.agentops.ai"
+ m.post(url + "/v2/create_agent", text="ok")
+ m.post(url + "/v2/update_session", text="ok")
+ m.post(
+ url + "/v2/create_session", json={"status": "success", "jwt": "some_jwt"}
+ )
+
+ yield m
+
+
+@track_agent(name="TestAgent")
+class BasicAgent:
+ def __init__(self):
+ pass
+
+
+class TestPreInit:
+ def setup_method(self):
+ self.url = "https://api.agentops.ai"
+ self.api_key = "11111111-1111-4111-8111-111111111111"
+
+ def test_track_agent(self, mock_req):
+ agent = BasicAgent()
+
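+ # the agent was created before agentops.init(), so nothing has been sent to the API yet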
+ assert len(mock_req.request_history) == 0
+
+ agentops.init(api_key=self.api_key)
+
+ # Assert
+ # start session and create agent
+ assert len(mock_req.request_history) == 2
+ assert mock_req.last_request.headers["X-Agentops-Api-Key"] == self.api_key
+
+ agentops.end_session(end_state="Success")