Merge branch 'feat/disable-agentops' of https://github.com/heethjain21/agentops into feat/disable-agentops
heethjain21 committed Sep 18, 2024
2 parents 9890a10 + acdb4f3 commit 8cc5ddf
Showing 54 changed files with 482 additions and 305 deletions.
77 changes: 77 additions & 0 deletions .github/workflows/add-markdown-examples-to-docs.yml
@@ -0,0 +1,77 @@
name: Add Notebook Examples to Docs

on:
  push:
    branches:
      - main
    paths:
      - 'examples/**'
      - 'docs/v1/examples/**'

permissions:
  contents: write
  pull-requests: write

jobs:
  add-notebook-examples-to-docs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.x'

      - name: Install dependencies
        run: |
          pip install jupyter nbconvert

      - name: Convert notebooks to markdown and add to docs
        run: |
          set -x # Enable debug mode
          for file in docs/v1/examples/*.mdx; do
            echo "Processing file: $file"
            source_file=$(grep -oP '(?<=\{/\* SOURCE_FILE: ).*(?= \*/\})' "$file" || true)
            if [[ -z "$source_file" ]]; then
              echo "Error: No source file found in $file, skipping..." >&2
              continue
            fi
            echo "Source file: $source_file"
            if [[ -f "$source_file" ]]; then
              echo "Converting notebook to markdown"
              jupyter nbconvert --to markdown "$source_file" || { echo "Error: Failed to convert $source_file" >&2; continue; }
              markdown_file="${source_file%.ipynb}.md"
              echo "Appending markdown to $file"
              echo -e "\n\n" >> "$file"
              cat "$markdown_file" >> "$file" || { echo "Error: Failed to append markdown to $file" >&2; continue; }
              rm "$markdown_file" || { echo "Error: Failed to remove $markdown_file" >&2; continue; }
            else
              echo "Error: Source file not found: $source_file" >&2
            fi
          done

      - name: Commit changes
        run: |
          git config --local user.email "action@github.com"
          git config --local user.name "GitHub Action"
          git add docs/v1/examples/*.mdx
          git diff --quiet && git diff --staged --quiet || git commit -m "GitHub Action: Update examples in docs from notebooks"

      - name: Create Pull Request
        uses: peter-evans/create-pull-request@v5
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          commit-message: Update examples in docs from notebooks
          title: 'Update examples in docs from notebooks'
          body: |
            This PR updates the examples in the docs from the corresponding notebooks.
            Please review the changes before merging.
          branch: update-docs-examples
          base: main

      # - name: Push changes
      #   uses: ad-m/github-push-action@master
      #   with:
      #     github_token: ${{ secrets.GITHUB_TOKEN }}
      #     branch: main
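
For context on the grep step above: each docs page under docs/v1/examples/ is expected to carry an MDX comment of the form {/* SOURCE_FILE: path/to/notebook.ipynb */}, which the workflow reads to find the notebook to convert. A minimal Python sketch of the same extraction (the sample path is hypothetical):

```python
import re

# Mirrors the workflow's grep -oP lookaround pattern.
SOURCE_FILE_PATTERN = re.compile(r"(?<=\{/\* SOURCE_FILE: ).*(?= \*/\})")

def find_source_notebook(mdx_text: str) -> str | None:
    """Return the notebook path declared in an .mdx doc, or None if no marker exists."""
    match = SOURCE_FILE_PATTERN.search(mdx_text)
    return match.group(0) if match else None

# Hypothetical marker line as it would appear in a docs/v1/examples/*.mdx file:
print(find_source_notebook("{/* SOURCE_FILE: examples/demo/demo_notebook.ipynb */}"))
# -> examples/demo/demo_notebook.ipynb
```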
10 changes: 8 additions & 2 deletions .github/workflows/codecov.yml
@@ -2,9 +2,15 @@ name: Codecov
 
 on:
   push:
-    branches: [ main ]
+    branches:
+      - main
+    paths:
+      - 'agentops/**'
   pull_request:
-    branches: [ main ]
+    branches:
+      - main
+    paths:
+      - 'agentops/**'
 
 jobs:
   test:
6 changes: 6 additions & 0 deletions .github/workflows/python-testing.yml
@@ -4,9 +4,15 @@ on:
   push:
     branches:
      - main
+    paths:
+      - 'agentops/**'
+      - 'tests/**'
   pull_request:
     branches:
       - main
+    paths:
+      - 'agentops/**'
+      - 'tests/**'
 
 jobs:
   build:
8 changes: 6 additions & 2 deletions .github/workflows/tach-check.yml
@@ -1,7 +1,11 @@
-
 name: Tach Check
 
-on: [pull_request]
+on:
+  pull_request:
+    paths:
+      - 'agentops/**'
+      - 'tests/**'
+      - 'examples/**'
 
 jobs:
   tach-check:
17 changes: 7 additions & 10 deletions .github/workflows/test-notebooks.yml
@@ -1,14 +1,7 @@
 name: Test Notebooks
 on:
-  push:
-    branches:
-      - main
-    paths:
-      - "agentops/**"
-      - "examples/**"
-      - "tests/**"
-      - ".github/workflows/test-notebooks.yml"
-  pull_request_target:
+  pull_request:
+    types: [closed]
     branches:
       - main
     paths:
@@ -43,13 +36,17 @@ jobs:
           echo "GROQ_API_KEY=${{ secrets.GROQ_API_KEY }}" >> .env
           echo "MULTION_API_KEY=${{ secrets.MULTION_API_KEY }}" >> .env
          echo "SERPER_API_KEY=${{ secrets.SERPER_API_KEY }}" >> .env
+      - name: Install AgentOps from main branch and remove agentops install from notebooks
+        run: |
+          pip install git+https://github.com/AgentOps-AI/agentops.git@main
+          find . -name '*.ipynb' -exec sed -i '/^%pip install.*agentops/d' {} +
       - name: Run notebooks and check for errors
         run: |
           mkdir -p logs
           exit_code=0
           exclude_notebooks=(
-            "./examples/crew/job_posting.ipynb",
+            "./examples/crewai_examples/job_posting.ipynb",
             "./examples/demos/agentchat_agentops.ipynb"
           )
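
The new install step above pulls AgentOps from the main branch and then deletes any line that begins with %pip install and mentions agentops from the notebooks, so that install is not overridden when the notebooks run. A small Python check of the same pattern the sed command uses (the sample lines are made up):

```python
import re

# Same expression the workflow passes to sed: /^%pip install.*agentops/d
PIP_INSTALL_AGENTOPS = re.compile(r"^%pip install.*agentops")

sample_lines = [
    "%pip install -U agentops",      # dropped
    "%pip install openai agentops",  # dropped
    "%pip install openai",           # kept
    "import agentops",               # kept
]
kept = [line for line in sample_lines if not PIP_INSTALL_AGENTOPS.search(line)]
print(kept)  # ['%pip install openai', 'import agentops']
```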
9 changes: 8 additions & 1 deletion agentops/__init__.py
@@ -9,6 +9,8 @@
 from .log_config import logger
 from .session import Session
 import threading
+from importlib.metadata import version as get_version
+from packaging import version
 
 try:
     from .partners.langchain_callback_handler import (
@@ -23,7 +25,12 @@
     Client().add_default_tags(["autogen"])
 
 if "crewai" in sys.modules:
-    Client().configure(instrument_llm_calls=False)
+    crew_version = version.parse(get_version("crewai"))
+
+    # uses langchain, greater versions will use litellm and default is to instrument
+    if crew_version < version.parse("0.56.0"):
+        Client().configure(instrument_llm_calls=False)
+
     Client().add_default_tags(["crewai"])
 
 
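
The crewai branch above keys the configuration off the installed package version: releases before 0.56.0 route LLM calls through langchain, so AgentOps turns its own LLM-call instrumentation off for them, while newer litellm-based releases keep the default. A standalone sketch of that version gate (package name and threshold taken from the diff, the helper name is hypothetical):

```python
from importlib.metadata import PackageNotFoundError, version as get_version

from packaging import version


def should_instrument_llm_calls(package: str = "crewai", threshold: str = "0.56.0") -> bool:
    """Return True when the default LLM-call instrumentation should stay enabled."""
    try:
        installed = version.parse(get_version(package))
    except PackageNotFoundError:
        return True  # package not installed, nothing to special-case
    return installed >= version.parse(threshold)


print(should_instrument_llm_calls())
```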
19 changes: 18 additions & 1 deletion agentops/client.py
@@ -38,6 +38,7 @@ def __init__(self):
         self._llm_tracker: Optional[LlmTracker] = None
         self._sessions: List[Session] = active_sessions
         self._config = Configuration()
+        self._pre_init_queue = {"agents": []}
 
         self.configure(
             api_key=os.environ.get("AGENTOPS_API_KEY"),
@@ -111,6 +112,13 @@ def initialize(self) -> Union[Session, None]:
         if self._config.auto_start_session:
             session = self.start_session()
 
+        if session:
+            for agent_args in self._pre_init_queue["agents"]:
+                session.create_agent(
+                    name=agent_args["name"], agent_id=agent_args["agent_id"]
+                )
+            self._pre_init_queue["agents"] = []
+
         return session
 
     def _initialize_partner_framework(self) -> None:
@@ -251,6 +259,13 @@ def start_session(
             config=self._config,
         )
 
+        if self._pre_init_queue["agents"] and len(self._pre_init_queue["agents"]) > 0:
+            for agent_args in self._pre_init_queue["agents"]:
+                session.create_agent(
+                    name=agent_args["name"], agent_id=agent_args["agent_id"]
+                )
+            self._pre_init_queue["agents"] = []
+
         if not session.is_running:
             return logger.error("Failed to start session")
 
@@ -311,7 +326,9 @@ def create_agent(
             # if no session passed, assume single session
             session = self._safe_get_session()
             if session is None:
-                return
+                self._pre_init_queue["agents"].append(
+                    {"name": name, "agent_id": agent_id}
+                )
             session.create_agent(name=name, agent_id=agent_id)
 
         return agent_id
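
Taken together, the client.py changes above add a deferred-registration queue: agents created before any session exists are parked in _pre_init_queue and replayed once a session starts, either through initialize() or start_session(). A minimal self-contained sketch of the pattern (simplified stand-ins, not the real AgentOps classes):

```python
from typing import Dict, List, Optional


class Session:
    def create_agent(self, name: str, agent_id: str) -> None:
        print(f"registered agent {name} ({agent_id})")


class Client:
    def __init__(self) -> None:
        self._session: Optional[Session] = None
        # Agents created before a session exists are queued here.
        self._pre_init_queue: Dict[str, List[dict]] = {"agents": []}

    def create_agent(self, name: str, agent_id: str) -> None:
        if self._session is None:
            self._pre_init_queue["agents"].append({"name": name, "agent_id": agent_id})
            return
        self._session.create_agent(name=name, agent_id=agent_id)

    def start_session(self) -> Session:
        self._session = Session()
        # Flush whatever was queued before the session existed.
        for agent_args in self._pre_init_queue["agents"]:
            self._session.create_agent(
                name=agent_args["name"], agent_id=agent_args["agent_id"]
            )
        self._pre_init_queue["agents"] = []
        return self._session


client = Client()
client.create_agent("researcher", "agent-1")  # no session yet: queued
client.start_session()                        # prints: registered agent researcher (agent-1)
```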
12 changes: 2 additions & 10 deletions agentops/decorators.py
@@ -326,12 +326,6 @@ def new_init(self, *args, **kwargs):
 
             original_init(self, *args, **kwargs)
 
-            if not Client().is_initialized:
-                Client().add_pre_init_warning(
-                    f"Failed to track an agent {name} because agentops.init() was not "
-                    + "called before initializing the agent with the @track_agent decorator."
-                )
-
             self.agent_ops_agent_id = str(uuid4())
 
             session = kwargs.get("session", None)
@@ -345,12 +339,10 @@
                 )
             except AttributeError as e:
                 Client().add_pre_init_warning(
-                    f"Failed to track an agent {name} because agentops.init() was not "
-                    + "called before initializing the agent with the @track_agent decorator."
+                    f"Failed to track an agent {name} with the @track_agent decorator."
                 )
                 logger.warning(
-                    "Failed to track an agent. This often happens if agentops.init() was not "
-                    "called before initializing an agent with the @track_agent decorator."
+                    "Failed to track an agent with the @track_agent decorator."
                 )
                 original_init(self, *args, **kwargs)
 
1 change: 0 additions & 1 deletion agentops/llms/__init__.py
@@ -43,7 +43,6 @@ class LlmTracker:
 
     def __init__(self, client):
         self.client = client
-        self.completion = ""
 
     def override_api(self):
         """
50 changes: 24 additions & 26 deletions agentops/llms/anthropic.py
@@ -31,27 +31,27 @@ def handle_response(
         from anthropic.resources import AsyncMessages
         from anthropic.types import Message
 
-        self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
+        llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
         if session is not None:
-            self.llm_event.session_id = session.session_id
+            llm_event.session_id = session.session_id
 
         def handle_stream_chunk(chunk: Message):
             try:
                 # We take the first chunk and accumulate the deltas from all subsequent chunks to build one full chat completion
                 if chunk.type == "message_start":
-                    self.llm_event.returns = chunk
-                    self.llm_event.agent_id = check_call_stack_for_agent_id()
-                    self.llm_event.model = kwargs["model"]
-                    self.llm_event.prompt = kwargs["messages"]
-                    self.llm_event.prompt_tokens = chunk.message.usage.input_tokens
-                    self.llm_event.completion = {
+                    llm_event.returns = chunk
+                    llm_event.agent_id = check_call_stack_for_agent_id()
+                    llm_event.model = kwargs["model"]
+                    llm_event.prompt = kwargs["messages"]
+                    llm_event.prompt_tokens = chunk.message.usage.input_tokens
+                    llm_event.completion = {
                         "role": chunk.message.role,
                         "content": "", # Always returned as [] in this instance type
                     }
 
                 elif chunk.type == "content_block_start":
                     if chunk.content_block.type == "text":
-                        self.llm_event.completion["content"] += chunk.content_block.text
+                        llm_event.completion["content"] += chunk.content_block.text
 
                     elif chunk.content_block.type == "tool_use":
                         self.tool_id = chunk.content_block.id
@@ -62,7 +62,7 @@ def handle_stream_chunk(chunk: Message):
 
                 elif chunk.type == "content_block_delta":
                     if chunk.delta.type == "text_delta":
-                        self.llm_event.completion["content"] += chunk.delta.text
+                        llm_event.completion["content"] += chunk.delta.text
 
                     elif chunk.delta.type == "input_json_delta":
                         self.tool_event[self.tool_id].logs[
@@ -73,15 +73,15 @@
                     pass
 
                 elif chunk.type == "message_delta":
-                    self.llm_event.completion_tokens = chunk.usage.output_tokens
+                    llm_event.completion_tokens = chunk.usage.output_tokens
 
                 elif chunk.type == "message_stop":
-                    self.llm_event.end_timestamp = get_ISO_time()
-                    self._safe_record(session, self.llm_event)
+                    llm_event.end_timestamp = get_ISO_time()
+                    self._safe_record(session, llm_event)
 
             except Exception as e:
                 self._safe_record(
-                    session, ErrorEvent(trigger_event=self.llm_event, exception=e)
+                    session, ErrorEvent(trigger_event=llm_event, exception=e)
                 )
 
         kwargs_str = pprint.pformat(kwargs)
@@ -124,23 +124,21 @@ async def async_generator():
 
         # Handle object responses
         try:
-            self.llm_event.returns = response.model_dump()
-            self.llm_event.agent_id = check_call_stack_for_agent_id()
-            self.llm_event.prompt = kwargs["messages"]
-            self.llm_event.prompt_tokens = response.usage.input_tokens
-            self.llm_event.completion = {
+            llm_event.returns = response.model_dump()
+            llm_event.agent_id = check_call_stack_for_agent_id()
+            llm_event.prompt = kwargs["messages"]
+            llm_event.prompt_tokens = response.usage.input_tokens
+            llm_event.completion = {
                 "role": "assistant",
                 "content": response.content[0].text,
             }
-            self.llm_event.completion_tokens = response.usage.output_tokens
-            self.llm_event.model = response.model
-            self.llm_event.end_timestamp = get_ISO_time()
+            llm_event.completion_tokens = response.usage.output_tokens
+            llm_event.model = response.model
+            llm_event.end_timestamp = get_ISO_time()
 
-            self._safe_record(session, self.llm_event)
+            self._safe_record(session, llm_event)
         except Exception as e:
-            self._safe_record(
-                session, ErrorEvent(trigger_event=self.llm_event, exception=e)
-            )
+            self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e))
         kwargs_str = pprint.pformat(kwargs)
         response = pprint.pformat(response)
         logger.warning(
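
Every change in this file follows one pattern: the event being built moves from self.llm_event, an attribute shared by every call on the provider instance, to a local llm_event, presumably so that overlapping or streamed completions no longer overwrite each other's partially built event. A toy illustration of the hazard being removed (simplified, not the real provider classes):

```python
class SharedEventTracker:
    """Before: one llm_event attribute shared by all in-flight requests."""

    def start(self, prompt: str) -> None:
        self.llm_event = {"prompt": prompt, "completion": ""}

    def add_chunk(self, text: str) -> None:
        self.llm_event["completion"] += text


class LocalEventTracker:
    """After: each request accumulates into its own event object."""

    def start(self, prompt: str) -> dict:
        return {"prompt": prompt, "completion": ""}

    def add_chunk(self, event: dict, text: str) -> None:
        event["completion"] += text


shared = SharedEventTracker()
shared.start("prompt A")
shared.start("prompt B")         # prompt A's half-built event is silently replaced
shared.add_chunk("chunk for B")  # anything recorded for A is now lost
```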
