Skip to content

Prototype: A2A support through a protocols abstraction #171

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 1 commit into the base branch from the contributor's branch
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
52 changes: 52 additions & 0 deletions examples/iss_agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
"""ISS Location Agent Server

This agent can answer questions about the International Space Station's location
and calculate distances to various cities.

Run with: uv run examples/iss_agent.py
Then test with: uv run examples/iss_client.py
"""

import time

from strands import Agent
from strands.protocols import A2AProtocolServer
from strands_tools import http_request, python_repl

# Separator used for console banners.
BANNER = "=" * 50

# Create the ISS agent with tools for web requests and calculations
agent = Agent(
    tools=[http_request, python_repl],
    system_prompt="You are a helpful assistant that can answer questions about the International Space Station's location and calculate distances to various cities.",
    name="ISS Location Agent",
    description="An intelligent agent that tracks the International Space Station's real-time position and calculates distances to cities worldwide. Provides accurate geospatial analysis and space-related information.",
    # Uncomment to use a specific model:
    # model="us.amazon.nova-premier-v1:0",
    # model="us.anthropic.claude-sonnet-4-20250514-v1:0",
)

# Configure the A2A server: listen on all interfaces, port 8000.
server_config = A2AProtocolServer(
    port=8000,
    host="0.0.0.0",
    version="1.2.3",
)

print("Starting ISS Location Agent...")
print(f"Model: {agent.model.config}")

# Serve the agent - it's now ready to handle requests!
server = agent.serve(server_config)

print("\n" + BANNER)
print("ISS Agent is now running!")
print("- Agent card: http://localhost:8000/.well-known/agent.json")
print("- Send requests to: http://localhost:8000/")
print("- Test with: uv run examples/iss_client.py")
print(BANNER)

# Keep the main thread alive until Ctrl+C, then shut the server down cleanly.
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    print("\n\nShutting down ISS agent...")
    server.stop()
72 changes: 72 additions & 0 deletions examples/iss_client.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
"""Example client for the ISS agent.

This shows how to interact with the ISS agent once it's running.
First run: uv run examples/iss_agent.py
Then run: uv run examples/iss_client.py
"""

import time
from strands.protocols import A2AProtocolClient
import asyncio

# The URL where your ISS agent is running
AGENT_URL = "http://localhost:8000"

async def test_agent():
    """Probe the running ISS agent: fetch its agent card, then send it a question.

    Prints progress and results to stdout. Returns early (without raising)
    if the agent server cannot be reached at AGENT_URL.
    """
    banner = "=" * 50

    # First confirm that something is actually listening at AGENT_URL.
    print("Checking if agent server is running...")

    async with A2AProtocolClient(AGENT_URL) as client:
        try:
            # Fetch the discovery document describing the remote agent.
            print("Fetching agent card...")
            card = await client.fetch_agent_card()
            print("\n✅ Connected to agent!")
            print(f"Agent: {card.name}")
            print(f"Description: {card.description}")
            print(f"Available skills: {[skill.name for skill in card.skills]}")
        except Exception as err:
            print(f"❌ Failed to connect to agent at {AGENT_URL}")
            print(f"Error: {err}")
            print("\nMake sure the agent server is running:")
            print(" uv run examples/iss_agent.py")
            return

        # Announce the (potentially slow) request before sending it.
        print("\n" + banner)
        print("Sending ISS question to agent...")
        print("This may take a while as the agent needs to:")
        print("- Look up real-time ISS position")
        print("- Calculate distances to multiple cities")
        print("- Perform complex calculations")
        print(banner + "\n")

        question = (
            "Who is the closest to the ISS? People in: "
            "Portland, Vancouver, Seattle, or New York? "
            "First, lookup realtime information about the position of the ISS. "
            "Give me the altitude of the ISS, and the distance and vector from the closest city to the ISS. "
            "After you give me the answer, explain your reasoning and show me any code you used"
        )

        try:
            # Generous timeout: the agent does live lookups plus calculations.
            reply = await client.send_task_and_wait(
                message=question,
                timeout=120.0,  # 2 minutes for complex calculation
            )

            print("🚀 ISS Agent Response:")
            print(banner)
            print(reply)

        except TimeoutError as err:
            # On Python >= 3.11 this also covers asyncio.TimeoutError.
            print(f"⏱️ Request timed out: {err}")
            print("The agent may be taking longer than expected.")
            print("Try again or increase the timeout.")

        except Exception as err:
            print(f"❌ Error during request: {err}")

# Entry point: run the async client once on a fresh event loop, then exit.
if __name__ == "__main__":
    asyncio.run(test_agent())
10 changes: 8 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ name = "strands-agents"
dynamic = ["version"]
description = "A model-driven approach to building AI agents in just a few lines of code"
readme = "README.md"
requires-python = ">=3.10"
requires-python = ">=3.11"
license = {text = "Apache-2.0"}
authors = [
{name = "AWS", email = "opensource@amazon.com"},
Expand Down Expand Up @@ -59,7 +59,7 @@ dev = [
"pytest>=8.0.0,<9.0.0",
"pytest-asyncio>=0.26.0,<0.27.0",
"ruff>=0.4.4,<0.5.0",
"swagger-parser>=1.0.2,<2.0.0",
"swagger-parser>=1.0.1,<2.0.0",
]
docs = [
"sphinx>=5.0.0,<6.0.0",
Expand All @@ -78,6 +78,12 @@ ollama = [
openai = [
"openai>=1.68.0,<2.0.0",
]
a2a = [
"a2a-sdk>=0.2.5",
"httpx>=0.27.0",
"fastapi>=0.68.0",
"uvicorn>=0.15.0"
]

[tool.hatch.version]
# Tells Hatch to use your version control system (git) to determine the version.
Expand Down
4 changes: 2 additions & 2 deletions src/strands/__init__.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
"""A framework for building, deploying, and managing AI agents."""

# Submodules are imported alphabetically; `protocols` slots in between
# `models` and `telemetry` rather than being appended at the end.
from . import agent, event_loop, models, protocols, telemetry, types
from .agent.agent import Agent
from .tools.decorator import tool
from .tools.thread_pool_executor import ThreadPoolExecutorWrapper

# Public API of the package, kept sorted for readability and stable diffs.
__all__ = [
    "Agent",
    "ThreadPoolExecutorWrapper",
    "agent",
    "event_loop",
    "models",
    "protocols",
    "telemetry",
    "tool",
    "types",
]
Loading