34 commits
39cfe33
feat(openinference-core): decorators
mikeldking Oct 23, 2025
02cb8ad
codify input and output parsing
mikeldking Oct 23, 2025
260c81c
minimum working example
mikeldking Oct 24, 2025
793392a
minimum working example
mikeldking Oct 24, 2025
8ad36d0
add is promise utility
mikeldking Oct 24, 2025
30b9ae2
add promise type guard
mikeldking Oct 24, 2025
06709fe
refactor to helpers
mikeldking Oct 27, 2025
2e587c8
cleanup
mikeldking Oct 27, 2025
e7c6ccf
change signature
mikeldking Oct 27, 2025
4219b6a
make function wrapping work
mikeldking Oct 27, 2025
1e69a90
refactor
mikeldking Oct 27, 2025
bec1fc6
support promises
mikeldking Oct 27, 2025
0f249d1
make decorators re-use withSpan
mikeldking Oct 27, 2025
3c55e1b
add more helpers
mikeldking Oct 27, 2025
33c0ef5
lock file
mikeldking Oct 27, 2025
9fd6861
cleanup type guards
mikeldking Oct 27, 2025
fdd6f9e
add tests
mikeldking Oct 27, 2025
7c9fa10
cleanup
mikeldking Oct 27, 2025
4c0d497
make it match python
mikeldking Oct 27, 2025
f45aca8
cleanup utils
mikeldking Oct 27, 2025
60340f7
cleanup
mikeldking Oct 27, 2025
7f28266
cleanup
mikeldking Oct 27, 2025
992acdb
cleanup
mikeldking Oct 27, 2025
1708088
update more
mikeldking Oct 27, 2025
fd8109d
add tests
mikeldking Oct 27, 2025
b1469c3
fix importg
mikeldking Oct 27, 2025
b30dee3
cleanup
mikeldking Oct 27, 2025
7131d0b
cleanup
mikeldking Oct 27, 2025
78f3bbf
cleanup
mikeldking Oct 27, 2025
003900d
make types flexible
mikeldking Oct 28, 2025
633b764
add type inference
mikeldking Oct 28, 2025
5051a95
rename wrappers
mikeldking Oct 29, 2025
5699c43
update example
mikeldking Oct 29, 2025
2a83d16
cleanup
mikeldking Oct 29, 2025
295 changes: 295 additions & 0 deletions js/packages/openinference-core/examples/decorators_example.ts
@@ -0,0 +1,295 @@
import {
observe,
getLLMAttributes,
getEmbeddingAttributes,
getToolAttributes,
traceAgent,
getRetrieverAttributes,
getMetadataAttributes,
} from "../src";

import {
NodeTracerProvider,
SimpleSpanProcessor,
} from "@opentelemetry/sdk-trace-node";
import { resourceFromAttributes } from "@opentelemetry/resources";
import {
diag,
DiagConsoleLogger,
DiagLogLevel,
trace,
} from "@opentelemetry/api";
import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-proto";
import { SEMRESATTRS_PROJECT_NAME } from "@arizeai/openinference-semantic-conventions";
// For troubleshooting, set the log level to DiagLogLevel.DEBUG
diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.DEBUG);

// Create a tracer provider that exports spans to a local collector via OTLP

const provider = new NodeTracerProvider({
resource: resourceFromAttributes({
[SEMRESATTRS_PROJECT_NAME]: "ai-agent-example",
"service.name": "ai-agent-example",
}),
spanProcessors: [
new SimpleSpanProcessor(
new OTLPTraceExporter({
url: "http://localhost:6006/v1/traces",
}),
),
],
});

provider.register();

/**
* A simple AI agent that demonstrates various tracing scenarios
* including LLM interactions, embeddings, retrieval, and tool usage.
*/
class AIAgent {
private name: string;
private modelName: string;
private provider: string;

constructor(
name: string,
modelName: string = "gpt-4",
provider: string = "openai",
) {
this.name = name;
this.modelName = modelName;
this.provider = provider;
}

/**
* Generate embeddings for text input
*/
@observe({
kind: "EMBEDDING",
})
async generateEmbeddings(
texts: string[],
): Promise<Array<{ text: string; vector: number[] }>> {
// Simulate embedding generation
const embeddings = texts.map((text) => ({
text,
vector: Array.from({ length: 1536 }, () => Math.random() - 0.5), // Simulate 1536-dim vector
}));
const span = trace.getActiveSpan();
// Add embedding-specific attributes (demonstrates usage)
span?.setAttributes(
getEmbeddingAttributes({
modelName: "text-embedding-ada-002",
embeddings,
}),
);

return embeddings;
}

/**
* Retrieve relevant documents based on query
*/
@observe({
kind: "RETRIEVER",
})
async retrieveDocuments(
query: string,
topK: number = 5,
): Promise<
Array<{
content: string;
id: string;
score: number;
metadata?: Record<string, unknown>;
}>
> {
// Simulate document retrieval
const documents = [
{
content:
"Machine learning is a subset of artificial intelligence that focuses on algorithms.",
id: "doc_001",
score: 0.95,
metadata: { source: "wikipedia", category: "technology" },
},
{
content:
"Deep learning uses neural networks with multiple layers to process data.",
id: "doc_002",
score: 0.87,
metadata: { source: "research_paper", category: "ai" },
},
{
content:
"Natural language processing enables computers to understand human language.",
id: "doc_003",
score: 0.82,
metadata: { source: "textbook", category: "nlp" },
},
].slice(0, topK);
const span = trace.getActiveSpan();
span?.setAttributes(
getRetrieverAttributes({
documents,
}),
);
return documents;
}

/**
* Process user query with LLM
*/
@observe({
kind: "LLM",
})
async processQuery(
query: string,
context?: string[],
): Promise<{
response: string;
tokenCount: { prompt: number; completion: number; total: number };
}> {
// Simulate LLM processing
const inputMessages = [
{ role: "system", content: "You are a helpful AI assistant." },
{ role: "user", content: query },
];

if (context && context.length > 0) {
inputMessages.push({
role: "system",
content: `Context: ${context.join(" ")}`,
});
}

const response = `Based on your query "${query}", here's a comprehensive response that demonstrates AI capabilities.`;

const tokenCount = {
prompt: Math.floor(Math.random() * 100) + 50,
completion: Math.floor(Math.random() * 200) + 100,
total: 0,
};
tokenCount.total = tokenCount.prompt + tokenCount.completion;

// Add LLM-specific attributes (demonstrates usage)
const span = trace.getActiveSpan();
span?.setAttributes(
getLLMAttributes({
provider: this.provider,
modelName: this.modelName,
inputMessages,
outputMessages: [{ role: "assistant", content: response }],
tokenCount,
invocationParameters: {
temperature: 0.7,
max_tokens: 1000,
top_p: 0.9,
},
}),
);

return { response, tokenCount };
}

/**
* Use a tool to perform a specific action
*/
@observe({
kind: "TOOL",
})
async useTool(
toolName: string,
parameters: Record<string, unknown>,
): Promise<unknown> {
// Add tool-specific attributes (demonstrates usage)
const span = trace.getActiveSpan();
span?.setAttributes(
getToolAttributes({
name: toolName,
description: `Tool for ${toolName} operations`,
parameters,
}),
);

// Simulate different tool behaviors
switch (toolName) {
case "calculator":
return { result: (parameters.a as number) + (parameters.b as number) };
case "weather":
return {
temperature: 72,
condition: "sunny",
location: parameters.location,
};
case "search":
return { results: [`Search results for: ${parameters.query}`] };
default:
return { error: "Unknown tool" };
}
}

/**
* Complete AI workflow combining multiple operations
*/
@observe()
async processWorkflow(
userQuery: string,
): Promise<{ answer: string; sources: string[] }> {
// Step 1: Generate embeddings for the query
await this.generateEmbeddings([userQuery]);

// Step 2: Retrieve relevant documents
const documents = await this.retrieveDocuments(userQuery, 3);

// Step 3: Process with LLM using retrieved context
const context = documents.map((doc) => doc.content);
const llmResult = await this.processQuery(userQuery, context);

// Step 4: Use calculator tool if needed
if (userQuery.includes("calculate") || userQuery.includes("math")) {
await this.useTool("calculator", { a: 10, b: 20 });
}

return {
answer: llmResult.response,
sources: documents.map((doc) => doc.id),
};
}
}

// Example usage
async function runExample() {
const agent = new AIAgent("Sophia", "gpt-4", "openai");

try {
// Run a complete workflow
const result = await agent.processWorkflow(
"What is machine learning and how does it relate to deep learning?",
);

// Test individual components
const embeddings = await agent.generateEmbeddings(["test text"]);
const docs = await agent.retrieveDocuments("artificial intelligence", 2);
const toolResult = await agent.useTool("weather", {
location: "San Francisco",
});

return {
workflowResult: result,
embeddingsCount: embeddings.length,
documentsCount: docs.length,
toolResult,
};
} catch (error) {
throw new Error(`Error in workflow: ${error}`);
}
}

traceAgent(runExample, {
name: "my-agent",
attributes: getMetadataAttributes({
version: "1.0.0",
environment: "production",
}),
})();
@@ -0,0 +1,71 @@
import {
getInputAttributes,
getRetrieverAttributes,
traceAgent,
withSpan,
} from "../src";

import {
NodeTracerProvider,
SimpleSpanProcessor,
ConsoleSpanExporter,
} from "@opentelemetry/sdk-trace-node";
import { resourceFromAttributes } from "@opentelemetry/resources";
import { diag, DiagConsoleLogger, DiagLogLevel } from "@opentelemetry/api";
import { SEMRESATTRS_PROJECT_NAME } from "@arizeai/openinference-semantic-conventions";
import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-proto";

// For troubleshooting, set the log level to DiagLogLevel.DEBUG
diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.DEBUG);

const provider = new NodeTracerProvider({
resource: resourceFromAttributes({
[SEMRESATTRS_PROJECT_NAME]: "function-wrapping-example",
"service.name": "function-wrapping-example",
}),
spanProcessors: [
new SimpleSpanProcessor(new ConsoleSpanExporter()),
new SimpleSpanProcessor(
new OTLPTraceExporter({
url: "http://localhost:6006/v1/traces",
}),
),
],
});
provider.register();

const retriever = withSpan(
(_query: string) => {
return ["The capital of France is Paris."];
},
{
kind: "RETRIEVER",
name: "retriever",
processInput: (query) => getInputAttributes(query),
processOutput: (documents) => {
return {
...getRetrieverAttributes({
documents: documents.map((document) => ({
content: document,
})),
}),
};
},
},
);

// simple RAG agent
const agent = traceAgent(
async (question: string) => {
const documents = await retriever(question);
return `Let me help you answer that: ${question} ${documents.join("\n")}`;
},
{
name: "agent",
},
);
async function main() {
await agent("What is the capital of France?");
}

main();
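One caveat with the snippet above: `main()` is fired without awaiting the exporter, so the Node process can exit before the OTLP exporter has flushed its spans. A hedged variant that flushes and shuts down the provider before exiting is sketched below; `forceFlush()` and `shutdown()` are standard `TracerProvider` methods in the OpenTelemetry JS SDK.

```typescript
// Variant of main() that flushes pending spans before the process exits.
async function mainWithFlush() {
  await agent("What is the capital of France?");
  // Make sure buffered spans reach the collector before Node exits.
  await provider.forceFlush();
  await provider.shutdown();
}

mainWithFlush().catch((error) => {
  console.error(error);
  process.exitCode = 1;
});
```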
6 changes: 3 additions & 3 deletions js/packages/openinference-core/package.json
@@ -46,9 +46,9 @@
},
"devDependencies": {
"@opentelemetry/context-async-hooks": "^1.25.1",
"@opentelemetry/resources": "^1.19.0",
"@opentelemetry/sdk-trace-base": "^1.19.0",
"@opentelemetry/sdk-trace-node": "^1.19.0",
"@opentelemetry/exporter-trace-otlp-proto": "^0.207.0",
"@opentelemetry/resources": "^2.2.0",
"@opentelemetry/sdk-trace-node": "^2.2.0",
"@opentelemetry/semantic-conventions": "^1.19.0",
"@types/node": "^20.14.11",
"vitest": "^4.0.2"