Merge branch 'main' into wfh/guidance_on_async

hinthornw committed Oct 9, 2024
2 parents e3e1101 + 550b28d commit 18ed4aa
Showing 12 changed files with 326 additions and 60 deletions.
3 changes: 2 additions & 1 deletion README.md
@@ -6,7 +6,8 @@
![NPM Version](https://img.shields.io/npm/v/langsmith?logo=npm)
[![JS Downloads](https://img.shields.io/npm/dm/langsmith)](https://www.npmjs.com/package/langsmith)

-This repository contains the Python and Javascript SDK's for interacting with the [LangSmith platform](https://smith.langchain.com/).
+This repository contains the Python and Javascript SDK's for interacting with the [LangSmith platform](https://smith.langchain.com/). Please see [LangSmith Documentation](https://docs.smith.langchain.com/)
+for documentation about using the LangSmith platform and the client SDK.

LangSmith helps your team debug, evaluate, and monitor your language models and intelligent agents. It works
with any LLM Application, including a native integration with the [LangChain Python](https://github.com/langchain-ai/langchain) and [LangChain JS](https://github.com/langchain-ai/langchainjs) open source libraries.
4 changes: 2 additions & 2 deletions js/package.json
@@ -1,6 +1,6 @@
{
  "name": "langsmith",
-  "version": "0.1.61",
+  "version": "0.1.63",
  "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.",
  "packageManager": "yarn@1.22.19",
  "files": [
@@ -127,7 +127,7 @@
"eslint-plugin-prettier": "^4.2.1",
"jest": "^29.5.0",
"langchain": "^0.3.2",
"openai": "^4.38.5",
"openai": "^4.67.3",
"prettier": "^2.8.8",
"ts-jest": "^29.1.0",
"ts-node": "^10.9.1",
52 changes: 46 additions & 6 deletions js/src/client.ts
@@ -73,6 +73,7 @@ export interface ClientConfig {
  hideOutputs?: boolean | ((outputs: KVMap) => KVMap);
  autoBatchTracing?: boolean;
  pendingAutoBatchedRunLimit?: number;
+  blockOnRootRunFinalization?: boolean;
  fetchOptions?: RequestInit;
}

@@ -357,18 +358,22 @@ const handle429 = async (response?: Response) => {
};

export class Queue<T> {
-  items: [T, () => void][] = [];
+  items: [T, () => void, Promise<void>][] = [];

  get size() {
    return this.items.length;
  }

  push(item: T): Promise<void> {
-    // this.items.push is synchronous with promise creation:
-    // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/Promise
-    return new Promise<void>((resolve) => {
-      this.items.push([item, resolve]);
+    let itemPromiseResolve;
+    const itemPromise = new Promise<void>((resolve) => {
+      // Setting itemPromiseResolve is synchronous with promise creation:
+      // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/Promise
+      itemPromiseResolve = resolve;
    });
+    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+    this.items.push([item, itemPromiseResolve!, itemPromise]);
+    return itemPromise;
  }

  pop(upToN: number): [T[], () => void] {
@@ -434,6 +439,8 @@ export class Client {

  private settings: Promise<LangSmithSettings> | null;

+  private blockOnRootRunFinalization = true;

  constructor(config: ClientConfig = {}) {
    const defaultConfig = Client.getDefaultClientConfig();

@@ -460,6 +467,8 @@
      config.hideOutputs ?? config.anonymizer ?? defaultConfig.hideOutputs;

    this.autoBatchTracing = config.autoBatchTracing ?? this.autoBatchTracing;
+    this.blockOnRootRunFinalization =
+      config.blockOnRootRunFinalization ?? this.blockOnRootRunFinalization;
    this.pendingAutoBatchedRunLimit =
      config.pendingAutoBatchedRunLimit ?? this.pendingAutoBatchedRunLimit;
    this.fetchOptions = config.fetchOptions || {};
@@ -966,7 +975,11 @@
      data.trace_id !== undefined &&
      data.dotted_order !== undefined
    ) {
-      if (run.end_time !== undefined && data.parent_run_id === undefined) {
+      if (
+        run.end_time !== undefined &&
+        data.parent_run_id === undefined &&
+        this.blockOnRootRunFinalization
+      ) {
        // Trigger a batch as soon as a root trace ends and block to ensure trace finishes
        // in serverless environments.
        await this.processRunOperation({ action: "update", item: data }, true);
@@ -3885,4 +3898,31 @@
      throw new Error(`Invalid public ${kind} URL or token: ${urlOrToken}`);
    }
  }

+  /**
+   * Awaits all pending trace batches. Useful for environments where
+   * you need to be sure that all tracing requests finish before execution ends,
+   * such as serverless environments.
+   *
+   * @example
+   * ```
+   * import { Client } from "langsmith";
+   *
+   * const client = new Client();
+   *
+   * try {
+   *   // Tracing happens here
+   *   ...
+   * } finally {
+   *   await client.awaitPendingTraceBatches();
+   * }
+   * ```
+   *
+   * @returns A promise that resolves once all currently pending traces have sent.
+   */
+  public awaitPendingTraceBatches() {
+    return Promise.all(
+      this.autoBatchQueue.items.map(([, , promise]) => promise)
+    );
+  }
}
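Taken together, the new `blockOnRootRunFinalization` option and `awaitPendingTraceBatches()` let a serverless handler skip the per-root-run blocking flush and drain the queue once before the runtime suspends. A minimal sketch of that pattern — the handler shape and the traced function are illustrative assumptions, not part of this commit:

```ts
import { Client } from "langsmith";
import { traceable } from "langsmith/traceable";

// Don't block each time a root run ends; flush once in the handler instead.
const client = new Client({ blockOnRootRunFinalization: false });

// A hypothetical traced unit of work.
const pipeline = traceable(
  async (text: string) => text.toUpperCase(),
  { name: "pipeline", client }
);

// A generic serverless entry point (shape assumed for illustration).
export async function handler(event: { text: string }) {
  try {
    return await pipeline(event.text);
  } finally {
    // Drain all queued trace batches before the environment freezes.
    await client.awaitPendingTraceBatches();
  }
}
```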
2 changes: 1 addition & 1 deletion js/src/index.ts
@@ -14,4 +14,4 @@ export { RunTree, type RunTreeConfig } from "./run_trees.js";
export { overrideFetchImplementation } from "./singletons/fetch.js";

// Update using yarn bump-version
export const __version__ = "0.1.61";
export const __version__ = "0.1.63";
113 changes: 113 additions & 0 deletions js/src/tests/batch_client.test.ts
@@ -279,6 +279,119 @@ describe("Batch client tracing", () => {
});
});

it("should not trigger a batch on root run end and instead batch call with previous batch if blockOnRootRunFinalization is false", async () => {
const client = new Client({
apiKey: "test-api-key",
autoBatchTracing: true,
blockOnRootRunFinalization: false,
});
const callSpy = jest
.spyOn((client as any).batchIngestCaller, "call")
.mockResolvedValue({
ok: true,
text: () => "",
});
jest
.spyOn(client as any, "batchEndpointIsSupported")
.mockResolvedValue(true);
const projectName = "__test_batch";

const runId = uuidv4();
const dottedOrder = convertToDottedOrderFormat(
new Date().getTime() / 1000,
runId
);
await client.createRun({
id: runId,
project_name: projectName,
name: "test_run",
run_type: "llm",
inputs: { text: "hello world" },
trace_id: runId,
dotted_order: dottedOrder,
});

expect((client as any).autoBatchQueue.size).toBe(1);
// Wait for first batch to send
await new Promise((resolve) => setTimeout(resolve, 300));
expect((client as any).autoBatchQueue.size).toBe(0);

const endTime = Math.floor(new Date().getTime() / 1000);

// Start the the second batch
await client.updateRun(runId, {
outputs: { output: ["Hi"] },
dotted_order: dottedOrder,
trace_id: runId,
end_time: endTime,
});

const runId2 = uuidv4();
const dottedOrder2 = convertToDottedOrderFormat(
new Date().getTime() / 1000,
runId2
);

// Should aggregate on the second batch
await client.createRun({
id: runId2,
project_name: projectName,
name: "test_run",
run_type: "llm",
inputs: { text: "hello world 2" },
trace_id: runId2,
dotted_order: dottedOrder2,
});

// 2 runs in the queue
expect((client as any).autoBatchQueue.size).toBe(2);
await client.awaitPendingTraceBatches();
expect((client as any).autoBatchQueue.size).toBe(0);

expect(callSpy.mock.calls.length).toEqual(2);
const calledRequestParam: any = callSpy.mock.calls[0][2];
const calledRequestParam2: any = callSpy.mock.calls[1][2];
expect(JSON.parse(calledRequestParam?.body)).toEqual({
post: [
expect.objectContaining({
id: runId,
run_type: "llm",
inputs: {
text: "hello world",
},
trace_id: runId,
dotted_order: dottedOrder,
}),
],
patch: [],
});

expect(JSON.parse(calledRequestParam2?.body)).toEqual({
post: [
expect.objectContaining({
id: runId2,
run_type: "llm",
inputs: {
text: "hello world 2",
},
trace_id: runId2,
dotted_order: dottedOrder2,
}),
],
patch: [
expect.objectContaining({
id: runId,
dotted_order: dottedOrder,
trace_id: runId,
end_time: endTime,
outputs: {
output: ["Hi"],
},
}),
],
});
});

it("should send traces above the batch size and see even batches", async () => {
const client = new Client({
apiKey: "test-api-key",
Expand Down
4 changes: 2 additions & 2 deletions js/src/tests/traceable.test.ts
@@ -589,7 +589,7 @@ describe("async generators", () => {
);

    const numbers: number[] = [];
-    for await (const num of await stream()) {
+    for await (const num of (await stream()) as unknown as AsyncGenerator<number>) {
      numbers.push(num);
    }

@@ -719,7 +719,7 @@
    const { client, callSpy } = mockClient();
    const parrotStream = traceable(
      async function* parrotStream(input: ReadableStream<string>) {
-        for await (const token of input) {
+        for await (const token of input as unknown as AsyncGenerator<string>) {
          yield token;
        }
      },
40 changes: 40 additions & 0 deletions js/src/tests/wrapped_openai.int.test.ts
@@ -6,6 +6,8 @@ import { wrapOpenAI } from "../wrappers/index.js";
import { Client } from "../client.js";
import { mockClient } from "./utils/mock_client.js";
import { getAssumedTreeFromCalls } from "./utils/tree.js";
+import { zodResponseFormat } from "openai/helpers/zod";
+import { z } from "zod";

test("wrapOpenAI should return type compatible with OpenAI", async () => {
  let originalClient = new OpenAI();
@@ -534,3 +536,41 @@ test("chat.concurrent extra name", async () => {
    },
  });
});

test.concurrent("beta.chat.completions.parse", async () => {
const { client, callSpy } = mockClient();

const openai = wrapOpenAI(new OpenAI(), {
client,
});

await openai.beta.chat.completions.parse({
model: "gpt-4o-mini",
temperature: 0,
messages: [
{
role: "user",
content: "I am Jacob",
},
],
response_format: zodResponseFormat(
z.object({
name: z.string(),
}),
"name"
),
});

for (const call of callSpy.mock.calls) {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
expect(["POST", "PATCH"]).toContain((call[2] as any)["method"]);
// eslint-disable-next-line @typescript-eslint/no-explicit-any
expect(JSON.parse((call[2] as any).body).extra.metadata).toEqual({
ls_model_name: "gpt-4o-mini",
ls_model_type: "chat",
ls_provider: "openai",
ls_temperature: 0,
});
}
callSpy.mockClear();
});
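For reference, a sketch of how the newly tested `beta.chat.completions.parse` path might look in application code rather than in the test harness. The schema, prompt, and model name are illustrative assumptions; `wrapOpenAI`, `zodResponseFormat`, and the `parsed` field are the real APIs exercised by the test above:

```ts
import OpenAI from "openai";
import { z } from "zod";
import { zodResponseFormat } from "openai/helpers/zod";
import { wrapOpenAI } from "langsmith/wrappers";

// Wrap the client so structured-output calls are traced to LangSmith.
const openai = wrapOpenAI(new OpenAI());

// An illustrative schema for the structured output.
const Person = z.object({ name: z.string() });

const completion = await openai.beta.chat.completions.parse({
  model: "gpt-4o-mini",
  messages: [{ role: "user", content: "I am Jacob" }],
  response_format: zodResponseFormat(Person, "person"),
});

// `parsed` is typed against the zod schema.
console.log(completion.choices[0].message.parsed?.name);
```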