Fix [Object object] chat title generation #5425

Merged · 3 commits · Apr 29, 2025
10 changes: 7 additions & 3 deletions core/llm/index.ts
@@ -71,7 +71,11 @@ export class LLMError extends Error {
 }

 export function isModelInstaller(provider: any): provider is ModelInstaller {
-  return provider && typeof provider.installModel === "function" && typeof provider.isInstallingModel === "function";;
+  return (
+    provider &&
+    typeof provider.installModel === "function" &&
+    typeof provider.isInstallingModel === "function"
+  );
 }

 type InteractionStatus = "in_progress" | "success" | "error" | "cancelled";
@@ -816,8 +820,8 @@ export abstract class BaseLLM implements ILLM {
     options: LLMFullCompletionOptions = {},
   ) {
     let completion = "";
-    for await (const chunk of this.streamChat(messages, signal, options)) {
-      completion += chunk.content;
+    for await (const message of this.streamChat(messages, signal, options)) {
+      completion += renderChatMessage(message);
     }
     return { role: "assistant" as const, content: completion };
   }
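This hunk is the heart of the fix. In this codebase a chat message's content may be an array of message parts rather than a plain string, and JavaScript coerces such an array to "[object Object]" when it is concatenated onto a string — which is exactly how the broken titles were produced. The sketch below is a minimal reconstruction of the failure mode and the shape of the fix; the simplified types and the render helper are illustrative stand-ins, not the real core type definitions or the actual renderChatMessage implementation:

    // Hypothetical, simplified stand-ins for the real core types.
    type MessagePart =
      | { type: "text"; text: string }
      | { type: "imageUrl"; imageUrl: { url: string } };
    type ChatMessage = { role: string; content: string | MessagePart[] };

    const msg: ChatMessage = {
      role: "assistant",
      content: [{ type: "text", text: "My Chat Title" }],
    };

    // Naive concatenation coerces the parts array to a string.
    let broken = "";
    broken += msg.content; // "[object Object]"

    // A renderChatMessage-style helper (assumed shape): flatten text parts.
    function render(message: ChatMessage): string {
      if (typeof message.content === "string") {
        return message.content;
      }
      return message.content
        .filter((p): p is { type: "text"; text: string } => p.type === "text")
        .map((p) => p.text)
        .join("");
    }

    let fixed = "";
    fixed += render(msg); // "My Chat Title"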
8 changes: 7 additions & 1 deletion core/llm/llms/Mock.ts
@@ -1,7 +1,11 @@
 import { ChatMessage, CompletionOptions, LLMOptions } from "../../index.js";
 import { BaseLLM } from "../index.js";

-type MockMessage = ChatMessage | "REPEAT_LAST_MSG" | "REPEAT_SYSTEM_MSG";
+type MockMessage =
+  | ChatMessage
+  | "REPEAT_LAST_MSG"
+  | "REPEAT_SYSTEM_MSG"
+  | "ERROR";

 class MockLLM extends BaseLLM {
   public completion: string = "Test Completion";
@@ -49,6 +53,8 @@ class MockLLM extends BaseLLM {
           messages.find((m) => m.role === "system")?.content || "",
         };
         break;
+      case "ERROR":
+        throw new Error("Intentional error");
       default:
         yield message;
     }
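The new "ERROR" entry is a sentinel that lets a test script a failure partway through a mocked stream. A trimmed-down sketch of the pattern — hypothetical and much simpler than the real MockLLM, which also supports chatStreams and the REPEAT_* sentinels:

    // Hypothetical minimal types standing in for the real core imports.
    type ChatMessage = { role: "user" | "assistant" | "system"; content: string };
    type MockMessage = ChatMessage | "ERROR";

    // Replays a scripted conversation, failing on the "ERROR" sentinel.
    async function* mockStream(script: MockMessage[]): AsyncGenerator<ChatMessage> {
      for (const message of script) {
        if (message === "ERROR") {
          // Throwing here rejects the consumer's for-await loop mid-stream.
          throw new Error("Intentional error");
        }
        yield message;
      }
    }

A consumer's `for await (const m of mockStream(["ERROR"]))` then rejects, which is what the updated chatDescriber test below relies on.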
34 changes: 6 additions & 28 deletions core/util/chatDescriber.test.ts
@@ -34,43 +34,21 @@ describe("ChatDescriber", () => {
       const message = "Test message";
       const completionOptions: LLMFullCompletionOptions = { temperature: 0.7 };

-      testLLM.chat = jest.fn().mockResolvedValue({ content: "Test response" });
+      testLLM.chatStreams = [[{ role: "assistant", content: "Test response" }]];

       await ChatDescriber.describe(testLLM, completionOptions, message);

       expect(completionOptions.maxTokens).toBe(ChatDescriber.maxTokens);
     });

-    it("should call model.chat with correct parameters", async () => {
-      const message = "Test message";
-      const cleanedMessage = "Test message";
-      const expectedPrompt = ChatDescriber.prompt + cleanedMessage;
-      const completionOptions: LLMFullCompletionOptions = {};
-
-      testLLM.chat = jest.fn().mockResolvedValue({ content: "Test response" });
-
-      await ChatDescriber.describe(testLLM, completionOptions, message);
-
-      expect(testLLM.chat).toHaveBeenCalledWith(
-        [
-          {
-            role: "user",
-            content: expectedPrompt,
-          },
-        ],
-        expect.any(AbortSignal),
-        completionOptions,
-      );
-    });
-
     it("should return processed content from the model response", async () => {
       const message = "Test message";
       const modelResponseContent = "Model response content";
       const expectedResult = "Model response content";

-      testLLM.chat = jest
-        .fn()
-        .mockResolvedValue({ content: modelResponseContent });
+      testLLM.chatStreams = [
+        [{ role: "assistant", content: modelResponseContent }],
+      ];

       const result = await ChatDescriber.describe(testLLM, {}, message);

@@ -81,11 +59,11 @@
       const message = "Test message";
       const completionOptions: LLMFullCompletionOptions = {};

-      testLLM.chat = jest.fn().mockRejectedValue(new Error("Chat error"));
+      testLLM.chatStreams = [["ERROR"]];

       await expect(
         ChatDescriber.describe(testLLM, completionOptions, message),
-      ).rejects.toThrow("Chat error");
+      ).rejects.toThrow();
     });
   });
 });
3 changes: 2 additions & 1 deletion core/util/chatDescriber.ts
@@ -4,6 +4,7 @@ import { removeCodeBlocksAndTrim, removeQuotesAndEscapes } from ".";

 import type { FromCoreProtocol, ToCoreProtocol } from "../protocol";
 import type { IMessenger } from "../protocol/messenger";
+import { renderChatMessage } from "./messageContent";

 export class ChatDescriber {
   static maxTokens = 12;
@@ -42,7 +43,7 @@ export class ChatDescriber {
     );

     // Set the title
-    return removeQuotesAndEscapes(titleResponse.content.toString());
+    return removeQuotesAndEscapes(renderChatMessage(titleResponse));
   }

   // // TODO: Allow the user to manually set specific/tailored prompts to generate their titles
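Taken together: since chat() now delegates to streamChat(), stubbing chat() directly would bypass the renderChatMessage path this PR fixes, which is presumably why the tests switch to scripting testLLM.chatStreams. The resulting title flow is: ChatDescriber.describe() prompts the model, BaseLLM.chat() aggregates the streamed messages through renderChatMessage(), and describe() strips quotes and escapes from the result. A hedged usage sketch — only the describe() signature and the maxTokens mutation are taken from the diffs above; the example title is illustrative:

    // model is any ILLM (e.g. the MockLLM above); describe() mutates the
    // options so that maxTokens becomes ChatDescriber.maxTokens (12),
    // per the first test in chatDescriber.test.ts.
    const options: LLMFullCompletionOptions = {};
    const title = await ChatDescriber.describe(
      model,
      options,
      "How do I parse JSON in TypeScript?",
    );
    // e.g. "Parsing JSON in TypeScript" — before this fix, a parts-array
    // response could surface here as "[object Object]".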