Skip to content

Commit

Permalink
fixed failing tests
Browse files · Browse the repository at this point in the history
  • Loading branch information
langchain4j committed Sep 17, 2024
1 parent 558433d commit 07ce173
Show file tree
Hide file tree
Showing 7 changed files with 34 additions and 23 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/nightly.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ jobs:
TAVILY_API_KEY: ${{ secrets.TAVILY_API_KEY }}
WEAVIATE_API_KEY: ${{ secrets.WEAVIATE_API_KEY }}
WEAVIATE_HOST: ${{ secrets.WEAVIATE_HOST }}
CI_DELAY_SECONDS_AZURE_OPENAI: 2
CI_DELAY_SECONDS_AZURE_OPENAI: 1
CI_DELAY_SECONDS_BEDROCK: 1
CI_DELAY_SECONDS_GOOGLE_AI_GEMINI: 10
CI_DELAY_SECONDS_VERTEX_AI_GEMINI: 5
CI_DELAY_SECONDS_GOOGLE_AI_GEMINI: 2
CI_DELAY_SECONDS_VERTEX_AI_GEMINI: 1
Original file line number Diff line number Diff line change
Expand Up @@ -367,7 +367,7 @@ public String getLocation() {
void afterEach() throws InterruptedException {
String ciDelaySeconds = System.getenv("CI_DELAY_SECONDS_AZURE_OPENAI");
if (ciDelaySeconds != null) {
Thread.sleep(Integer.parseInt(ciDelaySeconds));
Thread.sleep(Integer.parseInt(ciDelaySeconds) * 1000L);
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -409,7 +409,7 @@ void tools_should_work_without_tokenizer() {
void afterEach() throws InterruptedException {
String ciDelaySeconds = System.getenv("CI_DELAY_SECONDS_AZURE_OPENAI");
if (ciDelaySeconds != null) {
Thread.sleep(Integer.parseInt(ciDelaySeconds));
Thread.sleep(Integer.parseInt(ciDelaySeconds) * 1000L);
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -337,7 +337,7 @@ void testBedrockMistralAiMixtral8x7bInstructChatModel() {
void afterEach() throws InterruptedException {
String ciDelaySeconds = System.getenv("CI_DELAY_SECONDS_BEDROCK");
if (ciDelaySeconds != null) {
Thread.sleep(Integer.parseInt(ciDelaySeconds));
Thread.sleep(Integer.parseInt(ciDelaySeconds) * 1000L);
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -589,9 +589,10 @@ void should_deserialize_to_POJO() {
.apiKey(GOOGLE_AI_GEMINI_API_KEY)
.modelName("gemini-1.5-flash")
.logRequestsAndResponses(true)
.responseFormat(ResponseFormat.builder()
.jsonSchema(JsonSchemas.jsonSchemaFrom(Color.class).get())
.build())
.responseFormat(ResponseFormat.builder()
.type(JSON)
.jsonSchema(JsonSchemas.jsonSchemaFrom(Color.class).get())
.build())
// Equivalent to:
// .responseFormat(ResponseFormat.builder()
// .type(JSON)
Expand Down Expand Up @@ -677,7 +678,7 @@ void should_support_tool_config() {
void afterEach() throws InterruptedException {
String ciDelaySeconds = System.getenv("CI_DELAY_SECONDS_GOOGLE_AI_GEMINI");
if (ciDelaySeconds != null) {
Thread.sleep(Integer.parseInt(ciDelaySeconds));
Thread.sleep(Integer.parseInt(ciDelaySeconds) * 1000L);
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -918,7 +918,7 @@ void should_support_enum_structured_output() {
void afterEach() throws InterruptedException {
String ciDelaySeconds = System.getenv("CI_DELAY_SECONDS_VERTEX_AI_GEMINI");
if (ciDelaySeconds != null) {
Thread.sleep(Integer.parseInt(ciDelaySeconds));
Thread.sleep(Integer.parseInt(ciDelaySeconds) * 1000L);
}
}
}
Original file line number Diff line number Diff line change
@@ -1,7 +1,11 @@
package dev.langchain4j.memory.chat;

import dev.langchain4j.agent.tool.ToolExecutionRequest;
import dev.langchain4j.data.message.*;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.SystemMessage;
import dev.langchain4j.data.message.ToolExecutionResultMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.model.Tokenizer;
import dev.langchain4j.model.openai.OpenAiTokenizer;
Expand All @@ -10,7 +14,9 @@

import static dev.langchain4j.data.message.SystemMessage.systemMessage;
import static dev.langchain4j.data.message.UserMessage.userMessage;
import static dev.langchain4j.internal.TestUtils.*;
import static dev.langchain4j.internal.TestUtils.aiMessageWithTokens;
import static dev.langchain4j.internal.TestUtils.systemMessageWithTokens;
import static dev.langchain4j.internal.TestUtils.userMessageWithTokens;
import static dev.langchain4j.model.openai.OpenAiModelName.GPT_3_5_TURBO;
import static java.util.Collections.singletonList;

Expand Down Expand Up @@ -431,7 +437,9 @@ void should_evict_orphan_ToolExecutionResultMessage_when_evicting_AiMessage_with
void should_evict_multiple_orphan_ToolExecutionResultMessages_when_evicting_AiMessage_with_ToolExecutionRequests() {

// given
ChatMemory chatMemory = TokenWindowChatMemory.withMaxTokens(87, TOKENIZER);
int maxTokens = 79;

ChatMemory chatMemory = TokenWindowChatMemory.withMaxTokens(maxTokens, TOKENIZER);


// when
Expand Down Expand Up @@ -460,14 +468,14 @@ void should_evict_multiple_orphan_ToolExecutionResultMessages_when_evicting_AiMe
.build();
AiMessage aiMessage = AiMessage.from(toolExecutionRequest1, toolExecutionRequest2);
int aiMessageTokens = TOKENIZER.estimateTokenCountInMessage(aiMessage);
assertThat(aiMessageTokens).isEqualTo(62);
assertThat(aiMessageTokens).isEqualTo(54);
chatMemory.add(aiMessage);

// then
assertThat(chatMemory.messages()).containsExactly(userMessage, aiMessage);
assertThat(TOKENIZER.estimateTokenCountInMessages(chatMemory.messages()))
.isEqualTo(EXTRA_TOKENS_PER_REQUEST + userMessageTokens + aiMessageTokens)
.isEqualTo(82);
.isEqualTo(74);


// when
Expand All @@ -481,7 +489,7 @@ void should_evict_multiple_orphan_ToolExecutionResultMessages_when_evicting_AiMe
assertThat(chatMemory.messages()).containsExactly(userMessage, aiMessage, toolExecutionResultMessage1);
assertThat(TOKENIZER.estimateTokenCountInMessages(chatMemory.messages()))
.isEqualTo(EXTRA_TOKENS_PER_REQUEST + userMessageTokens + aiMessageTokens + toolExecutionResultMessage1Tokens)
.isEqualTo(87);
.isEqualTo(maxTokens);

// when
ToolExecutionResultMessage toolExecutionResultMessage2 =
Expand All @@ -495,7 +503,7 @@ void should_evict_multiple_orphan_ToolExecutionResultMessages_when_evicting_AiMe
.containsExactly(aiMessage, toolExecutionResultMessage1, toolExecutionResultMessage2);
assertThat(TOKENIZER.estimateTokenCountInMessages(chatMemory.messages()))
.isEqualTo(EXTRA_TOKENS_PER_REQUEST + aiMessageTokens + toolExecutionResultMessage1Tokens + toolExecutionResultMessage2Tokens)
.isEqualTo(75);
.isEqualTo(67);


// when aiMessage2 is added and aiMessage has to be evicted
Expand All @@ -515,7 +523,9 @@ void should_evict_multiple_orphan_ToolExecutionResultMessages_when_evicting_AiMe
void should_evict_multiple_orphan_ToolExecutionResultMessages_when_evicting_AiMessage_with_ToolExecutionRequests_when_SystemMessage_is_present() {

// given
ChatMemory chatMemory = TokenWindowChatMemory.withMaxTokens(96, TOKENIZER);
int maxTokens = 88;

ChatMemory chatMemory = TokenWindowChatMemory.withMaxTokens(maxTokens, TOKENIZER);


// when
Expand Down Expand Up @@ -557,14 +567,14 @@ void should_evict_multiple_orphan_ToolExecutionResultMessages_when_evicting_AiMe
.build();
AiMessage aiMessage = AiMessage.from(toolExecutionRequest1, toolExecutionRequest2);
int aiMessageTokens = TOKENIZER.estimateTokenCountInMessage(aiMessage);
assertThat(aiMessageTokens).isEqualTo(62);
assertThat(aiMessageTokens).isEqualTo(54);
chatMemory.add(aiMessage);

// then
assertThat(chatMemory.messages()).containsExactly(systemMessage, userMessage, aiMessage);
assertThat(TOKENIZER.estimateTokenCountInMessages(chatMemory.messages()))
.isEqualTo(EXTRA_TOKENS_PER_REQUEST + systemMessageTokens + userMessageTokens + aiMessageTokens)
.isEqualTo(91);
.isEqualTo(83);


// when
Expand All @@ -579,7 +589,7 @@ void should_evict_multiple_orphan_ToolExecutionResultMessages_when_evicting_AiMe
.containsExactly(systemMessage, userMessage, aiMessage, toolExecutionResultMessage1);
assertThat(TOKENIZER.estimateTokenCountInMessages(chatMemory.messages()))
.isEqualTo(EXTRA_TOKENS_PER_REQUEST + systemMessageTokens + userMessageTokens + aiMessageTokens + toolExecutionResultMessage1Tokens)
.isEqualTo(96);
.isEqualTo(maxTokens);


// when
Expand All @@ -594,7 +604,7 @@ void should_evict_multiple_orphan_ToolExecutionResultMessages_when_evicting_AiMe
.containsExactly(systemMessage, aiMessage, toolExecutionResultMessage1, toolExecutionResultMessage2);
assertThat(TOKENIZER.estimateTokenCountInMessages(chatMemory.messages()))
.isEqualTo(EXTRA_TOKENS_PER_REQUEST + systemMessageTokens + aiMessageTokens + toolExecutionResultMessage1Tokens + toolExecutionResultMessage2Tokens)
.isEqualTo(84);
.isEqualTo(76);


// when aiMessage2 is added and aiMessage has to be evicted
Expand Down

0 comments on commit 07ce173

Please sign in to comment.