
Commit a716045

adds some tests. Improve README
1 parent 1ceb034 commit a716045

8 files changed, +390 -23 lines


README.md

Lines changed: 10 additions & 0 deletions
````diff
@@ -240,6 +240,16 @@ See the [`LangChain.ChatModels.ChatBumblebee` documentation](https://hexdocs.pm/
 
 ## Testing
 
+Before you can run the tests, make sure you have the environment variables set.
+
+You can do this by running:
+
+```
+source .envrc_template
+```
+
+Or you can copy it to `.envrc` and populate it with your private API values.
+
 To run all the tests including the ones that perform live calls against the OpenAI API, use the following command:
 
 ```
````
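For context, the live-API tests read their keys from the environment at runtime. A minimal sketch of the failure the new instructions prevent, assuming the suite reads keys with `System.fetch_env!/1` and that `OPENAI_API_KEY` is among the variables in `.envrc_template` (both are assumptions, not confirmed by this diff):

```elixir
# Illustrative only: fetch a key the way a test setup might.
# OPENAI_API_KEY is an assumed variable name; see .envrc_template for
# the variables this project actually expects.
api_key = System.fetch_env!("OPENAI_API_KEY")
IO.puts("API key loaded (#{byte_size(api_key)} bytes)")
```

Without the variable set, `System.fetch_env!/1` raises immediately, which is easier to diagnose than a live API call failing with an authentication error.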

lib/chat_models/chat_bumblebee.ex

Lines changed: 2 additions & 2 deletions
```diff
@@ -484,11 +484,11 @@ defmodule LangChain.ChatModels.ChatBumblebee do
 
     # Track non-streaming response completion
     LangChain.Telemetry.emit_event(
-      [:langchain, :llm, :response, streaming: false],
+      [:langchain, :llm, :response, :non_streaming],
       %{system_time: System.system_time()},
       %{
         model: inspect(model.serving),
-        response_size: byte_size(content)
+        response_size: byte_size(inspect(content))
       }
     )
 
```
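A note on why the rename matters: `:telemetry` event names are lists of atoms, and Elixir's trailing keyword syntax makes `[:langchain, :llm, :response, streaming: false]` equivalent to `[:langchain, :llm, :response, {:streaming, false}]`, so a handler attached to the atom-only event name would never match what was actually emitted. Replacing the keyword pair with the `:non_streaming` atom fixes that; the same rename appears in the Mistral, Ollama, OpenAI, Perplexity, and VertexAI modules below. The second hunk wraps `content` in `inspect/1` because `byte_size/1` raises for non-binary values, evidently because `content` here is not guaranteed to be a plain binary. A minimal sketch of attaching a handler to the renamed event (the handler id and log line are illustrative):

```elixir
# Attach to the corrected event name. The handler receives the
# measurements and metadata maps shown in the emit_event call above.
:telemetry.attach(
  "log-non-streaming-llm-responses",
  [:langchain, :llm, :response, :non_streaming],
  fn _event, %{system_time: _time}, %{model: model, response_size: size}, _config ->
    IO.puts("#{model} responded with #{size} bytes")
  end,
  nil
)
```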

lib/chat_models/chat_mistral_ai.ex

Lines changed: 3 additions & 6 deletions
```diff
@@ -341,14 +341,14 @@ defmodule LangChain.ChatModels.ChatMistralAI do
       result ->
         # Track non-streaming response completion
         LangChain.Telemetry.emit_event(
-          [:langchain, :llm, :response, streaming: false],
+          [:langchain, :llm, :response, :non_streaming],
           %{system_time: System.system_time()},
           %{
             model: mistralai.model,
             response_size: byte_size(inspect(result))
           }
         )
-
+
         Callbacks.fire(mistralai.callbacks, :on_llm_new_message, [result])
         result
     end
@@ -432,10 +432,7 @@ defmodule LangChain.ChatModels.ChatMistralAI do
   def do_process_response(model, %{"choices" => [], "usage" => %{} = _usage} = data) do
     case get_token_usage(data) do
       %TokenUsage{} = token_usage ->
-        Callbacks.fire(model.callbacks, :on_llm_token_usage, [
-          get_token_usage(data)
-        ])
-
+        Callbacks.fire(model.callbacks, :on_llm_token_usage, [token_usage])
         :ok
 
       nil ->
```
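The second hunk drops a duplicated computation: the `case` pattern already binds the result of `get_token_usage(data)` to `token_usage`, so firing the callback with that binding avoids calling the function twice. Judging from the `[token_usage]` argument list, the `:on_llm_token_usage` handler receives the `%TokenUsage{}` struct as its only argument. A hypothetical consumer (the handler body and single-argument shape are inferred from this diff, not from documentation):

```elixir
# Hypothetical callback map; a map like this would be registered in the
# model's callbacks list. The one-argument handler shape is inferred
# from Callbacks.fire(model.callbacks, :on_llm_token_usage, [token_usage]).
handlers = %{
  on_llm_token_usage: fn usage ->
    IO.inspect(usage, label: "token usage")
  end
}
```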

lib/chat_models/chat_ollama_ai.ex

Lines changed: 1 addition & 1 deletion
```diff
@@ -433,7 +433,7 @@ defmodule LangChain.ChatModels.ChatOllamaAI do
       result ->
         # Track non-streaming response completion
         LangChain.Telemetry.emit_event(
-          [:langchain, :llm, :response, streaming: false],
+          [:langchain, :llm, :response, :non_streaming],
           %{system_time: System.system_time()},
           %{
             model: ollama_ai.model,
```

lib/chat_models/chat_open_ai.ex

Lines changed: 1 addition & 1 deletion
```diff
@@ -705,7 +705,7 @@ defmodule LangChain.ChatModels.ChatOpenAI do
 
     # Track non-streaming response completion
     LangChain.Telemetry.emit_event(
-      [:langchain, :llm, :response, streaming: false],
+      [:langchain, :llm, :response, :non_streaming],
       %{system_time: System.system_time()},
       %{
         model: openai.model,
```

lib/chat_models/chat_perplexity.ex

Lines changed: 14 additions & 12 deletions
```diff
@@ -374,17 +374,17 @@ defmodule LangChain.ChatModels.ChatPerplexity do
 
       result ->
         Callbacks.fire(perplexity.callbacks, :on_llm_new_message, [result])
-
+
         # Track non-streaming response completion
         Telemetry.emit_event(
-          [:langchain, :llm, :response, streaming: false],
+          [:langchain, :llm, :response, :non_streaming],
           %{system_time: System.system_time()},
           %{
             model: perplexity.model,
             response_size: byte_size(inspect(result))
           }
         )
-
+
         result
     end
 
@@ -651,14 +651,15 @@ defmodule LangChain.ChatModels.ChatPerplexity do
     end
   end
 
-  def do_process_response(_model, %{"choices" => [
-        %{
-          "delta" => %{"role" => role, "content" => content},
-          "finish_reason" => finish,
-          "index" => index
-        } = _choice
-      ]
-  }) do
+  def do_process_response(_model, %{
+        "choices" => [
+          %{
+            "delta" => %{"role" => role, "content" => content},
+            "finish_reason" => finish,
+            "index" => index
+          } = _choice
+        ]
+      }) do
     status = finish_reason_to_status(finish)
 
     data =
@@ -686,7 +687,8 @@ defmodule LangChain.ChatModels.ChatPerplexity do
           "delta" => %{"content" => content},
           "finish_reason" => finish,
           "index" => index
-        } = _choice
+        }
+        | _
       ]
     }
   ) do
```
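The middle hunk only re-indents the function head; the last hunk changes matching behavior. The old clause required the `"choices"` list to contain exactly one element, so a payload carrying several choices would not match it. The `[%{...} | _]` head-and-tail pattern matches on the first choice and ignores any others. A self-contained illustration with made-up payload data:

```elixir
# Made-up payload with two choices; not a real Perplexity response.
response = %{
  "choices" => [
    %{"delta" => %{"content" => "Hello"}, "finish_reason" => nil, "index" => 0},
    %{"delta" => %{"content" => "Hi"}, "finish_reason" => nil, "index" => 1}
  ]
}

# The head-and-tail pattern still matches with two elements present;
# only the first choice is consumed.
%{"choices" => [%{"delta" => %{"content" => content}} | _]} = response
IO.puts(content)
#=> Hello
```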

lib/chat_models/chat_vertex_ai.ex

Lines changed: 1 addition & 1 deletion
```diff
@@ -335,7 +335,7 @@ defmodule LangChain.ChatModels.ChatVertexAI do
 
     # Track non-streaming response completion
     LangChain.Telemetry.emit_event(
-      [:langchain, :llm, :response, streaming: false],
+      [:langchain, :llm, :response, :non_streaming],
       %{system_time: System.system_time()},
       %{
         model: vertex_ai.model,
```
