
Commit 87e5fa5

fix: use get_temperature in base LLM generate (#1520)
Matches the behavior in agenerate, avoiding some API call failures
1 parent efdda3f · commit 87e5fa5

File tree

1 file changed: +2 -2 lines changed


src/ragas/llms/base.py

+2 -2
@@ -88,7 +88,7 @@ async def generate(
         """Generate text using the given event loop."""
 
         if temperature is None:
-            temperature = 1e-8
+            temperature = self.get_temperature(n)
 
         if is_async:
             agenerate_text_with_retry = add_async_retry(
@@ -280,7 +280,7 @@ async def agenerate_text(
         callbacks: Callbacks = None,
     ) -> LLMResult:
         if temperature is None:
-            temperature = 1e-8
+            temperature = self.get_temperature(n)
 
         kwargs = self.check_args(n, temperature, stop, callbacks)
         li_response = await self.llm.acomplete(prompt.to_string(), **kwargs)
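
Both hunks replace the hardcoded near-zero default (temperature = 1e-8) with the shared get_temperature(n) helper, so these code paths pick their default the same way agenerate does, per the commit message. For orientation only, a minimal sketch of what such a helper can look like; the 0.3 value and the docstring below are illustrative assumptions, not necessarily the exact code in src/ragas/llms/base.py:

# Illustrative sketch of a get_temperature helper (assumed shape; the
# actual implementation in src/ragas/llms/base.py may differ).
def get_temperature(self, n: int) -> float:
    """Pick a default temperature based on how many completions are requested.

    With n > 1, a near-zero temperature would make the n completions almost
    identical, so a small positive value is assumed here; for a single
    completion the near-deterministic 1e-8 default is kept.
    """
    return 0.3 if n > 1 else 1e-8

With a helper like this in place, a call such as generate(prompt, n=3) no longer pins the temperature to 1e-8, which the commit description says was causing some API calls to fail.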
