Merged
2 changes: 1 addition & 1 deletion Makefile
@@ -10,7 +10,7 @@ TESTS_PATH := tests
 -include .env

 ifndef UV_VERSION
-UV_VERSION := 0.7.6
+UV_VERSION := 0.7.19
 endif

 .PHONY: uv_check venv sync update format lint test release
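The ifndef guard makes the pinned uv version only a default: because of the "-include .env" directive above it, a local .env file (parsed as a makefile fragment) can set UV_VERSION first, as can a command-line override such as "make UV_VERSION=...". A hypothetical .env fragment, with an illustrative version value:

# .env (not committed): pin a different uv version locally
UV_VERSION := 0.8.0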
5 changes: 3 additions & 2 deletions pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "hatchling.build"
 [project]
 name = "draive"
 description = "Framework designed to simplify and accelerate the development of LLM-based applications."
-version = "0.75.4"
+version = "0.75.5"
 readme = "README.md"
 maintainers = [
     { name = "Kacper Kaliński", email = "kacper.kalinski@miquido.com" },
@@ -21,7 +21,7 @@ classifiers = [
     "Topic :: Software Development :: Libraries :: Application Frameworks",
 ]
 license = { file = "LICENSE" }
-dependencies = ["numpy~=2.2", "haiway~=0.24.0"]
+dependencies = ["numpy~=2.2", "haiway~=0.24.2"]

 [project.urls]
 Homepage = "https://miquido.com"
@@ -32,6 +32,7 @@ sentencepiece = ["sentencepiece~=0.2"]
 cohere = ["cohere~=5.15"]
 cohere_bedrock = ["cohere~=5.13", "boto3~=1.37"]
 openai = ["openai~=1.88", "tiktoken~=0.9"]
+openai_realtime = ["openai[realtime]~=1.88", "tiktoken~=0.9"]
 anthropic = ["anthropic~=0.54", "tokenizers~=0.21"]
 anthropic_bedrock = ["anthropic[bedrock]~=0.54", "tokenizers~=0.21"]
 mistral = ["sentencepiece~=0.2", "mistralai~=1.8"]
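The new openai_realtime extra installs the openai package with its own realtime extra alongside tiktoken, keeping realtime support opt-in. Installation uses standard extras syntax, e.g. pip install "draive[openai_realtime]" (the command itself is assumed, not part of the diff).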
4 changes: 4 additions & 0 deletions src/draive/__init__.py
@@ -33,6 +33,7 @@
     asynchronous,
     cache,
     ctx,
+    execute_concurrently,
     getenv_base64,
     getenv_bool,
     getenv_float,
@@ -45,6 +46,7 @@
     process_concurrently,
     retry,
     setup_logging,
+    stream_concurrently,
     throttle,
     timeout,
     traced,
@@ -391,6 +393,7 @@
     "cache",
     "choice_completion",
     "ctx",
+    "execute_concurrently",
     "getenv_base64",
     "getenv_bool",
     "getenv_float",
@@ -410,6 +413,7 @@
     "split_sequence",
     "split_text",
     "stage",
+    "stream_concurrently",
     "throttle",
     "timeout",
     "tool",
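Both helpers come from haiway and are now re-exported at the draive top level. A minimal usage sketch for execute_concurrently follows; the call shape (an iterable of inputs, a handler coroutine function, and a concurrent_tasks limit) is inferred from its use later in this diff, not from haiway's documentation:

import asyncio

from draive import execute_concurrently


async def shout(item: str) -> str:
    await asyncio.sleep(0.1)  # stand-in for real async work
    return item.upper()


async def main() -> None:
    # Assumed semantics: results correspond to the inputs, with at most
    # concurrent_tasks handlers running at any moment.
    results = await execute_concurrently(
        ["a", "b", "c", "d"],
        handler=shout,
        concurrent_tasks=2,
    )
    print(tuple(results))


asyncio.run(main())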
16 changes: 11 additions & 5 deletions src/draive/evaluation/scenario.py
@@ -1,8 +1,7 @@
-from asyncio import gather
 from collections.abc import Callable, Sequence
 from typing import Any, Protocol, Self, cast, overload, runtime_checkable

-from haiway import AttributePath, ScopeContext, as_list, ctx
+from haiway import AttributePath, ScopeContext, as_list, ctx, execute_concurrently

 from draive.commons import META_EMPTY, Meta, MetaValues
 from draive.evaluation.evaluator import EvaluatorResult, PreparedEvaluator
@@ -84,13 +83,20 @@ async def evaluating[Value](
         /,
         evaluators: PreparedEvaluator[Value],
         *_evaluators: PreparedEvaluator[Value],
+        concurrent_tasks: int = 2,
         meta: Meta | MetaValues | None = None,
     ) -> Self:
+        async def execute(
+            evaluator: PreparedEvaluator[Value],
+        ) -> EvaluatorResult:
+            return await evaluator(value)
+
         return cls(
             evaluations=tuple(
-                await gather(
-                    *(evaluator(value) for evaluator in (evaluators, *_evaluators)),
-                    return_exceptions=False,
+                await execute_concurrently(
+                    [evaluators, *_evaluators],
+                    handler=execute,
+                    concurrent_tasks=concurrent_tasks,
                 ),
             ),
             meta=Meta.of(meta),
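The behavioral difference: gather started every evaluator at once, while execute_concurrently caps parallelism at concurrent_tasks (default 2). A rough, hypothetical equivalent of what the helper is used for here, sketched with a semaphore (not haiway's actual implementation):

import asyncio
from collections.abc import Awaitable, Callable, Sequence


async def bounded_gather[T, R](
    items: Sequence[T],
    handler: Callable[[T], Awaitable[R]],
    concurrent_tasks: int = 2,
) -> list[R]:
    semaphore = asyncio.Semaphore(concurrent_tasks)

    async def run(item: T) -> R:
        async with semaphore:  # at most concurrent_tasks handlers at once
            return await handler(item)

    # Results keep input order, as asyncio.gather does.
    return await asyncio.gather(*(run(item) for item in items))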
33 changes: 28 additions & 5 deletions src/draive/evaluation/suite.py
@@ -1,10 +1,11 @@
+import random
 from asyncio import Lock, gather
 from collections.abc import Callable, Iterable, Sequence
 from pathlib import Path
 from typing import Protocol, Self, runtime_checkable
 from uuid import UUID, uuid4

-from haiway import ScopeContext, as_list, asynchronous, ctx
+from haiway import ScopeContext, as_list, asynchronous, ctx, execute_concurrently

 from draive.commons import META_EMPTY, Meta, MetaValues
 from draive.evaluation.evaluator import EvaluatorResult, PreparedEvaluator
@@ -179,12 +180,19 @@ async def evaluating[Value](
         /,
         evaluators: PreparedScenarioEvaluator[Value] | PreparedEvaluator[Value],
         *_evaluators: PreparedScenarioEvaluator[Value] | PreparedEvaluator[Value],
+        concurrent_tasks: int = 2,
         meta: Meta | MetaValues | None = None,
     ) -> Self:
+        async def execute(
+            evaluator: PreparedScenarioEvaluator[Value] | PreparedEvaluator[Value],
+        ) -> ScenarioEvaluatorResult | EvaluatorResult:
+            return await evaluator(value)
+
         return cls.of(
-            *await gather(
-                *[evaluator(value) for evaluator in [evaluators, *_evaluators]],
-                return_exceptions=False,
+            *await execute_concurrently(
+                [evaluators, *_evaluators],
+                handler=execute,
+                concurrent_tasks=concurrent_tasks,
             ),
             meta=Meta.of(meta),
         )
@@ -299,7 +307,9 @@ async def __call__(

     async def _evaluate(
         self,
-        case_parameters: Sequence[EvaluationSuiteCase[CaseParameters] | CaseParameters | UUID],
+        case_parameters: Sequence[EvaluationSuiteCase[CaseParameters] | CaseParameters | UUID]
+        | float
+        | int,
         /,
         *,
         suite_parameters: SuiteParameters | None = None,
@@ -313,6 +323,19 @@ async def _evaluate(
         if not case_parameters:
             cases = suite_data.cases

+        elif isinstance(case_parameters, int):
+            cases = random.sample(  # nosec: B311
+                suite_data.cases,
+                min(len(suite_data.cases), case_parameters),
+            )
+
+        elif isinstance(case_parameters, float):
+            assert 0 < case_parameters <= 1  # nosec: B101
+            cases = random.sample(  # nosec: B311
+                suite_data.cases,
+                min(len(suite_data.cases), int(len(suite_data.cases) * case_parameters)),
+            )
+
         else:
             cases = []
             for case in case_parameters:
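The widened case_parameters type adds two sampling modes alongside the existing explicit-case path: an int picks that many random cases, and a float in (0, 1] picks that fraction of the suite. Restated as a standalone sketch (select_cases is a hypothetical name; the real _evaluate also resolves UUIDs and explicit case objects):

import random
from collections.abc import Sequence


def select_cases[Case](
    cases: Sequence[Case],
    selection: int | float | None = None,
) -> list[Case]:
    if not selection:
        return list(cases)  # no selection given: evaluate every case

    if isinstance(selection, int):
        # An int asks for that many cases, capped at the suite size.
        return random.sample(cases, min(len(cases), selection))

    # A float is a fraction of the suite, e.g. 0.25 for a random quarter.
    assert 0 < selection <= 1, "fraction must be in (0, 1]"
    return random.sample(cases, min(len(cases), int(len(cases) * selection)))

For example, select_cases(cases, 0.25) evaluates a random quarter of the suite, which keeps large suites cheap to spot-check.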
41 changes: 26 additions & 15 deletions uv.lock

Some generated files are not rendered by default.