Fix type annotation mismatch in Task.context field #3020

Open · wants to merge 3 commits into base: main
2 changes: 1 addition & 1 deletion src/crewai/crew.py
@@ -1034,7 +1034,7 @@ def _get_context(self, task: Task, task_outputs: List[TaskOutput]) -> str:
         context = (
             aggregate_raw_outputs_from_task_outputs(task_outputs)
             if task.context is NOT_SPECIFIED
-            else aggregate_raw_outputs_from_tasks(task.context)
+            else aggregate_raw_outputs_from_tasks(cast(List["Task"], task.context))
         )
         return context

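For reviewers, the `cast` above is a type-checker-only hint; `typing.cast` is a no-op at runtime, so the branch still receives whatever list was stored in `task.context`. A minimal sketch of the three states the sentinel distinguishes (import paths are the same ones the new test file uses; the task descriptions are made up for illustration):

from crewai.task import Task
from crewai.utilities.constants import NOT_SPECIFIED

research = Task(description="Research the topic", expected_output="Raw notes")

# context omitted -> sentinel default -> _get_context aggregates all prior task outputs
summary_default = Task(description="Summarize", expected_output="Digest")
assert summary_default.context is NOT_SPECIFIED

# explicit list (or empty list) -> only the listed tasks feed the context string
summary_scoped = Task(
    description="Summarize the research task only",
    expected_output="Digest",
    context=[research],
)
assert summary_scoped.context is not NOT_SPECIFIED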
7 changes: 5 additions & 2 deletions src/crewai/task.py
@@ -26,6 +26,7 @@
 from pydantic import (
     UUID4,
     BaseModel,
+    ConfigDict,
     Field,
     PrivateAttr,
     field_validator,
@@ -39,7 +40,7 @@
 from crewai.tasks.task_output import TaskOutput
 from crewai.tools.base_tool import BaseTool
 from crewai.utilities.config import process_config
-from crewai.utilities.constants import NOT_SPECIFIED
+from crewai.utilities.constants import NOT_SPECIFIED, _NotSpecified
 from crewai.utilities.guardrail import process_guardrail, GuardrailResult
 from crewai.utilities.converter import Converter, convert_to_model
 from crewai.utilities.events import (
@@ -78,6 +79,8 @@ class Task(BaseModel):
     used_tools: int = 0
     tools_errors: int = 0
     delegations: int = 0
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
     i18n: I18N = I18N()
     name: Optional[str] = Field(default=None)
     prompt_context: Optional[str] = None
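The `model_config` line above is what lets pydantic accept the plain `_NotSpecified` class in the new annotation. A standalone sketch of that pydantic v2 behaviour, not crewAI code (the `Sentinel` class here is a made-up stand-in for `_NotSpecified`):

from typing import Optional

from pydantic import BaseModel, ConfigDict
from pydantic.errors import PydanticSchemaGenerationError


class Sentinel:
    """Stand-in for a plain (non-pydantic) class such as _NotSpecified."""


try:
    class Rejected(BaseModel):
        value: Optional[Sentinel] = None  # no config -> schema generation fails at class creation
except PydanticSchemaGenerationError as exc:
    print("rejected without config:", type(exc).__name__)


class Accepted(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True)

    value: Optional[Sentinel] = None  # accepted once arbitrary_types_allowed=True


print(Accepted(value=Sentinel()).value)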
@@ -95,7 +98,7 @@
     agent: Optional[BaseAgent] = Field(
         description="Agent responsible for execution the task.", default=None
     )
-    context: Optional[List["Task"]] = Field(
+    context: Union[List["Task"], _NotSpecified, None] = Field(
         description="Other tasks that will have their output used as context for this task.",
         default=NOT_SPECIFIED,
     )
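For readers who don't have crewai/utilities/constants.py open: the tests below pin down the sentinel's observable behaviour (a module-level NOT_SPECIFIED instance of _NotSpecified whose str and repr are "NOT_SPECIFIED"), so it presumably looks roughly like this sketch, inferred from those assertions rather than copied from the source:

# Rough sketch only; the real definition in crewai/utilities/constants.py may differ in detail.
class _NotSpecified:
    """Marks "argument was never passed", as opposed to an explicit None."""

    def __repr__(self) -> str:
        return "NOT_SPECIFIED"

    def __str__(self) -> str:
        return "NOT_SPECIFIED"


NOT_SPECIFIED = _NotSpecified()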
103 changes: 103 additions & 0 deletions tests/test_task_context_type_annotation.py
@@ -0,0 +1,103 @@
"""
Test for issue #3019: Type annotation for `context` in `Task` is `Optional[List["Task"]]`, but default is `NOT_SPECIFIED`

This test reproduces the type annotation issue and verifies the fix.
"""

import pytest
from typing import get_type_hints, get_origin, get_args
from pydantic import ValidationError

from crewai.task import Task
from crewai.utilities.constants import NOT_SPECIFIED, _NotSpecified


class TestTaskContextTypeAnnotation:
    """Test cases for Task context field type annotation issue."""

    def test_task_context_default_value_is_not_specified(self):
        """Test that Task.context default value is NOT_SPECIFIED sentinel."""
        task = Task(description="Test task", expected_output="Test output")
        assert task.context is NOT_SPECIFIED
        assert isinstance(task.context, _NotSpecified)

    def test_task_context_can_be_set_to_none(self):
        """Test that Task.context can be explicitly set to None."""
        task = Task(description="Test task", expected_output="Test output", context=None)
        assert task.context is None

    def test_task_context_can_be_set_to_empty_list(self):
        """Test that Task.context can be set to an empty list."""
        task = Task(description="Test task", expected_output="Test output", context=[])
        assert task.context == []
        assert isinstance(task.context, list)

    def test_task_context_can_be_set_to_task_list(self):
        """Test that Task.context can be set to a list of tasks."""
        task1 = Task(description="Task 1", expected_output="Output 1")
        task2 = Task(description="Task 2", expected_output="Output 2")
        task3 = Task(description="Task 3", expected_output="Output 3", context=[task1, task2])

        assert task3.context == [task1, task2]
        assert isinstance(task3.context, list)
        assert len(task3.context) == 2

    def test_task_context_type_annotation_includes_not_specified(self):
        """Test that the type annotation for context includes _NotSpecified type."""
        type_hints = get_type_hints(Task)
        context_type = type_hints.get('context')

        assert context_type is not None

        origin = get_origin(context_type)
        if origin is not None:
            args = get_args(context_type)

            assert any('_NotSpecified' in str(arg) or arg is _NotSpecified for arg in args), \
                f"Type annotation should include _NotSpecified, got: {args}"

    def test_task_context_distinguishes_not_passed_from_none(self):
        """Test that NOT_SPECIFIED distinguishes between not passed and None."""
        task_not_passed = Task(description="Test task", expected_output="Test output")

        task_explicit_none = Task(description="Test task", expected_output="Test output", context=None)

        task_empty_list = Task(description="Test task", expected_output="Test output", context=[])

        assert task_not_passed.context is NOT_SPECIFIED
        assert task_explicit_none.context is None
        assert task_empty_list.context == []

        assert task_not_passed.context is not task_explicit_none.context
        assert task_not_passed.context != task_empty_list.context
        assert task_explicit_none.context != task_empty_list.context

    def test_task_context_usage_in_crew_logic(self):
        """Test that the context field works correctly with crew logic."""
        from crewai.utilities.constants import NOT_SPECIFIED

        task_with_not_specified = Task(description="Task 1", expected_output="Output 1")
        task_with_none = Task(description="Task 2", expected_output="Output 2", context=None)
        task_with_empty_list = Task(description="Task 3", expected_output="Output 3", context=[])

        assert task_with_not_specified.context is NOT_SPECIFIED
        assert task_with_none.context is not NOT_SPECIFIED
        assert task_with_empty_list.context is not NOT_SPECIFIED

    def test_task_context_repr_shows_not_specified(self):
        """Test that NOT_SPECIFIED has a proper string representation."""
        task = Task(description="Test task", expected_output="Test output")
        assert str(task.context) == "NOT_SPECIFIED"
        assert repr(task.context) == "NOT_SPECIFIED"

    def test_task_context_validation_accepts_valid_types(self):
        """Test that Task validation accepts all valid context types."""
        try:
            Task(description="Test 1", expected_output="Output 1")
            Task(description="Test 2", expected_output="Output 2", context=None)
            Task(description="Test 3", expected_output="Output 3", context=[])

            task1 = Task(description="Task 1", expected_output="Output 1")
            Task(description="Test 4", expected_output="Output 4", context=[task1])
        except ValidationError as e:
            pytest.fail(f"Valid context types should not raise ValidationError: {e}")