Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
6113289
UN-3154 [FEAT] Add execution status tracking and list enhancements
chandrasekharan-zipstack Jan 29, 2026
ec69cc3
UN-3154 [FEAT] Add reusable CardGridView component
chandrasekharan-zipstack Jan 29, 2026
f938be5
UN-3154 [FEAT] Refactor API Deployments to use CardGridView
chandrasekharan-zipstack Jan 29, 2026
558dbb8
UN-3154 [FEAT] Refactor Pipelines to use CardGridView
chandrasekharan-zipstack Jan 29, 2026
ab0190c
UN-3154 [FEAT] Improve navigation with scroll restoration
chandrasekharan-zipstack Jan 29, 2026
4fddcda
UN-3154 [FIX] Address CodeRabbit review comments
chandrasekharan-zipstack Jan 29, 2026
6e8bf78
UN-3154 [FIX] Address SonarQube issues and reduce code duplication
chandrasekharan-zipstack Jan 29, 2026
3d7fdf2
UN-3154 [FIX] Address SonarCloud ReDoS security hotspot
chandrasekharan-zipstack Jan 29, 2026
49c57ad
UN-3154 [FIX] Fix function argument mismatch in clearFileMarkers
chandrasekharan-zipstack Jan 29, 2026
d900a59
UN-3154 [FIX] Fix SonarQube issues and improve code quality
chandrasekharan-zipstack Jan 29, 2026
cfeb9a7
UN-3154 [FIX] Address SonarCloud code quality issues
chandrasekharan-zipstack Jan 29, 2026
7a5af7b
UN-3154 [FIX] Revert status pill colors and fix pipeline sorting
chandrasekharan-zipstack Jan 30, 2026
bb84c35
UN-3154 [FIX] Restore full list context on back navigation from logs
chandrasekharan-zipstack Feb 2, 2026
9ed7131
Merge branch 'main' into UN-3154-card-grid-view-pipelines-deployments
chandrasekharan-zipstack Feb 4, 2026
be99548
UN-3154 [FIX] Replace native elements with AntD components per PR review
chandrasekharan-zipstack Feb 10, 2026
91889df
UN-3154 [FIX] Address CodeRabbit PR review comments from #1769
chandrasekharan-zipstack Feb 10, 2026
1fad5f1
UN-3154 [FIX] ESLint eqeqeq error and add loading state to logs modal
chandrasekharan-zipstack Feb 10, 2026
09dadce
UN-3154 [FIX] Resolve card grid rendering issues and UI glitches
chandrasekharan-zipstack Feb 11, 2026
eb345a7
UN-3154 [FIX] Remove ineffective CSS hover overrides for action icons
chandrasekharan-zipstack Feb 14, 2026
0323fb6
UN-3154 [FIX] Backend sorting and share modal improvements
chandrasekharan-zipstack Feb 14, 2026
7934ba9
UN-3154 [REFACTOR] Extract shared hooks to reduce duplication in Pipe…
chandrasekharan-zipstack Feb 25, 2026
cb0f9bb
MISC [FIX] Suppress eslint import/no-unresolved for Webpack url import
chandrasekharan-zipstack Feb 26, 2026
921a1be
Merge branch 'main' into UN-3154-card-grid-view-pipelines-deployments
chandrasekharan-zipstack Feb 27, 2026
84632c2
Merge remote-tracking branch 'origin/main' into UN-3154-card-grid-vie…
chandrasekharan-zipstack Feb 27, 2026
786efce
UN-3154 [FIX] Replace native elements with AntD components per PR review
chandrasekharan-zipstack Feb 27, 2026
60f2e38
[FIX] Make Vite optional plugin emit throw instead of empty export
chandrasekharan-zipstack Feb 27, 2026
f4c479f
[MISC] Fix biome v2 lint and format issues in card grid view files
chandrasekharan-zipstack Feb 27, 2026
ed07793
UN-3154 [REFACTOR] Replace divs/spans with AntD components, remove du…
chandrasekharan-zipstack Feb 27, 2026
e5ed2dd
UN-3154 [FIX] Fix tooltip icon colors, move lazy imports to top-level
chandrasekharan-zipstack Feb 27, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 22 additions & 2 deletions backend/api_v2/api_deployment_views.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from typing import Any

from configuration.models import Configuration
from django.db.models import QuerySet
from django.db.models import F, OuterRef, QuerySet, Subquery
from django.http import HttpResponse
from permissions.permission import IsOwner, IsOwnerOrSharedUserOrSharedToOrg
from plugins import get_plugin
Expand All @@ -16,7 +16,9 @@
from rest_framework.serializers import Serializer
from tool_instance_v2.models import ToolInstance
from utils.enums import CeleryTaskState
from utils.pagination import CustomPagination
from workflow_manager.workflow_v2.dto import ExecutionResponse
from workflow_manager.workflow_v2.models.execution import WorkflowExecution

from api_v2.api_deployment_dto_registry import ApiDeploymentDTORegistry
from api_v2.constants import ApiExecution
Expand Down Expand Up @@ -243,19 +245,37 @@ def get(


class APIDeploymentViewSet(viewsets.ModelViewSet):
pagination_class = CustomPagination

def get_permissions(self) -> list[Any]:
    """Return permission instances for the current viewset action.

    Mutating actions (destroy/update/partial_update) are restricted to the
    owner; all other actions allow the owner, shared users, or users in the
    shared organization.
    """
    owner_only_actions = {"destroy", "partial_update", "update"}
    if self.action in owner_only_actions:
        return [IsOwner()]
    return [IsOwnerOrSharedUserOrSharedToOrg()]

def get_queryset(self) -> QuerySet | None:
    """Return API deployments visible to the requesting user.

    Results are ordered by the most recent execution time (deployments that
    have never run sort last) and may be narrowed with the ``workflow`` and
    ``search`` query parameters.

    Returns:
        QuerySet of APIDeployment rows annotated with
        ``last_run_time_annotated``.
    """
    # Subquery resolving each deployment's latest execution timestamp.
    last_run_subquery = (
        WorkflowExecution.objects.filter(pipeline_id=OuterRef("id"))
        .order_by("-created_at")
        .values("created_at")[:1]
    )

    # Build the queryset once; a redundant earlier assignment that was
    # immediately overwritten has been removed.
    queryset = (
        APIDeployment.objects.for_user(self.request.user)
        .annotate(last_run_time_annotated=Subquery(last_run_subquery))
        .order_by(F("last_run_time_annotated").desc(nulls_last=True))
    )

    # Optional filter by owning workflow ID.
    workflow_filter = self.request.query_params.get("workflow")
    if workflow_filter:
        queryset = queryset.filter(workflow_id=workflow_filter)

    # Optional case-insensitive search on the display name.
    search = self.request.query_params.get("search")
    if search:
        queryset = queryset.filter(display_name__icontains=search)

    return queryset
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nit:
If you are only returning the queryset here, adjust the API response headers accordingly.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't get this, do you mind clarifying more?


def get_serializer_class(self) -> serializers.Serializer:
Expand Down
23 changes: 23 additions & 0 deletions backend/api_v2/serializers.py
Original file line number Diff line number Diff line change
Expand Up @@ -425,6 +425,9 @@ def validate_execution_id(self, value):
class APIDeploymentListSerializer(ModelSerializer):
workflow_name = CharField(source="workflow.workflow_name", read_only=True)
created_by_email = SerializerMethodField()
last_5_run_statuses = SerializerMethodField()
run_count = SerializerMethodField()
last_run_time = SerializerMethodField()

class Meta:
model = APIDeployment
Expand All @@ -439,12 +442,32 @@ class Meta:
"api_name",
"created_by",
"created_by_email",
"last_5_run_statuses",
"run_count",
"last_run_time",
]

def get_created_by_email(self, obj):
    """Return the creator's email address, or None when no creator is set."""
    creator = obj.created_by
    return creator.email if creator else None

def get_run_count(self, instance) -> int:
    """Return the total number of executions recorded for this deployment."""
    # NOTE(review): one COUNT query per serialized row; if list endpoints
    # grow slow, consider annotating the count in the view queryset — TODO confirm.
    runs = WorkflowExecution.objects.filter(pipeline_id=instance.id)
    return runs.count()

def get_last_run_time(self, instance) -> str | None:
    """Return the ISO-8601 timestamp of the most recent execution, or None.

    Fetches only the ``created_at`` column via ``values_list`` instead of
    materialising the whole execution row, since nothing else is needed.
    """
    last_run_at = (
        WorkflowExecution.objects.filter(pipeline_id=instance.id)
        .order_by("-created_at")
        .values_list("created_at", flat=True)
        .first()
    )
    return last_run_at.isoformat() if last_run_at else None

def get_last_5_run_statuses(self, instance) -> list[dict]:
    """Return the 5 most recent execution statuses for this API deployment."""
    recent = WorkflowExecution.get_last_run_statuses(instance.id, limit=5)
    return recent


class APIKeyListSerializer(ModelSerializer):
class Meta:
Expand Down
3 changes: 1 addition & 2 deletions backend/dashboard_metrics/services.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
from usage_v2.models import Usage
from workflow_manager.file_execution.models import WorkflowFileExecution
from workflow_manager.workflow_v2.models.execution import WorkflowExecution
from workflow_manager.workflow_v2.models.workflow import Workflow

from dashboard_metrics.models import Granularity
from unstract.core.data_models import ExecutionStatus
Expand Down Expand Up @@ -716,8 +717,6 @@ def get_workflow_token_usage(
List of dicts with workflow_id, workflow_name, total_tokens,
total_cost, and call_count, ordered by total_tokens descending.
"""
from workflow_manager.workflow_v2.models.workflow import Workflow

# Query Usage grouped by workflow_id
usage_qs = (
_get_usage_queryset()
Expand Down
5 changes: 2 additions & 3 deletions backend/dashboard_metrics/tasks.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,10 +10,12 @@
from datetime import datetime, timedelta
from typing import Any

from account_v2.models import Organization
from celery import shared_task
from django.core.cache import cache
from django.db.utils import DatabaseError, OperationalError
from django.utils import timezone
from workflow_manager.workflow_v2.models.execution import WorkflowExecution

from .models import (
EventMetricsDaily,
Expand Down Expand Up @@ -241,9 +243,6 @@ def _run_aggregation() -> dict[str, Any]:

Separated from the task function to keep the lock management clean.
"""
from account_v2.models import Organization
from workflow_manager.workflow_v2.models.execution import WorkflowExecution

end_date = timezone.now()

# Query windows for each granularity
Expand Down
6 changes: 1 addition & 5 deletions backend/dashboard_metrics/tests/test_tasks.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
"""Unit tests for Dashboard Metrics Celery tasks."""

import uuid
from datetime import timedelta
from datetime import datetime, timedelta

from django.test import TestCase, TransactionTestCase
from django.utils import timezone
Expand Down Expand Up @@ -37,7 +37,6 @@ def test_truncate_to_hour_from_timestamp(self):

def test_truncate_to_hour_from_datetime(self):
"""Test truncating a datetime to the hour."""
from datetime import datetime

dt = datetime(2024, 1, 15, 14, 35, 22, tzinfo=timezone.utc)
result = _truncate_to_hour(dt)
Expand All @@ -49,7 +48,6 @@ def test_truncate_to_hour_from_datetime(self):

def test_truncate_to_hour_naive_datetime(self):
"""Test truncating a naive datetime makes it aware."""
from datetime import datetime

dt = datetime(2024, 1, 15, 14, 35, 22)
result = _truncate_to_hour(dt)
Expand All @@ -60,7 +58,6 @@ def test_truncate_to_hour_naive_datetime(self):

def test_truncate_to_day(self):
"""Test truncating a datetime to midnight."""
from datetime import datetime

dt = datetime(2024, 1, 15, 14, 35, 22, tzinfo=timezone.utc)
result = _truncate_to_day(dt)
Expand All @@ -73,7 +70,6 @@ def test_truncate_to_day(self):

def test_truncate_to_month(self):
"""Test truncating a datetime to first day of month."""
from datetime import datetime

dt = datetime(2024, 1, 15, 14, 35, 22, tzinfo=timezone.utc)
result = _truncate_to_month(dt)
Expand Down
2 changes: 0 additions & 2 deletions backend/dashboard_metrics/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -403,8 +403,6 @@ def series(self, request: Request) -> Response:
)

# Apply granularity-based grouping to cached records
from datetime import datetime

trunc_funcs = {
Granularity.HOUR: lambda ts: ts.replace(
minute=0, second=0, microsecond=0
Expand Down
30 changes: 30 additions & 0 deletions backend/pipeline_v2/serializers/crud.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
import logging
from collections import OrderedDict
from datetime import datetime
from typing import Any

from croniter import croniter
from django.conf import settings
from django.utils import timezone
from pipeline_v2.constants import PipelineConstants as PC
from pipeline_v2.constants import PipelineKey as PK
from pipeline_v2.constants import PipelineScheduling
Expand All @@ -14,6 +16,7 @@
from utils.serializer.integrity_error_mixin import IntegrityErrorMixin
from utils.serializer_utils import SerializerUtils
from workflow_manager.endpoint_v2.models import WorkflowEndpoint
from workflow_manager.workflow_v2.models.execution import WorkflowExecution

from backend.serializers import AuditSerializer
from unstract.connectors.connectorkit import Connectorkit
Expand All @@ -25,6 +28,8 @@
class PipelineSerializer(IntegrityErrorMixin, AuditSerializer):
api_endpoint = SerializerMethodField()
created_by_email = SerializerMethodField()
last_5_run_statuses = SerializerMethodField()
next_run_time = SerializerMethodField()

class Meta:
model = Pipeline
Expand Down Expand Up @@ -200,6 +205,21 @@ def get_created_by_email(self, obj):
"""Get the creator's email address."""
return obj.created_by.email if obj.created_by else None

def get_last_5_run_statuses(self, instance: Pipeline) -> list[dict]:
    """Return the last 5 execution statuses for this pipeline (oldest first)."""
    return WorkflowExecution.get_last_run_statuses(pipeline_id=instance.id, limit=5)

def get_next_run_time(self, instance: Pipeline) -> str | None:
    """Return the next scheduled run time (ISO-8601) from the cron string.

    Returns None for inactive pipelines, pipelines without a schedule, or
    unparseable cron expressions.
    """
    if not instance.cron_string or not instance.active:
        return None
    try:
        cron = croniter(instance.cron_string, timezone.now())
        next_run: datetime = cron.get_next(datetime)
        return next_run.isoformat()
    except ValueError:
        # croniter's errors (CroniterError and subclasses) derive from
        # ValueError; catching that instead of a blanket Exception keeps
        # genuine programming errors from being silently swallowed.
        return None

def create(self, validated_data: dict[str, Any]) -> Any:
# TODO: Deduce pipeline type based on WF?
validated_data[PK.ACTIVE] = True
Expand Down Expand Up @@ -242,6 +262,8 @@ def _add_connector_data(
"""
repr[PC.SOURCE_NAME] = PC.NOT_CONFIGURED
repr[PC.DESTINATION_NAME] = PC.NOT_CONFIGURED
repr["source_instance_name"] = None
repr["destination_instance_name"] = None

for endpoint in workflow_endpoints:
if endpoint.endpoint_type == WorkflowEndpoint.EndpointType.SOURCE:
Expand All @@ -250,6 +272,10 @@ def _add_connector_data(
connectors=connectors,
connector_id=endpoint.connector_instance.connector_id,
)
# User-set connector instance name
repr["source_instance_name"] = (
endpoint.connector_instance.connector_name
)
elif endpoint.endpoint_type == WorkflowEndpoint.EndpointType.DESTINATION:
if (
endpoint.connection_type
Expand All @@ -263,6 +289,10 @@ def _add_connector_data(
connector_id=endpoint.connector_instance.connector_id,
)
)
# User-set connector instance name
repr["destination_instance_name"] = (
endpoint.connector_instance.connector_name
)
return repr

def to_representation(self, instance: Pipeline) -> OrderedDict[str, Any]:
Expand Down
21 changes: 20 additions & 1 deletion backend/pipeline_v2/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,16 +7,18 @@
from api_v2.key_helper import KeyHelper
from api_v2.postman_collection.dto import PostmanCollection
from django.db import IntegrityError
from django.db.models import QuerySet
from django.db.models import F, QuerySet
from django.http import HttpResponse
from permissions.permission import IsOwner, IsOwnerOrSharedUserOrSharedToOrg
from plugins import get_plugin
from rest_framework import serializers, status, viewsets
from rest_framework.decorators import action
from rest_framework.filters import OrderingFilter
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.versioning import URLPathVersioning
from scheduler.helper import SchedulerHelper
from utils.pagination import CustomPagination

from pipeline_v2.constants import (
PipelineConstants,
Expand All @@ -43,6 +45,11 @@
class PipelineViewSet(viewsets.ModelViewSet):
versioning_class = URLPathVersioning
queryset = Pipeline.objects.all()
pagination_class = CustomPagination
filter_backends = [OrderingFilter]
ordering_fields = ["created_at", "last_run_time", "pipeline_name", "run_count"]
# Note: Default ordering with nulls_last is applied in get_queryset()
# DRF's ordering attribute doesn't support nulls_last natively

def get_permissions(self) -> list[Any]:
if self.action in ["destroy", "partial_update", "update"]:
Expand All @@ -65,6 +72,18 @@ def get_queryset(self) -> QuerySet:
if workflow_filter:
queryset = queryset.filter(workflow_id=workflow_filter)

# Search by pipeline name
search = self.request.query_params.get("search", None)
if search:
queryset = queryset.filter(pipeline_name__icontains=search)

# Apply default ordering: last_run_time desc (nulls last), then created_at desc
# This ensures pipelines with recent runs appear first, never-run pipelines at end
queryset = queryset.order_by(
F("last_run_time").desc(nulls_last=True),
F("created_at").desc(),
)

return queryset

def get_serializer_class(self) -> serializers.Serializer:
Expand Down
51 changes: 51 additions & 0 deletions backend/workflow_manager/workflow_v2/models/execution.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@

from workflow_manager.execution.dto import ExecutionCache
from workflow_manager.execution.execution_cache_utils import ExecutionCacheUtils
from workflow_manager.file_execution.models import WorkflowFileExecution
from workflow_manager.workflow_v2.enums import ExecutionStatus
from workflow_manager.workflow_v2.models import Workflow

Expand Down Expand Up @@ -370,3 +371,53 @@ def _handle_execution_cache(self):
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
self._handle_execution_cache()

@classmethod
def get_last_run_statuses(cls, pipeline_id: uuid.UUID, limit: int = 5) -> list[dict]:
    """Fetch the last N execution statuses for a pipeline.

    Computes PARTIAL_SUCCESS dynamically when an execution completed but
    has both successful and failed files.

    Args:
        pipeline_id: UUID of the pipeline (ETL or API deployment)
        limit: Number of recent executions to fetch (default 5)

    Returns:
        List of dicts with execution_id, status, timestamp, and file counts.
        Ordered oldest to newest (for left-to-right timeline display).
    """
    from django.db.models import Count

    executions = list(
        cls.objects.filter(pipeline_id=pipeline_id).order_by("-created_at")[:limit]
    )
    if not executions:
        return []

    # Single grouped query for per-execution COMPLETED/ERROR file counts,
    # replacing the previous two COUNT queries per execution (the TODO'd
    # N+1 pattern). Denormalized counts on WorkflowExecution would still
    # eliminate this query entirely.
    count_rows = (
        WorkflowFileExecution.objects.filter(
            workflow_execution_id__in=[e.id for e in executions],
            status__in=("COMPLETED", "ERROR"),
        )
        .values("workflow_execution_id", "status")
        .annotate(total=Count("id"))
    )
    counts_by_execution: dict = {}
    for row in count_rows:
        counts_by_execution.setdefault(row["workflow_execution_id"], {})[
            row["status"]
        ] = row["total"]

    result = []
    for e in executions:
        file_counts = counts_by_execution.get(e.id, {})
        successful = file_counts.get("COMPLETED", 0)
        failed = file_counts.get("ERROR", 0)

        # PARTIAL_SUCCESS when a completed run has both successes and failures.
        display_status = e.status
        if e.status == "COMPLETED" and failed > 0 and successful > 0:
            display_status = "PARTIAL_SUCCESS"

        result.append(
            {
                "execution_id": str(e.id),
                "status": display_status,
                "timestamp": e.created_at.isoformat() if e.created_at else None,
                "successful_files": successful,
                "failed_files": failed,
            }
        )

    # Oldest first (left-to-right timeline).
    return list(reversed(result))
Loading
Loading