 from litellm import ModelResponse
 from litellm.types.utils import Delta, StreamingChoices
 
-from codegate.pipeline.base import CodeSnippet, PipelineContext
+from codegate.pipeline.base import AlertSeverity, CodeSnippet, PipelineContext
 from codegate.pipeline.extract_snippets.extract_snippets import extract_snippets
 from codegate.pipeline.output import OutputPipelineContext, OutputPipelineStep
 from codegate.storage import StorageEngine
@@ -85,6 +85,11 @@ async def _snippet_comment(self, snippet: CodeSnippet, context: PipelineContext)
                 archived packages: {libobjects_text}\n"
             comment += "\n### 🚨 Warnings\n" + "\n".join(warnings) + "\n"
 
+        # Add an alert to the context
+        context.add_alert(
+            self.name, trigger_string=comment, severity_category=AlertSeverity.CRITICAL
+        )
+
         return comment
 
     def _split_chunk_at_code_end(self, content: str) -> tuple[str, str]:
@@ -147,9 +152,6 @@ async def process_chunk(
                 chunks.append(self._create_chunk(chunk, after))
                 complete_comment += after
 
-            # Add an alert to the context
-            input_context.add_alert(self.name, trigger_string=complete_comment)
-
             return chunks
 
         # Pass through all other content that does not create a new snippet
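For readers following along, here is a minimal, self-contained sketch of the pattern this diff introduces: the step that builds the warning comment now records the alert itself, tagged AlertSeverity.CRITICAL, rather than leaving a default-severity alert to process_chunk. The Alert dataclass, the INFO member, the default severity, and the exact add_alert signature below are assumptions made for illustration, not the real codegate implementations.

from dataclasses import dataclass, field
from enum import Enum


class AlertSeverity(Enum):
    # Only CRITICAL is confirmed by the diff; INFO is an assumed default tier.
    INFO = "info"
    CRITICAL = "critical"


@dataclass
class Alert:
    # Hypothetical container; the real codegate alert record may differ.
    step_name: str
    trigger_string: str
    severity_category: AlertSeverity


@dataclass
class PipelineContext:
    alerts: list[Alert] = field(default_factory=list)

    def add_alert(
        self,
        step_name: str,
        trigger_string: str,
        severity_category: AlertSeverity = AlertSeverity.INFO,
    ) -> None:
        """Record an alert raised by a pipeline step against this context."""
        self.alerts.append(Alert(step_name, trigger_string, severity_category))


def snippet_comment(step_name: str, warnings: list[str], context: PipelineContext) -> str:
    """Build the warning comment and attach a CRITICAL alert in the same place."""
    comment = "\n### 🚨 Warnings\n" + "\n".join(warnings) + "\n"
    context.add_alert(step_name, trigger_string=comment, severity_category=AlertSeverity.CRITICAL)
    return comment


if __name__ == "__main__":
    ctx = PipelineContext()
    snippet_comment("code-comment", ["package `example-pkg` is archived"], ctx)
    print(ctx.alerts[0].severity_category)  # AlertSeverity.CRITICAL

Raising the alert inside _snippet_comment, where the warning text is produced, keeps the alert next to its trigger and lets the severity be set explicitly, instead of emitting an unclassified alert from process_chunk after the chunks have already been assembled.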