
Commit 4538dc0

pangea-sdk: remove llm_info and llm_input from AI Guard
1 parent 036e398 commit 4538dc0

6 files changed: 9 additions, 127 deletions

.github/CODEOWNERS

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-* @pangeacyber/sdks @doubletooth
+* @pangeacyber/sdks

CHANGELOG.md

Lines changed: 4 additions & 0 deletions
@@ -15,6 +15,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 - Redact: `score` in `RecognizerResult` is now a float.
 
+### Removed
+
+- AI Guard: `llm_info` and `llm_input`.
+
 ## 5.5.1 - 2025-02-17
 
 ### Changed
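For callers migrating off the removed parameters, a minimal before/after sketch follows. It assumes the sync client is imported as AIGuard and authenticated with a Pangea token; the import path, environment variable name, provider hint, and model name are illustrative assumptions, not taken from this diff.

import os

from pangea.services import AIGuard  # assumed import path; not shown in this diff

ai_guard = AIGuard(token=os.environ["PANGEA_AI_GUARD_TOKEN"])  # illustrative env var name

# Before this commit: the full LLM payload could be sent via `llm_input`,
# optionally tagged with an `llm_info` provider hint.
# response = ai_guard.guard_text(
#     llm_input={"model": "gpt-4o", "messages": [{"role": "user", "content": "hello world"}]},
#     llm_info="openai",  # illustrative provider hint
# )

# After this commit: pass the messages list directly, as in the surviving overloads.
response = ai_guard.guard_text(messages=[{"role": "user", "content": "hello world"}])
assert response.status == "Success"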

packages/pangea-sdk/pangea/asyncio/services/ai_guard.py

Lines changed: 2 additions & 55 deletions
@@ -58,7 +58,6 @@ async def guard_text(
         *,
         recipe: str | None = None,
         debug: bool | None = None,
-        llm_info: str | None = None,
         log_fields: LogFields | None = None,
     ) -> PangeaResponse[TextGuardResult[None]]:
         """
@@ -78,7 +77,6 @@ async def guard_text(
                 are to be applied to the text, such as defang malicious URLs.
             debug: Setting this value to true will provide a detailed analysis
                 of the text data
-            llm_info: Short string hint for the LLM Provider information
             log_field: Additional fields to include in activity log
 
         Examples:
@@ -92,7 +90,6 @@ async def guard_text(
         messages: _T,
         recipe: str | None = None,
         debug: bool | None = None,
-        llm_info: str | None = None,
         log_fields: LogFields | None = None,
     ) -> PangeaResponse[TextGuardResult[_T]]:
         """
@@ -113,59 +110,19 @@ async def guard_text(
                 are to be applied to the text, such as defang malicious URLs.
             debug: Setting this value to true will provide a detailed analysis
                 of the text data
-            llm_info: Short string hint for the LLM Provider information
             log_field: Additional fields to include in activity log
 
         Examples:
             response = await ai_guard.guard_text(messages=[{"role": "user", "content": "hello world"}])
         """
 
-    @overload
-    async def guard_text(
-        self,
-        *,
-        llm_input: _T,
-        recipe: str | None = None,
-        debug: bool | None = None,
-        llm_info: str | None = None,
-        log_fields: LogFields | None = None,
-    ) -> PangeaResponse[TextGuardResult[_T]]:
-        """
-        Text Guard for scanning LLM inputs and outputs
-
-        Analyze and redact text to avoid manipulation of the model, addition of
-        malicious content, and other undesirable data transfers.
-
-        OperationId: ai_guard_post_v1_text_guard
-
-        Args:
-            llm_input: Structured full llm payload data to be scanned by AI
-                Guard for PII, sensitive data, malicious content, and other data
-                types defined by the configuration. Supports processing up to
-                10KB of JSON text
-            recipe: Recipe key of a configuration of data types and settings
-                defined in the Pangea User Console. It specifies the rules that
-                are to be applied to the text, such as defang malicious URLs.
-            debug: Setting this value to true will provide a detailed analysis
-                of the text data
-            llm_info: Short string hint for the LLM Provider information
-            log_field: Additional fields to include in activity log
-
-        Examples:
-            response = await ai_guard.guard_text(
-                llm_input={"model": "gpt-4o", "messages": [{"role": "user", "content": "hello world"}]}
-            )
-        """
-
     async def guard_text(  # type: ignore[misc]
         self,
         text: str | None = None,
         *,
         messages: _T | None = None,
-        llm_input: _T | None = None,
         recipe: str | None = None,
         debug: bool | None = None,
-        llm_info: str | None = None,
         log_fields: LogFields | None = None,
     ) -> PangeaResponse[TextGuardResult[None]]:
         """
@@ -184,38 +141,28 @@ async def guard_text(  # type: ignore[misc]
                 PII, sensitive data, malicious content, and other data types
                 defined by the configuration. Supports processing up to 10KB of
                 JSON text
-            llm_input: Structured full llm payload data to be scanned by AI
-                Guard for PII, sensitive data, malicious content, and other data
-                types defined by the configuration. Supports processing up to
-                10KB of JSON text
             recipe: Recipe key of a configuration of data types and settings
                 defined in the Pangea User Console. It specifies the rules that
                 are to be applied to the text, such as defang malicious URLs.
             debug: Setting this value to true will provide a detailed analysis
                 of the text data
-            llm_info: Short string hint for the LLM Provider information
             log_field: Additional fields to include in activity log
 
         Examples:
             response = await ai_guard.guard_text("text")
         """
 
-        if not any((text, messages, llm_input)):
-            raise ValueError("Exactly one of `text`, `messages`, or `llm_input` must be given")
-
-        if sum((text is not None, messages is not None, llm_input is not None)) > 1:
-            raise ValueError("Only one of `text`, `messages`, or `llm_input` can be given at once")
+        if text is not None and messages is not None:
+            raise ValueError("Exactly one of `text` or `messages` must be given")
 
         return await self.request.post(
             "v1/text/guard",
             TextGuardResult,
             data={
                 "text": text,
                 "messages": messages,
-                "llm_input": llm_input,
                 "recipe": recipe,
                 "debug": debug,
-                "llm_info": llm_info,
                 "log_fields": log_fields,
             },
         )
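A quick sketch of the tightened argument validation in the async client, assuming the class is exposed as AIGuardAsync under pangea.asyncio.services; the import path, class name, and token handling are assumptions from the wider SDK, not from this diff. After this commit, `text` and `messages` are the only inputs, and supplying both raises a ValueError.

import asyncio
import os

from pangea.asyncio.services import AIGuardAsync  # assumed import path; not shown in this diff


async def main() -> None:
    ai_guard = AIGuardAsync(token=os.environ["PANGEA_AI_GUARD_TOKEN"])  # illustrative env var name

    # Supplying both `text` and `messages` now fails fast.
    try:
        await ai_guard.guard_text("hello world", messages=[{"role": "user", "content": "hello world"}])
    except ValueError as exc:
        print(exc)  # Exactly one of `text` or `messages` must be given

    # A single positional string still works as before.
    response = await ai_guard.guard_text("hello world")
    print(response.status)


asyncio.run(main())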

packages/pangea-sdk/pangea/services/ai_guard.py

Lines changed: 2 additions & 55 deletions
@@ -160,7 +160,6 @@ def guard_text(
         *,
         recipe: str | None = None,
         debug: bool | None = None,
-        llm_info: str | None = None,
         log_fields: LogFields | None = None,
     ) -> PangeaResponse[TextGuardResult[None]]:
         """
@@ -180,7 +179,6 @@ def guard_text(
                 are to be applied to the text, such as defang malicious URLs.
             debug: Setting this value to true will provide a detailed analysis
                 of the text data
-            llm_info: Short string hint for the LLM Provider information
             log_field: Additional fields to include in activity log
 
         Examples:
@@ -194,7 +192,6 @@ def guard_text(
         messages: _T,
         recipe: str | None = None,
         debug: bool | None = None,
-        llm_info: str | None = None,
         log_fields: LogFields | None = None,
     ) -> PangeaResponse[TextGuardResult[_T]]:
         """
@@ -215,59 +212,19 @@ def guard_text(
                 are to be applied to the text, such as defang malicious URLs.
             debug: Setting this value to true will provide a detailed analysis
                 of the text data
-            llm_info: Short string hint for the LLM Provider information
             log_field: Additional fields to include in activity log
 
         Examples:
             response = ai_guard.guard_text(messages=[{"role": "user", "content": "hello world"}])
         """
 
-    @overload
-    def guard_text(
-        self,
-        *,
-        llm_input: _T,
-        recipe: str | None = None,
-        debug: bool | None = None,
-        llm_info: str | None = None,
-        log_fields: LogFields | None = None,
-    ) -> PangeaResponse[TextGuardResult[_T]]:
-        """
-        Text Guard for scanning LLM inputs and outputs
-
-        Analyze and redact text to avoid manipulation of the model, addition of
-        malicious content, and other undesirable data transfers.
-
-        OperationId: ai_guard_post_v1_text_guard
-
-        Args:
-            llm_input: Structured full llm payload data to be scanned by AI
-                Guard for PII, sensitive data, malicious content, and other data
-                types defined by the configuration. Supports processing up to
-                10KB of JSON text
-            recipe: Recipe key of a configuration of data types and settings
-                defined in the Pangea User Console. It specifies the rules that
-                are to be applied to the text, such as defang malicious URLs.
-            debug: Setting this value to true will provide a detailed analysis
-                of the text data
-            llm_info: Short string hint for the LLM Provider information
-            log_field: Additional fields to include in activity log
-
-        Examples:
-            response = ai_guard.guard_text(
-                llm_input={"model": "gpt-4o", "messages": [{"role": "user", "content": "hello world"}]}
-            )
-        """
-
     def guard_text(  # type: ignore[misc]
         self,
         text: str | None = None,
         *,
         messages: _T | None = None,
-        llm_input: _T | None = None,
         recipe: str | None = None,
         debug: bool | None = None,
-        llm_info: str | None = None,
         log_fields: LogFields | None = None,
     ) -> PangeaResponse[TextGuardResult[None]]:
         """
@@ -286,38 +243,28 @@ def guard_text(  # type: ignore[misc]
                 PII, sensitive data, malicious content, and other data types
                 defined by the configuration. Supports processing up to 10KB of
                 JSON text
-            llm_input: Structured full llm payload data to be scanned by AI
-                Guard for PII, sensitive data, malicious content, and other data
-                types defined by the configuration. Supports processing up to
-                10KB of JSON text
             recipe: Recipe key of a configuration of data types and settings
                 defined in the Pangea User Console. It specifies the rules that
                 are to be applied to the text, such as defang malicious URLs.
             debug: Setting this value to true will provide a detailed analysis
                 of the text data
-            llm_info: Short string hint for the LLM Provider information
             log_field: Additional fields to include in activity log
 
         Examples:
             response = ai_guard.guard_text("text")
         """
 
-        if not any((text, messages, llm_input)):
-            raise ValueError("At least one of `text`, `messages`, or `llm_input` must be given")
-
-        if sum((text is not None, messages is not None, llm_input is not None)) > 1:
-            raise ValueError("Only one of `text`, `messages`, or `llm_input` can be given at once")
+        if text is not None and messages is not None:
+            raise ValueError("Exactly one of `text` or `messages` must be given")
 
         return self.request.post(
             "v1/text/guard",
             TextGuardResult,
             data={
                 "text": text,
                 "messages": messages,
-                "llm_input": llm_input,
                 "recipe": recipe,
                 "debug": debug,
-                "llm_info": llm_info,
                 "log_fields": log_fields,
             },
         )
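The keyword surface that remains after this commit is `recipe`, `debug`, and `log_fields`. A rough usage sketch of the sync client follows, again assuming an AIGuard class from the wider SDK; the import path, token placeholder, and recipe key are illustrative, and `log_fields` (typed as LogFields) is omitted since its construction is not shown in this diff.

from pangea.services import AIGuard  # assumed import path; not shown in this diff

ai_guard = AIGuard(token="pts_...")  # placeholder token

response = ai_guard.guard_text(
    messages=[{"role": "user", "content": "hello world"}],
    recipe="pangea_prompt_guard",  # illustrative recipe key defined in the Pangea User Console
    debug=True,  # request a detailed analysis of the text data
)
print(response.result)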

packages/pangea-sdk/tests/integration/asyncio/test_ai_guard.py

Lines changed: 0 additions & 8 deletions
@@ -44,11 +44,3 @@ async def test_text_guard_messages(self) -> None:
         assert response.status == "Success"
         assert response.result
         assert response.result.prompt_messages
-
-    async def test_text_guard_llm_input(self) -> None:
-        response = await self.client.guard_text(
-            llm_input={"model": "gpt-4o", "messages": [{"role": "user", "content": "hello world"}]}
-        )
-        assert response.status == "Success"
-        assert response.result
-        assert response.result.prompt_messages

packages/pangea-sdk/tests/integration/test_ai_guard.py

Lines changed: 0 additions & 8 deletions
@@ -37,11 +37,3 @@ def test_text_guard_messages(self) -> None:
         assert response.status == "Success"
         assert response.result
         assert response.result.prompt_messages
-
-    def test_text_guard_llm_input(self) -> None:
-        response = self.client.guard_text(
-            llm_input={"model": "gpt-4o", "messages": [{"role": "user", "content": "hello world"}]}
-        )
-        assert response.status == "Success"
-        assert response.result
-        assert response.result.prompt_messages
