77from pangea .asyncio .services .base import ServiceBaseAsync
88from pangea .config import PangeaConfig
99from pangea .response import PangeaResponse
10- from pangea .services .ai_guard import TextGuardResult
10+ from pangea .services .ai_guard import LogFields , TextGuardResult
1111
1212_T = TypeVar ("_T" )
1313
@@ -54,10 +54,12 @@ def __init__(
    @overload
    async def guard_text(
        self,
        text: str,
        *,
        recipe: str | None = None,
        debug: bool | None = None,
        llm_info: str | None = None,
        log_fields: LogFields | None = None,
    ) -> PangeaResponse[TextGuardResult[None]]:
        """
        Text Guard for scanning LLM inputs and outputs

        Analyze and redact text to avoid manipulation of the model, addition of
        malicious content, and other undesirable data transfers.

        OperationId: ai_guard_post_v1_text_guard

        Args:
            text: Text to be scanned by AI Guard for PII, sensitive data,
                malicious content, and other data types defined by the
                configuration. Supports processing up to 10KB of text.
            recipe: Recipe key of a configuration of data types and settings
                defined in the Pangea User Console. It specifies the rules that
                are to be applied to the text, such as defang malicious URLs.
            debug: Setting this value to true will provide a detailed analysis
                of the text data
            llm_info: Short string hint for the LLM Provider information
            log_fields: Additional fields to include in activity log

        Examples:
            response = await ai_guard.guard_text("text")
        """
@@ -84,10 +88,12 @@ async def guard_text(
    @overload
    async def guard_text(
        self,
        *,
        messages: _T,
        recipe: str | None = None,
        debug: bool | None = None,
        llm_info: str | None = None,
        log_fields: LogFields | None = None,
    ) -> PangeaResponse[TextGuardResult[_T]]:
        """
        Text Guard for scanning LLM inputs and outputs

        Analyze and redact text to avoid manipulation of the model, addition of
        malicious content, and other undesirable data transfers.

        OperationId: ai_guard_post_v1_text_guard

        Args:
            messages: Structured messages data to be scanned by AI Guard for
                PII, sensitive data, malicious content, and other data types
                defined by the configuration. Supports processing up to 10KB of
                JSON text
            recipe: Recipe key of a configuration of data types and settings
                defined in the Pangea User Console. It specifies the rules that
                are to be applied to the text, such as defang malicious URLs.
            debug: Setting this value to true will provide a detailed analysis
                of the text data
            llm_info: Short string hint for the LLM Provider information
            log_fields: Additional fields to include in activity log

        Examples:
            response = await ai_guard.guard_text(messages=[{"role": "user", "content": "hello world"}])
        """
115122
    @overload
    async def guard_text(
        self,
        *,
        llm_input: _T,
        recipe: str | None = None,
        debug: bool | None = None,
        llm_info: str | None = None,
        log_fields: LogFields | None = None,
    ) -> PangeaResponse[TextGuardResult[_T]]:
        """
        Text Guard for scanning LLM inputs and outputs

        Analyze and redact text to avoid manipulation of the model, addition of
        malicious content, and other undesirable data transfers.

        OperationId: ai_guard_post_v1_text_guard

        Args:
            llm_input: Structured full llm payload data to be scanned by AI
                Guard for PII, sensitive data, malicious content, and other data
                types defined by the configuration. Supports processing up to
                10KB of JSON text
            recipe: Recipe key of a configuration of data types and settings
                defined in the Pangea User Console. It specifies the rules that
                are to be applied to the text, such as defang malicious URLs.
            debug: Setting this value to true will provide a detailed analysis
                of the text data
            llm_info: Short string hint for the LLM Provider information
            log_fields: Additional fields to include in activity log

        Examples:
            response = await ai_guard.guard_text(
                llm_input={"model": "gpt-4o", "messages": [{"role": "user", "content": "hello world"}]}
            )
        """
159+
160+ async def guard_text ( # type: ignore[misc]
161+ self ,
162+ text : str | None = None ,
163+ * ,
164+ messages : _T | None = None ,
165+ llm_input : _T | None = None ,
166+ recipe : str | None = None ,
167+ debug : bool | None = None ,
168+ llm_info : str | None = None ,
169+ log_fields : LogFields | None = None ,
170+ ) -> PangeaResponse [TextGuardResult [None ]]:
171+ """
172+ Text Guard for scanning LLM inputs and outputs
173+
174+ Analyze and redact text to avoid manipulation of the model, addition of
175+ malicious content, and other undesirable data transfers.
176+
177+ OperationId: ai_guard_post_v1_text_guard
178+
179+ Args:
180+ text: Text to be scanned by AI Guard for PII, sensitive data,
181+ malicious content, and other data types defined by the
182+ configuration. Supports processing up to 10KB of text.
183+ messages: Structured messages data to be scanned by AI Guard for
184+ PII, sensitive data, malicious content, and other data types
185+ defined by the configuration. Supports processing up to 10KB of
186+ JSON text
187+ llm_input: Structured full llm payload data to be scanned by AI
188+ Guard for PII, sensitive data, malicious content, and other data
189+ types defined by the configuration. Supports processing up to
190+ 10KB of JSON text
191+ recipe: Recipe key of a configuration of data types and settings
192+ defined in the Pangea User Console. It specifies the rules that
193+ are to be applied to the text, such as defang malicious URLs.
194+ debug: Setting this value to true will provide a detailed analysis
195+ of the text data
196+ llm_info: Short string hint for the LLM Provider information
197+ log_field: Additional fields to include in activity log
140198
141199 Examples:
142200 response = await ai_guard.guard_text("text")
143201 """
144202
203+ if not any ((text , messages , llm_input )):
204+ raise ValueError ("Exactly one of `text`, `messages`, or `llm_input` must be given" )
205+
206+ if sum ((text is not None , messages is not None , llm_input is not None )) > 1 :
207+ raise ValueError ("Only one of `text`, `messages`, or `llm_input` can be given at once" )
208+
145209 return await self .request .post (
146210 "v1/text/guard" ,
147211 TextGuardResult ,
148212 data = {
149- "text" if isinstance (text_or_messages , str ) else "messages" : text_or_messages ,
213+ "text" : text ,
214+ "messages" : messages ,
215+ "llm_input" : llm_input ,
150216 "recipe" : recipe ,
151217 "debug" : debug ,
218+ "llm_info" : llm_info ,
219+ "log_fields" : log_fields ,
152220 },
153221 )
0 commit comments