@@ -57,6 +57,27 @@ def raise_if_failed(self):
5757 self .failure_details )
5858
5959
@dataclass
class OrchestrationQuery:
    """Filter criteria for querying orchestration instances.

    A default-constructed query applies no filters (other than the
    non-None instance-count cap below) and matches every instance.
    """

    # Lower bound on instance creation time; None means no lower bound.
    created_time_from: Optional[datetime] = None
    # Upper bound on instance creation time; None means no upper bound.
    created_time_to: Optional[datetime] = None
    # Restrict results to instances in one of these runtime statuses;
    # None means any status.
    runtime_status: Optional[List[OrchestrationStatus]] = None
    # Some backends don't respond well with max_instance_count = None, so we
    # use the integer limit (max 32-bit signed value) for non-paginated
    # results instead.
    max_instance_count: Optional[int] = (1 << 31) - 1
    # When True, also fetch each instance's serialized input and output.
    fetch_inputs_and_outputs: bool = False
69+
70+
@dataclass
class EntityQuery:
    """Filter criteria for querying durable entities.

    A default-constructed query matches every entity and includes entity
    state in the results.
    """

    # Restrict results to entity instance IDs with this prefix;
    # None means no prefix filter.
    instance_id_starts_with: Optional[str] = None
    # Lower bound on last-modified time; None means no lower bound.
    last_modified_from: Optional[datetime] = None
    # Upper bound on last-modified time; None means no upper bound.
    last_modified_to: Optional[datetime] = None
    # When True, include each entity's state in the results.
    include_state: bool = True
    # When True, include transient (e.g. empty/locked) entities as well.
    # NOTE(review): exact transient semantics are backend-defined — confirm.
    include_transient: bool = False
    # Server-side page size for the paginated query; None lets the backend
    # choose.
    page_size: Optional[int] = None
79+
80+
6081@dataclass
6182class PurgeInstancesResult :
6283 deleted_instance_count : int
@@ -170,46 +191,24 @@ def get_orchestration_state(self, instance_id: str, *, fetch_payloads: bool = Tr
170191 return new_orchestration_state (req .instanceId , res )
171192
172193 def get_all_orchestration_states (self ,
173- max_instance_count : Optional [int ] = None ,
174- fetch_inputs_and_outputs : bool = False ) -> List [OrchestrationState ]:
175- return self .get_orchestration_state_by (
176- created_time_from = None ,
177- created_time_to = None ,
178- runtime_status = None ,
179- max_instance_count = max_instance_count ,
180- fetch_inputs_and_outputs = fetch_inputs_and_outputs
181- )
194+ orchestration_query : Optional [OrchestrationQuery ] = None
195+ ) -> List [OrchestrationState ]:
196+ if orchestration_query is None :
197+ orchestration_query = OrchestrationQuery ()
198+ _continuation_token = None
182199
183- def get_orchestration_state_by (self ,
184- created_time_from : Optional [datetime ] = None ,
185- created_time_to : Optional [datetime ] = None ,
186- runtime_status : Optional [List [OrchestrationStatus ]] = None ,
187- max_instance_count : Optional [int ] = None ,
188- fetch_inputs_and_outputs : bool = False ,
189- _continuation_token : Optional [pb2 .StringValue ] = None
190- ) -> List [OrchestrationState ]:
191- if max_instance_count is None :
192- # Some backends do not behave well with max_instance_count = None, so we set to max 32-bit signed value
193- max_instance_count = (1 << 31 ) - 1
194-
195- self ._logger .info (f"Querying orchestration instances with filters - "
196- f"created_time_from={ created_time_from } , "
197- f"created_time_to={ created_time_to } , "
198- f"runtime_status={ [str (status ) for status in runtime_status ] if runtime_status else None } , "
199- f"max_instance_count={ max_instance_count } , "
200- f"fetch_inputs_and_outputs={ fetch_inputs_and_outputs } , "
201- f"continuation_token={ _continuation_token .value if _continuation_token else None } " )
200+ self ._logger .info (f"Querying orchestration instances with query: { orchestration_query } " )
202201
203202 states = []
204203
205204 while True :
206205 req = pb .QueryInstancesRequest (
207206 query = pb .InstanceQuery (
208- runtimeStatus = [status .value for status in runtime_status ] if runtime_status else None ,
209- createdTimeFrom = helpers .new_timestamp (created_time_from ) if created_time_from else None ,
210- createdTimeTo = helpers .new_timestamp (created_time_to ) if created_time_to else None ,
211- maxInstanceCount = max_instance_count ,
212- fetchInputsAndOutputs = fetch_inputs_and_outputs ,
207+ runtimeStatus = [status .value for status in orchestration_query . runtime_status ] if orchestration_query . runtime_status else None ,
208+ createdTimeFrom = helpers .new_timestamp (orchestration_query . created_time_from ) if orchestration_query . created_time_from else None ,
209+ createdTimeTo = helpers .new_timestamp (orchestration_query . created_time_to ) if orchestration_query . created_time_to else None ,
210+ maxInstanceCount = orchestration_query . max_instance_count ,
211+ fetchInputsAndOutputs = orchestration_query . fetch_inputs_and_outputs ,
213212 continuationToken = _continuation_token
214213 )
215214 )
@@ -318,7 +317,6 @@ def purge_orchestrations_by(self,
318317 f"runtime_status={ [str (status ) for status in runtime_status ] if runtime_status else None } , "
319318 f"recursive={ recursive } " )
320319 resp : pb .PurgeInstancesResponse = self ._stub .PurgeInstances (pb .PurgeInstancesRequest (
321- instanceId = None ,
322320 purgeInstanceFilter = pb .PurgeInstanceFilter (
323321 createdTimeFrom = helpers .new_timestamp (created_time_from ) if created_time_from else None ,
324322 createdTimeTo = helpers .new_timestamp (created_time_to ) if created_time_to else None ,
@@ -357,46 +355,24 @@ def get_entity(self,
357355 return EntityMetadata .from_entity_metadata (res .entity , include_state )
358356
359357 def get_all_entities (self ,
360- include_state : bool = True ,
361- include_transient : bool = False ,
362- page_size : Optional [int ] = None ) -> List [EntityMetadata ]:
363- return self .get_entities_by (
364- instance_id_starts_with = None ,
365- last_modified_from = None ,
366- last_modified_to = None ,
367- include_state = include_state ,
368- include_transient = include_transient ,
369- page_size = page_size
370- )
358+ entity_query : Optional [EntityQuery ] = None ) -> List [EntityMetadata ]:
359+ if entity_query is None :
360+ entity_query = EntityQuery ()
361+ _continuation_token = None
371362
372- def get_entities_by (self ,
373- instance_id_starts_with : Optional [str ] = None ,
374- last_modified_from : Optional [datetime ] = None ,
375- last_modified_to : Optional [datetime ] = None ,
376- include_state : bool = True ,
377- include_transient : bool = False ,
378- page_size : Optional [int ] = None ,
379- _continuation_token : Optional [pb2 .StringValue ] = None
380- ) -> List [EntityMetadata ]:
381- self ._logger .info (f"Retrieving entities by filter: "
382- f"instance_id_starts_with={ instance_id_starts_with } , "
383- f"last_modified_from={ last_modified_from } , "
384- f"last_modified_to={ last_modified_to } , "
385- f"include_state={ include_state } , "
386- f"include_transient={ include_transient } , "
387- f"page_size={ page_size } " )
363+ self ._logger .info (f"Retrieving entities by filter: { entity_query } " )
388364
389365 entities = []
390366
391367 while True :
392368 query_request = pb .QueryEntitiesRequest (
393369 query = pb .EntityQuery (
394- instanceIdStartsWith = helpers .get_string_value (instance_id_starts_with ),
395- lastModifiedFrom = helpers .new_timestamp (last_modified_from ) if last_modified_from else None ,
396- lastModifiedTo = helpers .new_timestamp (last_modified_to ) if last_modified_to else None ,
397- includeState = include_state ,
398- includeTransient = include_transient ,
399- pageSize = helpers .get_int_value (page_size ),
370+ instanceIdStartsWith = helpers .get_string_value (entity_query . instance_id_starts_with ),
371+ lastModifiedFrom = helpers .new_timestamp (entity_query . last_modified_from ) if entity_query . last_modified_from else None ,
372+ lastModifiedTo = helpers .new_timestamp (entity_query . last_modified_to ) if entity_query . last_modified_to else None ,
373+ includeState = entity_query . include_state ,
374+ includeTransient = entity_query . include_transient ,
375+ pageSize = helpers .get_int_value (entity_query . page_size ),
400376 continuationToken = _continuation_token
401377 )
402378 )
@@ -414,13 +390,13 @@ def get_entities_by(self,
414390
415391 def clean_entity_storage (self ,
416392 remove_empty_entities : bool = True ,
417- release_orphaned_locks : bool = True ,
418- _continuation_token : Optional [pb2 .StringValue ] = None
393+ release_orphaned_locks : bool = True
419394 ) -> CleanEntityStorageResult :
420395 self ._logger .info ("Cleaning entity storage" )
421396
422397 empty_entities_removed = 0
423398 orphaned_locks_released = 0
399+ _continuation_token = None
424400
425401 while True :
426402 req = pb .CleanEntityStorageRequest (
0 commit comments