feat: Granules Sort by time or other properties (#532)
* breaking: using latest uds-lib + update docker

* feat: use latest uds-lib

* chore: allow local ES

* fix: add es type

* fix: adding a no-ssl option

* feat: optionally omitting cumulus in collection creation

* feat: split archive api to a different file to simplify documentation

* feat: move one more archive api

* fix: add granule addition (w/o cumulus)

* feat: allow cross collection query + bbox

* feat: Api documentation (#522)

* chore: add documentation

* chore: update names

* chore: update tag

* fix: boto3 s3 transfer lib issue

* feat: add sort by argument

* update keyword to match ogc

* fix: bbox needs to check for None type

* chore: dummy
wphyojpl authored Feb 10, 2025
1 parent 3ece102 commit 568ee45
Showing 3 changed files with 39 additions and 9 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -111,7 +111,7 @@ Questions about our project? Please see our: [FAQ]([INSERT LINK TO FAQ / DISCUSS
1. Question 1
- Answer to question 1.
2. Question 2
-   - Answer to question 2
+   - Answer to question 2.
-->

<!-- example FAQ inline with no questions yet>
36 changes: 31 additions & 5 deletions cumulus_lambda_functions/uds_api/dapa/granules_dapa_query_es.py
@@ -20,7 +20,7 @@


class GranulesDapaQueryEs:
- def __init__(self, collection_id, limit, offset, input_datetime, filter_input, pagination_link_obj: PaginationLinksGenerator, base_url, bbox=None):
+ def __init__(self, collection_id, limit, offset, input_datetime, filter_input, pagination_link_obj: PaginationLinksGenerator, base_url, bbox=None, sort_by=None):
self.__collection_cnm_lambda_name = os.environ.get('COLLECTION_CREATION_LAMBDA_NAME', '').strip()
self.__pagination_link_obj = pagination_link_obj
self.__input_datetime = input_datetime
@@ -31,6 +31,35 @@ def __init__(self, collection_id, limit, offset, input_datetime, filter_input, p
self.__filter_input = filter_input
self.__granules_index = GranulesDbIndex()
self.__bbox = bbox
+ self.__sort_by = sort_by

+ def get_sorting_arguments(self):
+ if self.__sort_by is None or self.__sort_by == '':
+ return [
+ {'properties.datetime': {'order': 'desc'}},
+ {'id': {'order': 'asc'}}
+ ]
+ sorting_dict = {}
+ sort_keys = [k.strip() for k in self.__sort_by.split(',')]
+ for each_key in sort_keys:
+ if each_key.startswith('+'):
+ sorting_dict[each_key[1:]] = {'order': 'asc'}
+ elif each_key.startswith('-'):
+ sorting_dict[each_key[1:]] = {'order': 'desc'}
+ else:
+ sorting_dict[each_key] = {'order': 'asc'}
+ if 'properties.datetime' not in sorting_dict:
+ sorting_dict['properties.datetime'] = {'order': 'desc'}
+ if 'id' not in sorting_dict:
+ sorting_dict['id'] = {'order': 'asc'}
+
+ sorting_array = [
+ {'properties.datetime': sorting_dict.pop('properties.datetime')},
+ {'id': sorting_dict.pop('id')},
+ ]
+ for k, v in sorting_dict.items():
+ sorting_array.append({k: v})
+ return sorting_array

def __generate_es_dsl(self):
query_terms = []
@@ -52,10 +52,7 @@ def __generate_es_dsl(self):
'track_total_hits': self.__offset is None,
'size': self.__limit,
# "collapse": {"field": "id"},
- 'sort': [
- {'properties.datetime': {'order': 'desc'}},
- {'id': {'order': 'asc'}}
- ],
+ 'sort': self.get_sorting_arguments(),
'query': {
'bool': {
'must': query_terms
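
The get_sorting_arguments method added above converts the comma-separated sortby string into an Elasticsearch sort array. properties.datetime and id are always emitted first so that pagination stays deterministic; a user-supplied key changes the direction of those two fields when named explicitly and otherwise acts as a lower-priority tie-breaker. Below is a minimal standalone sketch of that mapping (an illustration only, not code from this repository):

```python
from typing import Optional

def build_sort(sort_by: Optional[str]):
    """Mirror of the sortby parsing shown in get_sorting_arguments above."""
    # Default when no sortby is supplied: newest datetime first, id as tie-breaker.
    if not sort_by:
        return [{'properties.datetime': {'order': 'desc'}}, {'id': {'order': 'asc'}}]
    sorting_dict = {}
    for key in (k.strip() for k in sort_by.split(',')):
        if key.startswith('+'):
            sorting_dict[key[1:]] = {'order': 'asc'}
        elif key.startswith('-'):
            sorting_dict[key[1:]] = {'order': 'desc'}
        else:
            sorting_dict[key] = {'order': 'asc'}  # missing operator defaults to ascending
    # datetime and id always lead the sort array so deep paging stays stable.
    sorting_dict.setdefault('properties.datetime', {'order': 'desc'})
    sorting_dict.setdefault('id', {'order': 'asc'})
    sort_array = [
        {'properties.datetime': sorting_dict.pop('properties.datetime')},
        {'id': sorting_dict.pop('id')},
    ]
    sort_array.extend({k: v} for k, v in sorting_dict.items())
    return sort_array

print(build_sort('-properties.created,id'))
# [{'properties.datetime': {'order': 'desc'}}, {'id': {'order': 'asc'}},
#  {'properties.created': {'order': 'desc'}}]
```
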
10 changes: 7 additions & 3 deletions cumulus_lambda_functions/uds_api/granules_api.py
@@ -75,7 +75,11 @@ async def get_granules_dapa(request: Request, collection_id: str=Path(descriptio
offset: Union[str, None] = Query(None, description='Pagination Item from current page to get the next page'),
datetime: Union[str, None] = Query(None, description='Example: 2018-02-12T23:20:50Z'),
filter: Union[str, None] = Query(None, description="OGC CQL filters: https://portal.ogc.org/files/96288#rc_cql-text -- Example: id in (g1,g2,g3) and tags::core = 'level-3' and (time1 < 34 or time1 > 14)"),
- bbox: Union[str, None]=Query(None, description='Bounding box in minx,miny,maxx,maxy -- Example: bbox=12.3,0.3,14.4,2.3')):
+ bbox: Union[str, None]=Query(None, description='Bounding box in minx,miny,maxx,maxy -- Example: bbox=12.3,0.3,14.4,2.3'),
+ sortby: Union[str, None]=Query(None, description='Sort the results based on the comma separated parameters, each sorting key can be started with + / - for ascending / descending order. missing operator is assumed "+". Example: sortby=+id,-properties.created'),
+ ):
+ # https://docs.ogc.org/DRAFTS/24-030.html#sortby-parameter
+ # https://docs.ogc.org/DRAFTS/24-030.html#_declaring_default_sort_order
authorizer: UDSAuthorizorAbstract = UDSAuthorizerFactory() \
.get_instance(UDSAuthorizerFactory.cognito,
es_url=os.getenv('ES_URL'),
@@ -95,8 +95,8 @@
try:
pagination_links = PaginationLinksGenerator(request)
api_base_prefix = FastApiUtils.get_api_base_prefix()
- bbox_array = [float(k) for k in bbox.split(',')]
- granules_dapa_query = GranulesDapaQueryEs(collection_id, limit, offset, datetime, filter, pagination_links, f'{pagination_links.base_url}/{api_base_prefix}', bbox_array)
+ bbox_array = [float(k) for k in bbox.split(',')] if bbox is not None else None
+ granules_dapa_query = GranulesDapaQueryEs(collection_id, limit, offset, datetime, filter, pagination_links, f'{pagination_links.base_url}/{api_base_prefix}', bbox_array, sortby)
granules_result = granules_dapa_query.start()
except Exception as e:
LOGGER.exception('failed during get_granules_dapa')
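
With these changes a client can pass sortby alongside the existing DAPA query parameters. A hypothetical request sketch follows; the base URL, route, token, and collection id are placeholders for illustration, not values taken from this commit:

```python
import requests

BASE_URL = 'https://uds.example.com/api'  # placeholder deployment URL
COLLECTION_ID = 'urn:nasa:unity:project:DEV:example___001'  # placeholder collection id

response = requests.get(
    f'{BASE_URL}/collections/{COLLECTION_ID}/items',  # assumed granules (DAPA) route
    params={
        'limit': 10,
        'bbox': '12.3,0.3,14.4,2.3',
        'sortby': '-properties.created,+id',  # '-' descending, '+' or no prefix ascending
    },
    headers={'Authorization': 'Bearer <token>'},  # endpoint checks authorization
)
response.raise_for_status()
# assuming a GeoJSON FeatureCollection response body
for feature in response.json().get('features', []):
    print(feature['id'], feature['properties'].get('created'))
```

Note that bbox is now optional in the handler as well: it is only split into floats when it is not None, matching the bbox None-check fix in the commit message.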
