From 07886c980c417a2ba9a3d43f9c45d825c2e72ecb Mon Sep 17 00:00:00 2001 From: Jover Lee Date: Fri, 12 Jul 2024 11:46:26 -0700 Subject: [PATCH] git subrepo pull (merge) ingest/vendored subrepo: subdir: "ingest/vendored" merged: "258ab8c" upstream: origin: "https://github.com/nextstrain/ingest" branch: "main" commit: "258ab8c" git-subrepo: version: "0.4.6" origin: "https://github.com/ingydotnet/git-subrepo" commit: "110b9eb" --- ingest/vendored/.cramrc | 3 - ingest/vendored/.github/dependabot.yml | 17 ++ ingest/vendored/.github/workflows/ci.yaml | 10 +- .../.github/workflows/pre-commit.yaml | 14 ++ ingest/vendored/.gitrepo | 4 +- ingest/vendored/.pre-commit-config.yaml | 40 +++ ingest/vendored/README.md | 47 ++-- ingest/vendored/apply-geolocation-rules | 234 ------------------ ingest/vendored/merge-user-metadata | 55 ---- .../transform-strain-names.t | 17 -- ingest/vendored/transform-authors | 66 ----- ingest/vendored/transform-field-names | 48 ---- ingest/vendored/transform-genbank-location | 43 ---- ingest/vendored/transform-strain-names | 50 ---- 14 files changed, 103 insertions(+), 545 deletions(-) delete mode 100644 ingest/vendored/.cramrc create mode 100644 ingest/vendored/.github/dependabot.yml create mode 100644 ingest/vendored/.github/workflows/pre-commit.yaml create mode 100644 ingest/vendored/.pre-commit-config.yaml delete mode 100755 ingest/vendored/apply-geolocation-rules delete mode 100755 ingest/vendored/merge-user-metadata delete mode 100644 ingest/vendored/tests/transform-strain-names/transform-strain-names.t delete mode 100755 ingest/vendored/transform-authors delete mode 100755 ingest/vendored/transform-field-names delete mode 100755 ingest/vendored/transform-genbank-location delete mode 100755 ingest/vendored/transform-strain-names diff --git a/ingest/vendored/.cramrc b/ingest/vendored/.cramrc deleted file mode 100644 index 153d20f..0000000 --- a/ingest/vendored/.cramrc +++ /dev/null @@ -1,3 +0,0 @@ -[cram] -shell = /bin/bash -indent = 2 diff --git a/ingest/vendored/.github/dependabot.yml b/ingest/vendored/.github/dependabot.yml new file mode 100644 index 0000000..89bd084 --- /dev/null +++ b/ingest/vendored/.github/dependabot.yml @@ -0,0 +1,17 @@ +# Dependabot configuration file +# +# +# Each ecosystem is checked on a scheduled interval defined below. To trigger +# a check manually, go to +# +# https://github.com/nextstrain/ingest/network/updates +# +# and look for a "Check for updates" button. You may need to click around a +# bit first. 
+--- +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" diff --git a/ingest/vendored/.github/workflows/ci.yaml b/ingest/vendored/.github/workflows/ci.yaml index c6a218a..c716277 100644 --- a/ingest/vendored/.github/workflows/ci.yaml +++ b/ingest/vendored/.github/workflows/ci.yaml @@ -11,13 +11,5 @@ jobs: shellcheck: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: nextstrain/.github/actions/shellcheck@master - - cram: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 - - run: pip install cram - - run: cram tests/ \ No newline at end of file diff --git a/ingest/vendored/.github/workflows/pre-commit.yaml b/ingest/vendored/.github/workflows/pre-commit.yaml new file mode 100644 index 0000000..70da533 --- /dev/null +++ b/ingest/vendored/.github/workflows/pre-commit.yaml @@ -0,0 +1,14 @@ +name: pre-commit + +on: + - push + +jobs: + pre-commit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + - uses: pre-commit/action@v3.0.1 diff --git a/ingest/vendored/.gitrepo b/ingest/vendored/.gitrepo index 3841ca7..de57276 100644 --- a/ingest/vendored/.gitrepo +++ b/ingest/vendored/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/nextstrain/ingest branch = main - commit = 7617c39fae05e5882c5e6c065c5b47d500c998af - parent = d49aeeb0a95f6336a516b3f25fa83a67d16bf56e + commit = 258ab8ce898a88089bc88caee336f8d683a0e79a + parent = b1e777a8e79748bb69e24feeb0a1bd8a29db4969 method = merge cmdver = 0.4.6 diff --git a/ingest/vendored/.pre-commit-config.yaml b/ingest/vendored/.pre-commit-config.yaml new file mode 100644 index 0000000..2cdf88b --- /dev/null +++ b/ingest/vendored/.pre-commit-config.yaml @@ -0,0 +1,40 @@ +default_language_version: + python: python3 +repos: + - repo: https://github.com/pre-commit/sync-pre-commit-deps + rev: v0.0.1 + hooks: + - id: sync-pre-commit-deps + - repo: https://github.com/shellcheck-py/shellcheck-py + rev: v0.10.0.1 + hooks: + - id: shellcheck + - repo: https://github.com/rhysd/actionlint + rev: v1.6.27 + hooks: + - id: actionlint + entry: env SHELLCHECK_OPTS='--exclude=SC2027' actionlint + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.6.0 + hooks: + - id: trailing-whitespace + - id: check-ast + - id: check-case-conflict + - id: check-docstring-first + - id: check-json + - id: check-executables-have-shebangs + - id: check-merge-conflict + - id: check-shebang-scripts-are-executable + - id: check-symlinks + - id: check-toml + - id: check-yaml + - id: destroyed-symlinks + - id: detect-private-key + - id: end-of-file-fixer + - id: fix-byte-order-marker + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.4.6 + hooks: + # Run the linter. + - id: ruff diff --git a/ingest/vendored/README.md b/ingest/vendored/README.md index fa91891..a2b54cb 100644 --- a/ingest/vendored/README.md +++ b/ingest/vendored/README.md @@ -2,7 +2,7 @@ Shared internal tooling for pathogen data ingest. Used by our individual pathogen repos which produce Nextstrain builds. Expected to be vendored by -each pathogen repo using `git subtree`. +each pathogen repo using `git subrepo`. Some tools may only live here temporarily before finding a permanent home in `augur curate` or Nextstrain CLI. Others may happily live out their days here. 
@@ -12,6 +12,9 @@ Some tools may only live here temporarily before finding a permanent home in Nextstrain maintained pathogen repos will use [`git subrepo`](https://github.com/ingydotnet/git-subrepo) to vendor ingest scripts. (See discussion on this decision in https://github.com/nextstrain/ingest/issues/3) +For a list of Nextstrain repos that are currently using this method, use [this +GitHub code search](https://github.com/search?type=code&q=org%3Anextstrain+subrepo+%22remote+%3D+https%3A%2F%2Fgithub.com%2Fnextstrain%2Fingest%22). + If you don't already have `git subrepo` installed, follow the [git subrepo installation instructions](https://github.com/ingydotnet/git-subrepo#installation). Then add the latest ingest scripts to the pathogen repo by running: @@ -54,14 +57,14 @@ commit hash if needed. Much of this tooling originated in [ncov-ingest](https://github.com/nextstrain/ncov-ingest) and was passaged thru -[monkeypox's ingest/](https://github.com/nextstrain/monkeypox/tree/@/ingest/). -It subsequently proliferated from [monkeypox][] to other pathogen repos -([rsv][], [zika][], [dengue][], [hepatitisB][], [forecasts-ncov][]) primarily -thru copying. To [counter that +[mpox's ingest/](https://github.com/nextstrain/mpox/tree/@/ingest/). It +subsequently proliferated from [mpox][] to other pathogen repos ([rsv][], +[zika][], [dengue][], [hepatitisB][], [forecasts-ncov][]) primarily thru +copying. To [counter that proliferation](https://bedfordlab.slack.com/archives/C7SDVPBLZ/p1688577879947079), this repo was made. -[monkeypox]: https://github.com/nextstrain/monkeypox +[mpox]: https://github.com/nextstrain/mpox [rsv]: https://github.com/nextstrain/rsv [zika]: https://github.com/nextstrain/zika/pull/24 [dengue]: https://github.com/nextstrain/dengue/pull/10 @@ -114,15 +117,6 @@ Potential Nextstrain CLI scripts - [download-from-s3](download-from-s3) - Download file from AWS S3 bucket with decompression based on file extension in S3 URL. Skips download if the local file already exists and has a hash identical to the S3 object's metadata `sha256sum`. -Potential augur curate scripts - -- [apply-geolocation-rules](apply-geolocation-rules) - Applies user curated geolocation rules to NDJSON records -- [merge-user-metadata](merge-user-metadata) - Merges user annotations with NDJSON records -- [transform-authors](transform-authors) - Abbreviates full author lists to ' et al.' -- [transform-field-names](transform-field-names) - Rename fields of NDJSON records -- [transform-genbank-location](transform-genbank-location) - Parses `location` field with the expected pattern `"[:][, ]"` based on [GenBank's country field](https://www.ncbi.nlm.nih.gov/genbank/collab/country/) -- [transform-strain-names](transform-strain-names) - Ordered search for strain names across several fields. - ## Software requirements Some scripts may require Bash ≥4. If you are running these scripts on macOS, the builtin Bash (`/bin/bash`) does not meet this requirement. You can install [Homebrew's Bash](https://formulae.brew.sh/formula/bash) which is more up to date. @@ -131,7 +125,24 @@ Some scripts may require Bash ≥4. If you are running these scripts on macOS, t Most scripts are untested within this repo, relying on "testing in production". That is the only practical testing option for some scripts such as the ones interacting with S3 and Slack. -For more locally testable scripts, Cram-style functional tests live in `tests` and are run as part of CI. 
To run these locally, +## Working on this repo + +This repo is configured to use [pre-commit](https://pre-commit.com), +to help automatically catch common coding errors and syntax issues +with changes before they are committed to the repo. + +If you will be writing new code or otherwise working within this repo, +please do the following to get started: + +1. [install `pre-commit`](https://pre-commit.com/#install) by running + either `python -m pip install pre-commit` or `brew install + pre-commit`, depending on your preferred package management + solution +2. install the local git hooks by running `pre-commit install` from + the root of the repo +3. when problems are detected, correct them in your local working tree + before committing them. -1. Download Cram: `pip install cram` -2. Run the tests: `cram tests/` +Note that these pre-commit checks are also run in a GitHub Action when +changes are pushed to GitHub, so correcting issues locally will +prevent extra cycles of correction. diff --git a/ingest/vendored/apply-geolocation-rules b/ingest/vendored/apply-geolocation-rules deleted file mode 100755 index 776cf16..0000000 --- a/ingest/vendored/apply-geolocation-rules +++ /dev/null @@ -1,234 +0,0 @@ -#!/usr/bin/env python3 -""" -Applies user curated geolocation rules to the geolocation fields in the NDJSON -records from stdin. The modified records are output to stdout. This does not do -any additional transformations on top of the user curations. -""" -import argparse -import json -from collections import defaultdict -from sys import exit, stderr, stdin, stdout - - -class CyclicGeolocationRulesError(Exception): - pass - - -def load_geolocation_rules(geolocation_rules_file): - """ - Loads the geolocation rules from the provided *geolocation_rules_file*. - Returns the rules as a dict: - { - regions: { - countries: { - divisions: { - locations: corrected_geolocations_tuple - } - } - } - } - """ - geolocation_rules = defaultdict(lambda: defaultdict(lambda: defaultdict(dict))) - with open(geolocation_rules_file, 'r') as rules_fh: - for line in rules_fh: - # ignore comments - if line.strip()=="" or line.lstrip()[0] == '#': - continue - - row = line.strip('\n').split('\t') - # Skip lines that cannot be split into raw and annotated geolocations - if len(row) != 2: - print( - f"WARNING: Could not decode geolocation rule {line!r}.", - "Please make sure rules are formatted as", - "'region/country/division/locationregion/country/division/location'.", - file=stderr) - continue - - # remove trailing comments - row[-1] = row[-1].partition('#')[0].rstrip() - raw , annot = tuple( row[0].split('/') ) , tuple( row[1].split('/') ) - - # Skip lines where raw or annotated geolocations cannot be split into 4 fields - if len(raw) != 4: - print( - f"WARNING: Could not decode the raw geolocation {row[0]!r}.", - "Please make sure it is formatted as 'region/country/division/location'.", - file=stderr - ) - continue - - if len(annot) != 4: - print( - f"WARNING: Could not decode the annotated geolocation {row[1]!r}.", - "Please make sure it is formatted as 'region/country/division/location'.", - file=stderr - ) - continue - - - geolocation_rules[raw[0]][raw[1]][raw[2]][raw[3]] = annot - - return geolocation_rules - - -def get_annotated_geolocation(geolocation_rules, raw_geolocation, rule_traversal = None): - """ - Gets the annotated geolocation for the *raw_geolocation* in the provided - *geolocation_rules*. 
- - Recursively traverses the *geolocation_rules* until we get the annotated - geolocation, which must be a Tuple. Returns `None` if there are no - applicable rules for the provided *raw_geolocation*. - - Rules are applied in the order of region, country, division, location. - First checks the provided raw values for geolocation fields, then if there - are not matches, tries to use general rules marked with '*'. - """ - # Always instantiate the rule traversal as an empty list if not provided, - # e.g. the first call of this recursive function - if rule_traversal is None: - rule_traversal = [] - - current_rules = geolocation_rules - # Traverse the geolocation rules based using the rule_traversal values - for field_value in rule_traversal: - current_rules = current_rules.get(field_value) - # If we hit `None`, then we know there are no matching rules, so stop the rule traversal - if current_rules is None: - break - - # We've found the tuple of the annotated geolocation - if isinstance(current_rules, tuple): - return current_rules - - # We've reach the next level of geolocation rules, - # so try to traverse the rules with the next target in raw_geolocation - if isinstance(current_rules, dict): - next_traversal_target = raw_geolocation[len(rule_traversal)] - rule_traversal.append(next_traversal_target) - return get_annotated_geolocation(geolocation_rules, raw_geolocation, rule_traversal) - - # We did not find any matching rule for the last traversal target - if current_rules is None: - # If we've used all general rules and we still haven't found a match, - # then there are no applicable rules for this geolocation - if all(value == '*' for value in rule_traversal): - return None - - # If we failed to find matching rule with a general rule as the last - # traversal target, then delete all trailing '*'s to reset rule_traversal - # to end with the last index that is currently NOT a '*' - # [A, *, B, *] => [A, *, B] - # [A, B, *, *] => [A, B] - # [A, *, *, *] => [A] - if rule_traversal[-1] == '*': - # Find the index of the first of the consecutive '*' from the - # end of the rule_traversal - # [A, *, B, *] => first_consecutive_general_rule_index = 3 - # [A, B, *, *] => first_consecutive_general_rule_index = 2 - # [A, *, *, *] => first_consecutive_general_rule_index = 1 - for index, field_value in reversed(list(enumerate(rule_traversal))): - if field_value == '*': - first_consecutive_general_rule_index = index - else: - break - - rule_traversal = rule_traversal[:first_consecutive_general_rule_index] - - # Set the final value to '*' in hopes that by moving to a general rule, - # we can find a matching rule. - rule_traversal[-1] = '*' - - return get_annotated_geolocation(geolocation_rules, raw_geolocation, rule_traversal) - - -def transform_geolocations(geolocation_rules, geolocation): - """ - Transform the provided *geolocation* by looking it up in the provided - *geolocation_rules*. - - This will use all rules that apply to the geolocation and rules will - be applied in the order of region, country, division, location. - - Returns the original geolocation if no geolocation rules apply. - - Raises a `CyclicGeolocationRulesError` if more than 1000 rules have - been applied to the raw geolocation. 
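For readers of this diff who want a feel for what the removed `apply-geolocation-rules` script did, the docstrings above are easier to follow with a small worked example. The sketch below is a deliberately simplified version of the lookup-and-reapply loop they describe: the rule set and geolocation tuple are invented, the lookup assumes rules are always nested four levels deep (as `load_geolocation_rules()` builds them), and the real script's wildcard backtracking is more thorough.

```python
from typing import Optional

# Invented rule set, nested region -> country -> division -> location,
# mirroring the structure built by load_geolocation_rules(). '*' matches
# any value at that level.
RULES = {
    "Europe": {
        "UK": {"*": {"*": ("Europe", "United Kingdom", "*", "*")}},
    },
}

def lookup(rules: dict, geo: tuple) -> Optional[tuple]:
    """Simplified lookup: try the exact value at each level, else the '*' rule.

    Assumes the rules dict is always four levels deep, so the loop ends on the
    annotated tuple (or None if no rule matches).
    """
    node = rules
    for value in geo:
        node = node[value] if value in node else node.get("*")
        if node is None:
            return None
    return node

def apply_rules(rules: dict, geo: tuple, max_rules: int = 1000) -> tuple:
    """Apply matching rules until they stop changing the geolocation."""
    for _ in range(max_rules):
        annotated = lookup(rules, geo)
        if annotated is None:
            return geo
        # '*' in the annotated tuple means "keep the original value".
        new_geo = tuple(a if a != "*" else g for a, g in zip(annotated, geo))
        if new_geo == geo:
            return geo
        geo = new_geo
    raise RuntimeError(f"More than {max_rules} rules applied to {geo!r}; likely a cycle.")

print(apply_rules(RULES, ("Europe", "UK", "England", "London")))
# -> ('Europe', 'United Kingdom', 'England', 'London')
```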
- """ - transformed_values = geolocation - rules_applied = 0 - continue_to_apply = True - - while continue_to_apply: - annotated_values = get_annotated_geolocation(geolocation_rules, transformed_values) - - # Stop applying rules if no annotated values were found - if annotated_values is None: - continue_to_apply = False - else: - rules_applied += 1 - - if rules_applied > 1000: - raise CyclicGeolocationRulesError( - "ERROR: More than 1000 geolocation rules applied on the same entry {geolocation!r}." - ) - - # Create a new list of values for comparison to previous values - new_values = list(transformed_values) - for index, value in enumerate(annotated_values): - # Keep original value if annotated value is '*' - if value != '*': - new_values[index] = value - - # Stop applying rules if this rule did not change the values, - # since this means we've reach rules with '*' that no longer change values - if new_values == transformed_values: - continue_to_apply = False - - transformed_values = new_values - - return transformed_values - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument("--region-field", default="region", - help="Field that contains regions in NDJSON records.") - parser.add_argument("--country-field", default="country", - help="Field that contains countries in NDJSON records.") - parser.add_argument("--division-field", default="division", - help="Field that contains divisions in NDJSON records.") - parser.add_argument("--location-field", default="location", - help="Field that contains location in NDJSON records.") - parser.add_argument("--geolocation-rules", metavar="TSV", required=True, - help="TSV file of geolocation rules with the format: " + - "'' where the raw and annotated geolocations " + - "are formatted as '///'. " + - "If creating a general rule, then the raw field value can be substituted with '*'." + - "Lines starting with '#' will be ignored as comments." + - "Trailing '#' will be ignored as comments.") - - args = parser.parse_args() - - location_fields = [args.region_field, args.country_field, args.division_field, args.location_field] - - geolocation_rules = load_geolocation_rules(args.geolocation_rules) - - for record in stdin: - record = json.loads(record) - - try: - annotated_values = transform_geolocations(geolocation_rules, [record.get(field, '') for field in location_fields]) - except CyclicGeolocationRulesError as e: - print(e, file=stderr) - exit(1) - - for index, field in enumerate(location_fields): - record[field] = annotated_values[index] - - json.dump(record, stdout, allow_nan=False, indent=None, separators=',:') - print() diff --git a/ingest/vendored/merge-user-metadata b/ingest/vendored/merge-user-metadata deleted file mode 100755 index 341c2df..0000000 --- a/ingest/vendored/merge-user-metadata +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env python3 -""" -Merges user curated annotations with the NDJSON records from stdin, with the user -curations overwriting the existing fields. The modified records are output -to stdout. This does not do any additional transformations on top of the user -curations. 
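A compact illustration of the behaviour described above may help: the removed `merge-user-metadata` script read a three-column annotations TSV (id, field, value) and let those values overwrite matching NDJSON records. The TSV content, record, and `accession` id field below are made up for the example, and the comment handling is abbreviated relative to the script.

```python
import json
from collections import defaultdict
from io import StringIO

# Stand-ins for the --annotations TSV and the NDJSON records read from stdin.
annotations_tsv = StringIO(
    "ABC123\tlocation\tSeattle\n"
    "# whole-line comments are skipped\n"
    "ABC123\tauthors\tDoe et al # trailing comments are stripped\n"
)
records = [{"accession": "ABC123", "location": "", "authors": "?"}]

annotations = defaultdict(dict)
for line in annotations_tsv:
    row = line.rstrip("\n").split("\t")
    if not row[0] or row[0].lstrip().startswith("#"):
        continue
    if len(row) != 3:
        continue  # the real script prints a WARNING to stderr here
    record_id, field, value = row
    annotations[record_id][field] = value.partition("#")[0].rstrip()

for record in records:
    # Annotations win over existing values, keyed on the id field (accession).
    record.update(annotations.get(record["accession"], {}))
    print(json.dumps(record, separators=(",", ":")))
# -> {"accession":"ABC123","location":"Seattle","authors":"Doe et al"}
```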
-""" -import argparse -import csv -import json -from collections import defaultdict -from sys import exit, stdin, stderr, stdout - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument("--annotations", metavar="TSV", required=True, - help="Manually curated annotations TSV file. " + - "The TSV should not have a header and should have exactly three columns: " + - "id to match existing metadata, field name, and field value. " + - "If there are multiple annotations for the same id and field, then the last value is used. " + - "Lines starting with '#' are treated as comments. " + - "Any '#' after the field value are treated as comments.") - parser.add_argument("--id-field", default="accession", - help="The ID field in the metadata to use to merge with the annotations.") - - args = parser.parse_args() - - annotations = defaultdict(dict) - with open(args.annotations, 'r') as annotations_fh: - csv_reader = csv.reader(annotations_fh, delimiter='\t') - for row in csv_reader: - if not row or row[0].lstrip()[0] == '#': - continue - elif len(row) != 3: - print("WARNING: Could not decode annotation line " + "\t".join(row), file=stderr) - continue - id, field, value = row - annotations[id][field] = value.partition('#')[0].rstrip() - - for record in stdin: - record = json.loads(record) - - record_id = record.get(args.id_field) - if record_id is None: - print(f"ERROR: ID field {args.id_field!r} does not exist in record", file=stderr) - exit(1) - - record.update(annotations.get(record_id, {})) - - json.dump(record, stdout, allow_nan=False, indent=None, separators=',:') - print() diff --git a/ingest/vendored/tests/transform-strain-names/transform-strain-names.t b/ingest/vendored/tests/transform-strain-names/transform-strain-names.t deleted file mode 100644 index 1c05df7..0000000 --- a/ingest/vendored/tests/transform-strain-names/transform-strain-names.t +++ /dev/null @@ -1,17 +0,0 @@ -Look for strain name in "strain" or a list of backup fields. - -If strain entry exists, do not do anything. - - $ echo '{"strain": "i/am/a/strain", "strain_s": "other"}' \ - > | $TESTDIR/../../transform-strain-names \ - > --strain-regex '^.+$' \ - > --backup-fields strain_s accession - {"strain":"i/am/a/strain","strain_s":"other"} - -If strain entry does not exists, search the backup fields - - $ echo '{"strain_s": "other"}' \ - > | $TESTDIR/../../transform-strain-names \ - > --strain-regex '^.+$' \ - > --backup-fields accession strain_s - {"strain_s":"other","strain":"other"} \ No newline at end of file diff --git a/ingest/vendored/transform-authors b/ingest/vendored/transform-authors deleted file mode 100755 index 0bade20..0000000 --- a/ingest/vendored/transform-authors +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env python3 -""" -Abbreviates a full list of authors to be ' et al.' of the NDJSON -record from stdin and outputs modified records to stdout. - -Note: This is a "best effort" approach and can potentially mangle the author name. 
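Since the "best effort" caveat above is easiest to appreciate with concrete inputs, here is a minimal standalone sketch of the abbreviation rule the removed `transform-authors` script applied. The author strings are invented, and the separator regex is a simplified form of the script's (which also handles full-width commas and semicolons).

```python
import re

def abbreviate_authors(authors: str, default: str = "?") -> str:
    """Best-effort abbreviation of an author list to '<first author> et al'."""
    authors = re.sub(r"\s+", " ", authors).strip()
    if not authors:
        return default
    # Keep everything before the first comma/semicolon or ' and '/' & ' separator.
    first = re.split(r"(?:\s*[,;]\s*|\s+(?:and|&)\s+)", authors)[0]
    if not first.strip(". ").endswith(" et al"):
        first += " et al"
    return first

print(abbreviate_authors("Doe J, Smith A, Lee B"))  # -> "Doe J et al"
print(abbreviate_authors(""))                       # -> "?"
print(abbreviate_authors("Doe J et al."))           # -> "Doe J et al." (left alone)
```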
-""" -import argparse -import json -import re -from sys import stderr, stdin, stdout - - -def parse_authors(record: dict, authors_field: str, default_value: str, - index: int, abbr_authors_field: str = None) -> dict: - # Strip and normalize whitespace - new_authors = re.sub(r'\s+', ' ', record[authors_field]) - - if new_authors == "": - new_authors = default_value - else: - # Split authors list on comma/semicolon - # OR "and"/"&" with at least one space before and after - new_authors = re.split(r'(?:\s*[,,;;]\s*|\s+(?:and|&)\s+)', new_authors)[0] - - # if it does not already end with " et al.", add it - if not new_authors.strip('. ').endswith(" et al"): - new_authors += ' et al' - - if abbr_authors_field: - if record.get(abbr_authors_field): - print( - f"WARNING: the {abbr_authors_field!r} field already exists", - f"in record {index} and will be overwritten!", - file=stderr - ) - - record[abbr_authors_field] = new_authors - else: - record[authors_field] = new_authors - - return record - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument("--authors-field", default="authors", - help="The field containing list of authors.") - parser.add_argument("--default-value", default="?", - help="Default value to use if authors list is empty.") - parser.add_argument("--abbr-authors-field", - help="The field for the generated abbreviated authors. " + - "If not provided, the original authors field will be modified.") - - args = parser.parse_args() - - for index, record in enumerate(stdin): - record = json.loads(record) - - parse_authors(record, args.authors_field, args.default_value, index, args.abbr_authors_field) - - json.dump(record, stdout, allow_nan=False, indent=None, separators=',:') - print() diff --git a/ingest/vendored/transform-field-names b/ingest/vendored/transform-field-names deleted file mode 100755 index fde223f..0000000 --- a/ingest/vendored/transform-field-names +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python3 -""" -Renames fields of the NDJSON record from stdin and outputs modified records -to stdout. -""" -import argparse -import json -from sys import stderr, stdin, stdout - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument("--field-map", nargs="+", - help="Fields names in the NDJSON record mapped to new field names, " + - "formatted as '{old_field_name}={new_field_name}'. " + - "If the old field does not exist in record, the new field will be added with an empty string value." + - "If the new field already exists in record, then the renaming of the old field will be skipped.") - parser.add_argument("--force", action="store_true", - help="Force renaming of old field even if the new field already exists. 
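To make the skip-or-overwrite behaviour of `--force` concrete, a rough, self-contained sketch of the renaming loop from the removed `transform-field-names` script follows; the field names and values are hypothetical.

```python
def rename_fields(record: dict, field_map: dict, force: bool = False) -> dict:
    """Rename keys per field_map ({old: new}). If the new key already holds a
    value, the rename is skipped unless force is given (which then overwrites)."""
    for old, new in field_map.items():
        if record.get(new) and not force:
            continue  # the real script prints a WARNING to stderr here
        record[new] = record.pop(old, "")
    return record

print(rename_fields({"collected": "2024-01-01"}, {"collected": "date"}))
# -> {'date': '2024-01-01'}
print(rename_fields({"collected": "2024-01-01", "date": "2023-12-31"},
                    {"collected": "date"}, force=True))
# -> {'date': '2024-01-01'}
```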
" + - "Please keep in mind this will overwrite the value of the new field.") - - args = parser.parse_args() - - field_map = {} - for field in args.field_map: - old_name, new_name = field.split('=') - field_map[old_name] = new_name - - for record in stdin: - record = json.loads(record) - - for old_field, new_field in field_map.items(): - - if record.get(new_field) and not args.force: - print( - f"WARNING: skipping rename of {old_field} because record", - f"already has a field named {new_field}.", - file=stderr - ) - continue - - record[new_field] = record.pop(old_field, '') - - json.dump(record, stdout, allow_nan=False, indent=None, separators=',:') - print() diff --git a/ingest/vendored/transform-genbank-location b/ingest/vendored/transform-genbank-location deleted file mode 100755 index 70ba56f..0000000 --- a/ingest/vendored/transform-genbank-location +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python3 -""" -Parses GenBank's 'location' field of the NDJSON record from stdin to 3 separate -fields: 'country', 'division', and 'location'. Checks that a record is from -GenBank by verifying that the 'database' field has a value of "GenBank" or "RefSeq". - -Outputs the modified record to stdout. -""" -import json -from sys import stdin, stdout - - -def parse_location(record: dict) -> dict: - # Expected pattern for the location field is "[:][, ]" - # See GenBank docs for their "country" field: - # https://www.ncbi.nlm.nih.gov/genbank/collab/country/ - geographic_data = record['location'].split(':') - - country = geographic_data[0] - division = '' - location = '' - - if len(geographic_data) == 2: - division , _ , location = geographic_data[1].partition(',') - - record['country'] = country.strip() - record['division'] = division.strip() - record['location'] = location.strip() - - return record - - -if __name__ == '__main__': - - for record in stdin: - record = json.loads(record) - - database = record.get('database', '') - if database in {'GenBank', 'RefSeq'}: - parse_location(record) - - json.dump(record, stdout, allow_nan=False, indent=None, separators=',:') - print() diff --git a/ingest/vendored/transform-strain-names b/ingest/vendored/transform-strain-names deleted file mode 100755 index d86c0e4..0000000 --- a/ingest/vendored/transform-strain-names +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python3 -""" -Verifies strain name pattern in the 'strain' field of the NDJSON record from -stdin. Adds a 'strain' field to the record if it does not already exist. - -Outputs the modified records to stdout. -""" -import argparse -import json -import re -from sys import stderr, stdin, stdout - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument("--strain-regex", default="^.+$", - help="Regex pattern for strain names. " + - "Strain names that do not match the pattern will be dropped.") - parser.add_argument("--backup-fields", nargs="*", - help="List of backup fields to use as strain name if the value in 'strain' " + - "does not match the strain regex pattern. 
" + - "If multiple fields are provided, will use the first field that has a non-empty string.") - - args = parser.parse_args() - - strain_name_pattern = re.compile(args.strain_regex) - - for index, record in enumerate(stdin): - record = json.loads(record) - - # Verify strain name matches the strain regex pattern - if strain_name_pattern.match(record.get('strain', '')) is None: - # Default to empty string if not matching pattern - record['strain'] = '' - # Use non-empty value of backup fields if provided - if args.backup_fields: - for field in args.backup_fields: - if record.get(field): - record['strain'] = str(record[field]) - break - - if record['strain'] == '': - print(f"WARNING: Record number {index} has an empty string as the strain name.", file=stderr) - - - json.dump(record, stdout, allow_nan=False, indent=None, separators=',:') - print()