
Commit 9795726

add pre commit
1 parent 80364bf commit 9795726

File tree

7 files changed: +62 / -22 lines


.gitignore

Lines changed: 1 addition & 1 deletion
@@ -1,2 +1,2 @@
 __pychache__
-*/__pychache__
+*/__pychache__

.pre-commit-config.yaml

Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.5.0
+    hooks:
+      - id: trailing-whitespace
+      - id: check-yaml
+      - id: check-added-large-files
+      - id: end-of-file-fixer
+
+  - repo: https://github.com/psf/black
+    rev: 23.10.1
+    hooks:
+      - id: black
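With this file committed, the hooks are normally enabled once per clone with "pre-commit install" and can be applied to the whole code base with "pre-commit run --all-files". The sketch below shows one way to trigger that same full run from Python, for example in CI; the wrapper itself is illustrative and not part of this commit.

# Illustrative wrapper, not part of this commit: run every configured hook once,
# equivalent to typing "pre-commit run --all-files" at the repository root.
import subprocess
import sys

result = subprocess.run(["pre-commit", "run", "--all-files"])
sys.exit(result.returncode)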

src/CONTRIBUTING.md

Lines changed: 1 addition & 1 deletion
@@ -25,4 +25,4 @@ Serving on http://127.0.0.1:8000/
 ```
 
 - push your changes to your forked repository
-- open a pull request
+- open a pull request

src/datasets_raw.md

Lines changed: 1 addition & 1 deletion
@@ -22,4 +22,4 @@
 | 2023_SaintLuc_VisTacMotionFoR_IS_raw | 21 | True | True | ['participant_id', 'codename', 'date', 'age', 'gender'] | False | ['anat', 'func'] | ['001', '002'] | ['handDown', 'handUp', 'mtMstLocalizer', 'tactileLocalizer2', 'visual', 'visualLocalizer2'] | nan | nan | nan |
 | 2023_Trento_plosBiology_YX_raw | 48 | True | False | ['participant_id'] | False | ['anat', 'fmap', 'func'] | [] | ['judgement', 'resting'] | nan | nan | nan |
 | Toronto_VisMotionLocalizer_MR_raw | 3 | True | False | ['participant_id'] | False | ['anat', 'func'] | ['01'] | ['visMotion'] | nan | nan | nan |
-| olf_blind_raw | 35 | True | True | ['participant_id', 'Group', 'Sex', 'Age', 'Educational level', 'Smoker', 'Medication', 'Vision level', 'Use of guide dog', 'Use of white cane', 'Musical practice', 'Braille reading', 'Braille reading hand used', 'Age of total blindness onset', 'Blindness Reason', 'Handedness', 'DK_C1_Letter_Fluency', 'DK_C2_Category_Fluency', 'DK_C3_Category_SwitchingTC', 'DK_C3_Category_SwitchingACC', 'TEA_C2', 'TEA_C3', 'CVLT_T1_T5', 'CVLT_ImmediateFreeRecall', 'CVLT_ImmediateCuedRecall', 'CVLT_DelayedFreeRecall', 'CVLT_DelayedCuedRecall', 'CVLT_Recognition', 'SS_Iden_O1', 'SS_Iden_O2', 'SS_Iden_O3', 'SS_Iden_O4', 'SS_Iden_O5', 'SS_Iden_O6', 'SS_Iden_O7', 'SS_Iden_O8', 'SS_Iden_O9', 'SS_Iden_O10', 'SS_Iden_O11', 'SS_Iden_O12', 'SS_Iden_O13', 'SS_Iden_O14', 'SS_Iden_O15', 'SS_Iden_O16', 'SS_Pls_O1', 'SS_Pls_O2', 'SS_Pls_O3', 'SS_Pls_O4', 'SS_Pls_O5', 'SS_Pls_O6', 'SS_Pls_O7', 'SS_Pls_O8', 'SS_Pls_O9', 'SS_Pls_O10', 'SS_Pls_O11', 'SS_Pls_O12', 'SS_Pls_O13', 'SS_Pls_O14', 'SS_Pls_O15', 'SS_Pls_O16', 'SS_Int_O1', 'SS_Int_O2', 'SS_Int_O3', 'SS_Int_O4', 'SS_Int_O5', 'SS_Int_O6', 'SS_Int_O7', 'SS_Int_O8', 'SS_Int_O9', 'SS_Int_O10', 'SS_Int_O11', 'SS_Int_O12', 'SS_Int_O13', 'SS_Int_O14', 'SS_Int_O15', 'SS_Int_O16', 'SS_Threshold_rightN', 'SS_Threshold_leftN', 'SS_Discrimination_Total', 'SS_OM_Total', 'SS_OM_Hits', 'SS_OM_FalseAlarms', 'SS_OM_Miss', 'SS_OM_Correct_Rejection', 'SS_OM_O1', 'SS_OM_O2', 'SS_OM_O3', 'SS_OM_O4', 'SS_OM_O5', 'SS_OM_O6', 'SS_OM_O7', 'SS_OM_O8', 'SS_OM_O9', 'SS_OM_O10', 'SS_OM_O11', 'SS_OM_O12', 'SS_OM_O13', 'SS_OM_O14', 'SS_OM_O15', 'SS_OM_O16', 'IRM_Pleasantness_Eucalyptus', 'IRM_Pleasantness_Almond', 'IRM_Intensity_Eucalyptus', 'IRM_Intensity_Almond'] | False | ['anat', 'dwi', 'func'] | [] | ['olfid', 'olfloc', 'rest'] | nan | nan | nan |
+| olf_blind_raw | 35 | True | True | ['participant_id', 'Group', 'Sex', 'Age', 'Educational level', 'Smoker', 'Medication', 'Vision level', 'Use of guide dog', 'Use of white cane', 'Musical practice', 'Braille reading', 'Braille reading hand used', 'Age of total blindness onset', 'Blindness Reason', 'Handedness', 'DK_C1_Letter_Fluency', 'DK_C2_Category_Fluency', 'DK_C3_Category_SwitchingTC', 'DK_C3_Category_SwitchingACC', 'TEA_C2', 'TEA_C3', 'CVLT_T1_T5', 'CVLT_ImmediateFreeRecall', 'CVLT_ImmediateCuedRecall', 'CVLT_DelayedFreeRecall', 'CVLT_DelayedCuedRecall', 'CVLT_Recognition', 'SS_Iden_O1', 'SS_Iden_O2', 'SS_Iden_O3', 'SS_Iden_O4', 'SS_Iden_O5', 'SS_Iden_O6', 'SS_Iden_O7', 'SS_Iden_O8', 'SS_Iden_O9', 'SS_Iden_O10', 'SS_Iden_O11', 'SS_Iden_O12', 'SS_Iden_O13', 'SS_Iden_O14', 'SS_Iden_O15', 'SS_Iden_O16', 'SS_Pls_O1', 'SS_Pls_O2', 'SS_Pls_O3', 'SS_Pls_O4', 'SS_Pls_O5', 'SS_Pls_O6', 'SS_Pls_O7', 'SS_Pls_O8', 'SS_Pls_O9', 'SS_Pls_O10', 'SS_Pls_O11', 'SS_Pls_O12', 'SS_Pls_O13', 'SS_Pls_O14', 'SS_Pls_O15', 'SS_Pls_O16', 'SS_Int_O1', 'SS_Int_O2', 'SS_Int_O3', 'SS_Int_O4', 'SS_Int_O5', 'SS_Int_O6', 'SS_Int_O7', 'SS_Int_O8', 'SS_Int_O9', 'SS_Int_O10', 'SS_Int_O11', 'SS_Int_O12', 'SS_Int_O13', 'SS_Int_O14', 'SS_Int_O15', 'SS_Int_O16', 'SS_Threshold_rightN', 'SS_Threshold_leftN', 'SS_Discrimination_Total', 'SS_OM_Total', 'SS_OM_Hits', 'SS_OM_FalseAlarms', 'SS_OM_Miss', 'SS_OM_Correct_Rejection', 'SS_OM_O1', 'SS_OM_O2', 'SS_OM_O3', 'SS_OM_O4', 'SS_OM_O5', 'SS_OM_O6', 'SS_OM_O7', 'SS_OM_O8', 'SS_OM_O9', 'SS_OM_O10', 'SS_OM_O11', 'SS_OM_O12', 'SS_OM_O13', 'SS_OM_O14', 'SS_OM_O15', 'SS_OM_O16', 'IRM_Pleasantness_Eucalyptus', 'IRM_Pleasantness_Almond', 'IRM_Intensity_Eucalyptus', 'IRM_Intensity_Almond'] | False | ['anat', 'dwi', 'func'] | [] | ['olfid', 'olfloc', 'rest'] | nan | nan | nan |

tools/list_raw.py

Lines changed: 11 additions & 9 deletions
@@ -12,7 +12,7 @@
 from utils import init_dataset
 from utils import list_datasets_in_dir
 
-cpp_raw = Path(__file__).parent.parent / 'cpp-lln-lab_raw'
+cpp_raw = Path(__file__).parent.parent / "cpp-lln-lab_raw"
 
 
 # Overwrite the tsv file with the current raw datasets
@@ -29,17 +29,19 @@
 
 root_dir = Path(__file__).parent.parent
 
-output_file = Path(__file__).parent / 'datasets_raw.tsv'
+output_file = Path(__file__).parent / "datasets_raw.tsv"
 
 datasets_df.to_csv(output_file, index=False, sep="\t")
 
-mk_file = Path(__file__).parent.parent / 'src/datasets_raw.md'
+mk_file = Path(__file__).parent.parent / "src/datasets_raw.md"
 
-datasets_df.drop(columns=[
-    "has_participant_tsv",
-    "has_participant_json",
-    "has_phenotype_dir",
-    "participant_columns"],
-    inplace=True
+datasets_df.drop(
+    columns=[
+        "has_participant_tsv",
+        "has_participant_json",
+        "has_phenotype_dir",
+        "participant_columns",
+    ],
+    inplace=True,
 )
 datasets_df.to_markdown(mk_file, index=False, mode="a")
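The restructured call keeps the same pandas logic: drop the participant-level columns, then append what is left as a markdown table to src/datasets_raw.md. A minimal sketch of that drop-then-append pattern, using an invented two-row DataFrame and local output paths rather than the script's real inputs:

# Minimal sketch of the pattern used in tools/list_raw.py; the data below is invented.
import pandas as pd

datasets_df = pd.DataFrame(
    {
        "name": ["dataset_a_raw", "dataset_b_raw"],
        "participant_columns": [["participant_id"], ["participant_id", "age"]],
        "mriqc": ["n/a", "n/a"],
    }
)

# Keep a full TSV copy, then drop columns that should not appear in the markdown listing.
datasets_df.to_csv("datasets_raw.tsv", index=False, sep="\t")
datasets_df.drop(columns=["participant_columns"], inplace=True)

# Append the remaining table to a markdown file (to_markdown needs the tabulate package).
datasets_df.to_markdown("datasets_raw.md", index=False, mode="a")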

tools/print_dataset_listing.py

Lines changed: 1 addition & 1 deletion
@@ -12,4 +12,4 @@
     "suffixes",
     "link to full data",
     "maintained by",
-]
+]

tools/utils.py

Lines changed: 32 additions & 9 deletions
@@ -1,4 +1,3 @@
-
 """Utility functions for tools."""
 
 from typing import Any
@@ -23,6 +22,7 @@ def new_dataset(name: str) -> dict[str, str | int | bool | list[str]]:
         "mriqc": "n/a",
     }
 
+
 def init_dataset() -> dict[str, list[Any]]:
     return {
         "name": [],
@@ -39,7 +39,8 @@ def init_dataset() -> dict[str, list[Any]]:
         "freesurfer": [],  # link to freesurfer dataset if exists
         "mriqc": [],  # link to mriqc dataset if exists
     }
-
+
+
 def is_known_bids_modality(modality: str) -> bool:
     KNOWN_MODALITIES = [
         "anat",
@@ -57,36 +58,46 @@ def is_known_bids_modality(modality: str) -> bool:
         "motion",
     ]
     return modality in KNOWN_MODALITIES
-
+
+
 def list_modalities(bids_pth: Path, sessions: list[str]) -> list[str]:
     pattern = "sub-*/ses-*/*" if sessions else "sub-*/*"
     sub_dirs = [v.name for v in bids_pth.glob(pattern) if v.is_dir()]
     modalities = [v for v in set(sub_dirs) if is_known_bids_modality(v)]
     return list(set(modalities))
 
+
 def list_data_files(bids_pth: Path, sessions: list[str]) -> list[str]:
     """Return the list of files in BIDS raw."""
     pattern = "sub-*/ses-*/*/*" if sessions else "sub-*/*/*"
     files = [v.name for v in bids_pth.glob(pattern) if "task-" in v.name]
     return files
 
+
 def list_tasks(bids_pth: Path, sessions: list[str]) -> list[str]:
     files = list_data_files(bids_pth, sessions)
     tasks = [f.split("task-")[1].split("_")[0] for f in files]
     tasks = list(set(tasks))
     return tasks
 
+
 def get_nb_subjects(pth: Path) -> int:
     return len(list_participants_in_dataset(pth))
 
+
 def has_participant_tsv(pth: Path) -> tuple[bool, bool, str | list[str]]:
     tsv_status = bool((pth / "participants.tsv").exists())
     json_status = bool((pth / "participants.json").exists())
     if tsv_status:
-        return tsv_status, json_status, list_participants_tsv_columns(pth / "participants.tsv")
+        return (
+            tsv_status,
+            json_status,
+            list_participants_tsv_columns(pth / "participants.tsv"),
+        )
     else:
         return tsv_status, json_status, "n/a"
-
+
+
 def list_participants_tsv_columns(participant_tsv: Path) -> list[str]:
     """Return the list of columns in participants.tsv."""
     try:
@@ -96,6 +107,7 @@ def list_participants_tsv_columns(participant_tsv: Path) -> list[str]:
         warn(f"Could not parse: {participant_tsv}")
         return ["cannot be parsed"]
 
+
 def list_datasets_in_dir(
     datasets: dict[str, list[Any]], path: Path, debug: bool
 ) -> dict[str, list[Any]]:
@@ -147,23 +159,34 @@ def list_datasets_in_dir(
 
     return datasets
 
+
 def list_sessions(dataset_pth: Path) -> list[str]:
-    sessions = [v.name.replace("ses-", "") for v in dataset_pth.glob("sub-*/ses-*") if v.is_dir()]
+    sessions = [
+        v.name.replace("ses-", "")
+        for v in dataset_pth.glob("sub-*/ses-*")
+        if v.is_dir()
+    ]
     return sorted(list(set(sessions)))
 
+
 def check_task(
     tasks: list[str], modalities: list[str], sessions: list[str], dataset_pth: Path
 ) -> None:
     """Check if tasks are present in dataset with modalities that can have tasks."""
     if (
-        any(mod in modalities for mod in ["func", "eeg", "ieeg", "meg", "beh", "motion"])
+        any(
+            mod in modalities for mod in ["func", "eeg", "ieeg", "meg", "beh", "motion"]
+        )
        and not tasks
     ):
         warn(
             f"no tasks found in {dataset_pth} "
             f"with modalities {modalities} "
             f"and files {list_data_files(dataset_pth, sessions)}"
         )
-
+
+
 def list_participants_in_dataset(data_pth: Path) -> list[str]:
-    return [x.name for x in data_pth.iterdir() if x.is_dir() and x.name.startswith("sub-")]
+    return [
+        x.name for x in data_pth.iterdir() if x.is_dir() and x.name.startswith("sub-")
+    ]
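These helpers are re-wrapped here without changing their signatures. For context, a short sketch of how they might be combined to summarise one dataset; the dataset path is hypothetical, and the snippet assumes it is run from the tools directory so that utils is importable.

# Illustrative use of the helpers in tools/utils.py; the dataset path is hypothetical.
from pathlib import Path

from utils import get_nb_subjects
from utils import list_modalities
from utils import list_sessions
from utils import list_tasks

dataset_pth = Path("..") / "cpp-lln-lab_raw" / "some_dataset_raw"

sessions = list_sessions(dataset_pth)  # e.g. ["001", "002"], or [] when there are no ses-* folders
modalities = list_modalities(dataset_pth, sessions)  # e.g. ["anat", "func"]
tasks = list_tasks(dataset_pth, sessions)  # task labels parsed from "task-" in file names
print(get_nb_subjects(dataset_pth), sessions, modalities, tasks)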
