Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Disable MA map pre-generation in CorrelationDecoder #637

Merged
3 commits merged on Feb 18, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 17 additions & 16 deletions nimare/decode/continuous.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
import pandas as pd
from nilearn._utils import load_niimg
from nilearn.masking import apply_mask
from tqdm.auto import tqdm

from .. import references
from ..base import Decoder
Expand Down Expand Up @@ -184,23 +185,20 @@ def _fit(self, dataset):
"""
self.masker = dataset.masker

# Pre-generate MA maps to speed things up
kernel_transformer = self.meta_estimator.kernel_transformer
dataset = kernel_transformer.transform(dataset, return_type="dataset")

for i, feature in enumerate(self.features_):
n_features = len(self.features_)
for i_feature, feature in enumerate(tqdm(self.features_, total=n_features)):
feature_ids = dataset.get_studies_by_label(
labels=[feature], label_threshold=self.frequency_threshold
labels=[feature],
label_threshold=self.frequency_threshold,
)
# Limit selected studies to studies with valid data
feature_ids = sorted(list(set(feature_ids).intersection(self.inputs_["id"])))

LGR.info(
f"Decoding {feature} ({i}/{len(self.features_)}): {len(feature_ids)}/"
f"{len(dataset.ids)} studies"
)
# Create the reduced Dataset
feature_dset = dataset.slice(feature_ids)
# This seems like a somewhat inelegant solution

# Check if the meta method is a pairwise estimator
# This seems like a somewhat inelegant solution
if "dataset2" in inspect.getfullargspec(self.meta_estimator.fit).args:
nonfeature_ids = sorted(list(set(self.inputs_["id"]) - set(feature_ids)))
nonfeature_dset = dataset.slice(nonfeature_ids)
Expand All @@ -209,19 +207,22 @@ def _fit(self, dataset):
self.meta_estimator.fit(feature_dset)

feature_data = self.meta_estimator.results.get_map(
self.target_image, return_type="array"
self.target_image,
return_type="array",
)
if i == 0:
images_ = np.zeros((len(self.features_), len(feature_data)))
images_[i, :] = feature_data
if i_feature == 0:
images_ = np.zeros((len(self.features_), len(feature_data)), feature_data.dtype)

images_[i_feature, :] = feature_data

self.images_ = images_

def transform(self, img):
"""Correlate target image with each feature-specific meta-analytic map.

Parameters
----------
img : :obj:`nibabel.nifti1.Nifti1Image`
img : :obj:`~nibabel.nifti1.Nifti1Image`
Image to decode. Must be in same space as ``dataset``.

Returns
Expand Down
11 changes: 1 addition & 10 deletions nimare/tests/test_decode_continuous.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,20 +9,11 @@
from nimare.meta import kernel, mkda


def test_CorrelationDecoder_smoke(testdata_laird, tmp_path_factory):
def test_CorrelationDecoder_smoke(testdata_laird):
"""Smoke test for continuous.CorrelationDecoder."""
tmpdir = tmp_path_factory.mktemp("test_CorrelationDecoder")

testdata_laird = testdata_laird.copy()
features = testdata_laird.get_labels(ids=testdata_laird.ids[0])[:5]
decoder = continuous.CorrelationDecoder(features=features)

# No basepath
with pytest.raises(ValueError):
decoder.fit(testdata_laird)

# Let's add the path
testdata_laird.update_path(tmpdir)
decoder.fit(testdata_laird)

# Make an image to decode
Expand Down