Convert CBMAEstimator method to function #658

Merged 9 commits on Mar 21, 2022
2 changes: 1 addition & 1 deletion examples/01_datasets/01_plot_dataset_io.py
@@ -11,7 +11,7 @@

###############################################################################
# Start with the necessary imports
# --------------------------------
# -----------------------------------------------------------------------------
import os

from nimare.dataset import Dataset
10 changes: 5 additions & 5 deletions examples/01_datasets/02_download_neurosynth.py
@@ -32,7 +32,7 @@
"""
###############################################################################
# Start with the necessary imports
# --------------------------------
# -----------------------------------------------------------------------------
import os
from pprint import pprint

@@ -41,7 +41,7 @@

###############################################################################
# Download Neurosynth
# -------------------
# -----------------------------------------------------------------------------
# Neurosynth's data files are stored at https://github.com/neurosynth/neurosynth-data.
out_dir = os.path.abspath("../example_data/")
os.makedirs(out_dir, exist_ok=True)
@@ -59,7 +59,7 @@

###############################################################################
# Convert Neurosynth database to NiMARE dataset file
# --------------------------------------------------
# -----------------------------------------------------------------------------
neurosynth_dset = convert_neurosynth_to_dataset(
coordinates_file=neurosynth_db["coordinates"],
metadata_file=neurosynth_db["metadata"],
@@ -70,7 +70,7 @@

###############################################################################
# Add article abstracts to dataset
# --------------------------------
# -----------------------------------------------------------------------------
# This is only possible because Neurosynth uses PMIDs as study IDs.
#
# Make sure you replace the example email address with your own.
@@ -79,7 +79,7 @@

###############################################################################
# Do the same with NeuroQuery
# ---------------------------
# -----------------------------------------------------------------------------
# NeuroQuery's data files are stored at https://github.com/neuroquery/neuroquery_data.
files = fetch_neuroquery(
data_dir=out_dir,
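The NeuroQuery block is cut off by the diff view above. For context, a rough sketch of the pattern it follows, reusing the same converter as the Neurosynth block; keyword arguments beyond data_dir are left at their defaults here, so this is an illustration rather than the exact code in the file.

import os

from nimare.extract import fetch_neuroquery
from nimare.io import convert_neurosynth_to_dataset

out_dir = os.path.abspath("../example_data/")
os.makedirs(out_dir, exist_ok=True)

# Fetch NeuroQuery's coordinate and metadata files; extra keyword arguments
# can narrow the download to a specific vocabulary or data version.
files = fetch_neuroquery(data_dir=out_dir)
neuroquery_db = files[0]

# NeuroQuery's files share Neurosynth's layout, so the same converter applies.
neuroquery_dset = convert_neurosynth_to_dataset(
    coordinates_file=neuroquery_db["coordinates"],
    metadata_file=neuroquery_db["metadata"],
)
print(neuroquery_dset)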
6 changes: 3 additions & 3 deletions examples/01_datasets/03_plot_neurovault_io.py
@@ -14,7 +14,7 @@

###############################################################################
# Neurovault + NiMARE: Load freely shared statistical maps for Meta-Analysis
# --------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# `Neurovault <https://neurovault.org/>`_ is an online platform that hosts
# unthresholded statistical maps, including group statistical maps.
# NiMARE can read these statistical maps when given a list of collection_ids.
@@ -64,7 +64,7 @@

###############################################################################
# Conversion of Statistical Maps
# ------------------------------
# -----------------------------------------------------------------------------
# Some of the statistical maps are T statistics and others are Z statistics.
# To perform a Fisher's meta analysis, we need all Z maps.
# Thoughtfully, NiMARE has a class named ``ImageTransformer`` that will
@@ -84,7 +84,7 @@

###############################################################################
# Run a Meta-Analysis
# -------------------
# -----------------------------------------------------------------------------
# With the missing Z maps filled in, we can run a Meta-Analysis
# and plot our results
from nimare.meta.ibma import Fishers
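The image-conversion and meta-analysis steps are truncated above. A minimal, self-contained sketch of those two steps; it runs on the bundled NIDM-Pain dataset rather than the Neurovault collections the example downloads, so only the ImageTransformer and Fishers calls mirror the example itself.

import os

from nimare.dataset import Dataset
from nimare.extract import download_nidm_pain
from nimare.meta.ibma import Fishers
from nimare.transforms import ImageTransformer
from nimare.utils import get_resource_path

# Stand-in Dataset with per-study statistical images
dset = Dataset(os.path.join(get_resource_path(), "nidm_pain_dset.json"))
dset.update_path(download_nidm_pain())

# Derive z maps from the available t maps so every study has the image type Fishers needs
dset = ImageTransformer(target="z").transform(dset)

# Fisher's combined-probability meta-analysis across the per-study z maps
results = Fishers().fit(dset)
print(results.get_map("z"))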
10 changes: 5 additions & 5 deletions examples/01_datasets/04_transform_images_to_coordinates.py
@@ -27,12 +27,12 @@

###############################################################################
# Download data
# --------------------------------
# -----------------------------------------------------------------------------
dset_dir = download_nidm_pain()

###############################################################################
# Load Dataset
# --------------------------------------------------
# -----------------------------------------------------------------------------
dset_file = os.path.join(get_resource_path(), "nidm_pain_dset.json")
dset = Dataset(dset_file)
dset.update_path(dset_dir)
@@ -53,7 +53,7 @@

###############################################################################
# Inspect Dataset
# --------------------------------------------------
# -----------------------------------------------------------------------------

# There is only one study contrast with coordinates, but no images
print(f"studies with only coordinates: {set(dset.coordinates['id']) - set(dset.images['id'])}\n")
@@ -69,7 +69,7 @@

###############################################################################
# Use different strategies to overwrite existing coordinate data
# --------------------------------------------------------------
# -----------------------------------------------------------------------------
# There are three choices for how to treat existing coordinate
# data in the dataset which are named: 'fill', 'replace', and 'demolish'.
#
@@ -101,7 +101,7 @@

###############################################################################
# Inspect generated datasets
# --------------------------
# -----------------------------------------------------------------------------

example_study = "pain_01.nidm-1"

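The transformer calls themselves are truncated in this diff. A sketch of how one of the three strategies is exercised, assuming the ImagesToCoordinates transformer this example is built around; the per-strategy descriptions in the comments paraphrase the example's prose rather than quoting it.

import os

from nimare.dataset import Dataset
from nimare.extract import download_nidm_pain
from nimare.transforms import ImagesToCoordinates
from nimare.utils import get_resource_path

dset = Dataset(os.path.join(get_resource_path(), "nidm_pain_dset.json"))
dset.update_path(download_nidm_pain())

# 'fill': only generate coordinates for study contrasts that have none;
# 'replace': prefer newly generated coordinates, keeping originals where no usable image exists;
# 'demolish': discard all original coordinates and keep only generated ones.
img2coord = ImagesToCoordinates(merge_strategy="fill")
dset_fill = img2coord.transform(dset)
print(dset_fill.coordinates.head())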
16 changes: 8 additions & 8 deletions examples/02_meta-analyses/05_plot_correctors.py
@@ -15,14 +15,14 @@

###############################################################################
# Download data
# --------------------------------
# -----------------------------------------------------------------------------
from nimare.extract import download_nidm_pain

dset_dir = download_nidm_pain()

###############################################################################
# Load Dataset
# --------------------------------------------------
# -----------------------------------------------------------------------------
import os

from nimare.dataset import Dataset
@@ -38,7 +38,7 @@
# .. _corrector-cbma-example:
#
# Multiple comparisons correction in coordinate-based meta-analyses
# -----------------------------------------------------------------
# -----------------------------------------------------------------------------
# .. tip::
# For more information multiple comparisons correction and CBMA in NiMARE,
# see :ref:`multiple comparisons correction`.
@@ -56,7 +56,7 @@

###############################################################################
# Apply the Corrector to the MetaResult
# =====================================
# =============================================================================
# Now that we know what FWE correction methods are available, we can use one.
#
# The "montecarlo" method is a special one that is implemented within the
@@ -89,7 +89,7 @@

###############################################################################
# Show corrected results
# ======================
# =============================================================================
MAPS_TO_PLOT = [
"z",
"z_desc-size_level-cluster_corr-FWE_method-montecarlo",
@@ -122,7 +122,7 @@

###############################################################################
# Multiple comparisons correction in image-based meta-analyses
# ------------------------------------------------------------
# -----------------------------------------------------------------------------
from nimare.meta.ibma import Stouffers

meta = Stouffers(resample=True)
@@ -138,13 +138,13 @@

###############################################################################
# Apply the Corrector to the MetaResult
# =====================================
# =============================================================================
corr = FDRCorrector(method="indep", alpha=0.05)
cres = corr.transform(results)

###############################################################################
# Show corrected results
# ======================
# =============================================================================
fig, axes = plt.subplots(figsize=(8, 6), nrows=2)
plot_stat_map(
cres.get_map("z"),
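The montecarlo correction itself is truncated in the diff above. A small, self-contained sketch of that step applied to a CBMA result, using ALE as the estimator and a deliberately small n_iters; the example's own estimator choice and iteration count may differ.

import os

from nimare.correct import FWECorrector
from nimare.dataset import Dataset
from nimare.meta.cbma.ale import ALE
from nimare.utils import get_resource_path

dset = Dataset(os.path.join(get_resource_path(), "nidm_pain_dset.json"))

# Uncorrected coordinate-based meta-analysis
results = ALE().fit(dset)

# Monte Carlo FWE correction: the estimator builds a permutation-based null distribution
corr = FWECorrector(method="montecarlo", n_iters=100, n_cores=1)
cres = corr.transform(results)

# Corrected maps, such as those listed in MAPS_TO_PLOT above, sit alongside the originals
print(sorted(cres.maps.keys()))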
4 changes: 2 additions & 2 deletions examples/02_meta-analyses/06_plot_compare_ibma_and_cbma.py
@@ -25,12 +25,12 @@

###############################################################################
# Download data
# --------------------------------
# -----------------------------------------------------------------------------
dset_dir = download_nidm_pain()

###############################################################################
# Load Dataset
# --------------------------------------------------
# -----------------------------------------------------------------------------
dset_file = os.path.join(get_resource_path(), "nidm_pain_dset.json")
dset = Dataset(dset_file)
dset.update_path(dset_dir)
12 changes: 6 additions & 6 deletions examples/02_meta-analyses/07_macm.py
@@ -23,15 +23,15 @@

###############################################################################
# Load Dataset
# --------------------------------------------------
# -----------------------------------------------------------------------------
# We will assume that the Neurosynth database has already been downloaded and
# converted to a NiMARE dataset.
dset_file = "neurosynth_nimare_with_abstracts.pkl.gz"
dset = Dataset.load(dset_file)

###############################################################################
# Define a region of interest
# --------------------------------------------------
# -----------------------------------------------------------------------------
# We'll use the right amygdala from the Harvard-Oxford atlas
atlas = datasets.fetch_atlas_harvard_oxford("sub-maxprob-thr50-2mm")
img = nib.load(atlas["maps"])
@@ -43,22 +43,22 @@

###############################################################################
# Select studies with a reported coordinate in the ROI
# ----------------------------------------------------
# -----------------------------------------------------------------------------
roi_ids = dset.get_studies_by_mask(roi_img)
dset_sel = dset.slice(roi_ids)
print(f"{len(roi_ids)}/{len(dset.ids)} studies report at least one coordinate in the ROI")

###############################################################################
# Select studies with *no* reported coordinates in the ROI
# --------------------------------------------------------
# -----------------------------------------------------------------------------
no_roi_ids = list(set(dset.ids).difference(roi_ids))
dset_unsel = dset.slice(no_roi_ids)
print(f"{len(no_roi_ids)}/{len(dset.ids)} studies report zero coordinates in the ROI")


###############################################################################
# MKDA Chi2 with FWE correction
# --------------------------------------------------
# -----------------------------------------------------------------------------
mkda = MKDAChi2(kernel__r=10)
mkda.fit(dset_sel, dset_unsel)

@@ -75,7 +75,7 @@

###############################################################################
# SCALE
# --------------------------------------------------
# -----------------------------------------------------------------------------
# Another good option for a MACM analysis is the SCALE algorithm, which was
# designed specifically for MACM. Unfortunately, SCALE does not support
# multiple-comparisons correction.
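The FWE-correction step for the MKDAChi2 analysis above is cut off by the diff view (SCALE itself, as noted, does not support it). A rough fragment of how such a correction is typically applied, assuming results is the MetaResult produced by the MKDAChi2 fit shown earlier; variable names here are illustrative.

from nimare.correct import FWECorrector

# Monte Carlo FWE correction over the chi-square MACM result;
# n_iters is kept small here purely for illustration.
corr = FWECorrector(method="montecarlo", n_iters=100, n_cores=1)
cres = corr.transform(results)

# Corrected consistency/specificity maps are stored next to the uncorrected ones.
print(sorted(cres.maps.keys()))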
18 changes: 9 additions & 9 deletions examples/02_meta-analyses/09_plot_simulated_data.py
@@ -18,7 +18,7 @@

###############################################################################
# Create function to perform a meta-analysis and plot results
# -----------------------------------------------------------
# -----------------------------------------------------------------------------


def analyze_and_plot(dset, ground_truth_foci=None, correct=True, return_cres=False):
@@ -61,7 +61,7 @@ def analyze_and_plot(dset, ground_truth_foci=None, correct=True, return_cres=False):

###############################################################################
# Create Dataset
# --------------------------------------------------
# -----------------------------------------------------------------------------
# In this example, each of the 30 generated fake studies
# select 4 coordinates from a probability map representing the probability
# that particular coordinate will be chosen.
@@ -76,7 +76,7 @@ def analyze_and_plot(dset, ground_truth_foci=None, correct=True, return_cres=False):

###############################################################################
# Analyze and plot simple dataset
# -------------------------------
# -----------------------------------------------------------------------------
# The red dots in this plot and subsequent plots represent the
# simulated ground truth foci, and the clouds represent the statistical
# maps of the simulated data.
@@ -85,7 +85,7 @@ def analyze_and_plot(dset, ground_truth_foci=None, correct=True, return_cres=False):

###############################################################################
# Fine-tune dataset creation
# --------------------------
# -----------------------------------------------------------------------------
# Perhaps you want more control over the studies being generated.
# you can set:
#
@@ -110,14 +110,14 @@ def analyze_and_plot(dset, ground_truth_foci=None, correct=True, return_cres=False):

###############################################################################
# Analyze and plot manual dataset
# -------------------------------
# -----------------------------------------------------------------------------

fig = analyze_and_plot(manual_dset, ground_truth_foci)
fig.show()

###############################################################################
# Control percentage of studies with the foci of interest
# -------------------------------------------------------
# -----------------------------------------------------------------------------
# Often times a converging peak is not found in all studies within
# the meta-analysis, but only a portion.
# We can select a percentage of studies where a coordinate
@@ -129,14 +129,14 @@ def analyze_and_plot(dset, ground_truth_foci=None, correct=True, return_cres=False):

###############################################################################
# Analyze and plot the 50% foci dataset
# -------------------------------------
# -----------------------------------------------------------------------------

fig = analyze_and_plot(perc_foci_dset, ground_truth_foci[0:2])
fig.show()

###############################################################################
# Create a null dataset
# --------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Perhaps you are interested in the number of false positives your favorite
# meta-analysis algorithm typically gives.
# At an alpha of 0.05 we would expect no more than 5% of results to be false positives.
@@ -149,7 +149,7 @@ def analyze_and_plot(dset, ground_truth_foci=None, correct=True, return_cres=False):

###############################################################################
# Analyze and plot no foci dataset
# --------------------------------
# -----------------------------------------------------------------------------
# When not performing a multiple comparisons correction,
# there is a false positive rate of approximately 5%.

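The create_coordinate_dataset calls are truncated throughout this file. A sketch of the kind of call the surrounding text describes; the keyword values below are illustrative and the function's defaults may differ.

from nimare.generate import create_coordinate_dataset

# 30 simulated studies built around 4 ground-truth foci; the returned foci are what
# the red dots in the example's plots mark. The 50% variant described above additionally
# lowers the share of studies that contain the foci.
ground_truth_foci, dset = create_coordinate_dataset(
    foci=4,
    fwhm=10.0,
    sample_size=30,
    n_studies=30,
)
print(len(dset.ids))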
8 changes: 4 additions & 4 deletions examples/02_meta-analyses/10_peaks2maps.py
@@ -11,7 +11,7 @@
"""
###############################################################################
# Start with the necessary imports
# --------------------------------
# -----------------------------------------------------------------------------
import os

from nilearn.plotting import plot_glass_brain
@@ -22,19 +22,19 @@

###############################################################################
# Load Dataset
# --------------------------------------------------
# -----------------------------------------------------------------------------
dset_file = os.path.join(get_resource_path(), "nidm_pain_dset.json")
dset = Dataset(dset_file)

###############################################################################
# Run peaks2maps
# --------------------------------------------------
# -----------------------------------------------------------------------------
k = Peaks2MapsKernel()
imgs = k.transform(dset, return_type="image")

###############################################################################
# Plot modeled activation maps
# --------------------------------------------------
# -----------------------------------------------------------------------------
for img in imgs:
display = plot_glass_brain(
img, display_mode="lyrz", plot_abs=False, colorbar=True, vmax=1, threshold=0
6 changes: 3 additions & 3 deletions examples/03_annotation/01_plot_tfidf.py
@@ -14,15 +14,15 @@

###############################################################################
# Load dataset with abstracts
# ---------------------------
# -----------------------------------------------------------------------------
# We'll load a small dataset composed only of studies in Neurosynth with
# Angela Laird as a coauthor, for the sake of speed.
dset = dataset.Dataset(os.path.join(utils.get_resource_path(), "neurosynth_laird_studies.json"))
dset.texts.head(2)

###############################################################################
# Generate term counts
# --------------------
# -----------------------------------------------------------------------------
# Let's start by extracting terms and their associated counts from article
# abstracts.
counts_df = annotate.text.generate_counts(
@@ -36,7 +36,7 @@

###############################################################################
# Generate term counts
# --------------------
# -----------------------------------------------------------------------------
# We can also extract term frequency-inverse document frequency (tf-idf)
# values from text using the same function.
# While the terms and values will differ based on the dataset provided and the
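Both truncated generate_counts calls above go through the same helper; a flag switches between raw counts and tf-idf weights. A short sketch, assuming the tfidf keyword and omitting any additional keyword arguments the example itself passes.

import os

from nimare import annotate, dataset, utils

dset = dataset.Dataset(os.path.join(utils.get_resource_path(), "neurosynth_laird_studies.json"))

# Raw term counts from the article abstracts
counts_df = annotate.text.generate_counts(dset.texts, text_column="abstract", tfidf=False)

# The same helper with tf-idf weighting switched on
tfidf_df = annotate.text.generate_counts(dset.texts, text_column="abstract", tfidf=True)

print(counts_df.shape, tfidf_df.shape)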