Skip to content

Commit

Permalink
Merge pull request #53 from simonsobs/updated_mflike
Browse files Browse the repository at this point in the history
Updated version of mflike, solving issue #50
  • Loading branch information
sgiardie authored Feb 8, 2023
2 parents f271865 + d064697 commit 1ff870d
Show file tree
Hide file tree
Showing 6 changed files with 439 additions and 509 deletions.
31 changes: 0 additions & 31 deletions examples/mflike_example.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,37 +6,6 @@ likelihood:
mflike.MFLike:
input_file: data_sacc_00044.fits
cov_Bbl_file: data_sacc_w_covar_and_Bbl.fits
defaults:
# Which spectra?
polarizations: ['TT', 'TE', 'ET', 'EE']
# Scale cuts (in ell) for each spectrum
scales:
TT: [50, 6002]
TE: [50, 6002]
ET: [50, 6002]
EE: [50, 6002]
symmetrize: False
data:
experiments:
LAT:
frequencies: [93, 145, 225]

spectra:
- experiments: ['LAT', 'LAT']
frequencies: [93, 93]
polarizations: ['TT','TE','EE']
- experiments: ['LAT', 'LAT']
frequencies: [93, 145]
- experiments: ['LAT', 'LAT']
frequencies: [93, 225]
- experiments: ['LAT', 'LAT']
frequencies: [145, 145]
polarizations: ['TT','TE','EE']
- experiments: ['LAT', 'LAT']
frequencies: [145, 225]
- experiments: ['LAT', 'LAT']
frequencies: [225, 225]
polarizations: ['TT','TE','EE']

params:
# Sampled
Expand Down
97 changes: 56 additions & 41 deletions mflike/MFLike.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,10 @@ input_file: null
# realizations that share the same bandpowers and covariance)
cov_Bbl_file: null

# Maximum multipole value up to which the theory Cl are computed
# If set to null (i.e. None), the program will set the value to 9000
lmax_theory: null

# Specify default set of spectra and scale cuts
# to be used
defaults:
Expand All @@ -31,10 +35,10 @@ data:
# List the names and frequencies of all the
# relevant experiments.
experiments:
LAT:
frequencies: [93, 145, 225]
# PlanckHFI:
# frequencies: [545]
- LAT_93
- LAT_145
- LAT_225
# - PlanckHFI_545:

spectra:
# Here, list all the different cross-correlations
Expand All @@ -43,37 +47,39 @@ data:
# For each of them, you can specify which spectra
# and scale cuts you'd like to use. If you don't
# specify anything, the defaults will be used.
- experiments: [LAT, LAT]
frequencies: [93, 93]
- experiments: [LAT, LAT]
frequencies: [93, 145]
- experiments: [LAT, LAT]
frequencies: [93, 225]
- experiments: [LAT, LAT]
frequencies: [145, 145]
- experiments: [LAT, LAT]
frequencies: [145, 225]
- experiments: [LAT, LAT]
frequencies: [225, 225]
- experiments: [LAT_93, LAT_93]
- experiments: [LAT_93, LAT_145]
- experiments: [LAT_93, LAT_225]
- experiments: [LAT_145, LAT_145]
- experiments: [LAT_145, LAT_225]
- experiments: [LAT_225, LAT_225]

# Parameters to handle the band integration:
# - external_bandpass sets the usage of an external bandpass file
# - polarized_arrays sets the PA we want to use, e.g. polarized_arrays: ['PA1','PA2']
# - nsteps sets the number of frequencies used in the integration
# Parameters to build a top-hat band:
# - nsteps sets the number of frequencies used in the band integration
# - bandwidth sets the relative width of the band wrt the central frequency
# with bandwidth: 0 no band integration is performed
# if bandwidth > 0 , nsteps must be > 1
# the central frequency of each band is set from the bands stored in the sacc file
# - with nsteps: 1, bandwidth must be 0
# Dirac delta bandpass, no band integration
# useful if you don't want to do band integration
# when the bandpasses in the sacc file are multifrequency
# the freq used is the effective frequency from the bandpass
# - if nsteps > 1, bandwidth must be > 0
# bandwidth can be a list if you want a different width for each band
# e.g. bandwidth: [0.3,0.2,0.3] for 3 bands
band_integration:
external_bandpass: false
polarized_arrays: false
nsteps: 1
bandwidth: 0
# when top_hat_band is a null dict: no top-hat band is built and
# bandpasses read from sacc file. Band integration is performed accordingly
# (if bandpass in sacc is a single freq, no band integration)
# the default is to read bandpasses from file, to build top-hat uncomment the
# parameters of the block!
top_hat_band:
# nsteps: 1
# bandwidth: 0

# uncomment the block to include a systematic template
# to be read from external file at "rootname"
# default is for systematics_template to be a null dict
systematics_template:
has_file: false
rootname: "test_template"
# rootname: "test_template"

foregrounds:
normalisation:
Expand Down Expand Up @@ -184,42 +190,51 @@ params:
proposal: 0.6
latex: T_d
# Systematics
bandint_shift_93:
bandint_shift_LAT_93:
value: 0
latex: \Delta_{\rm band}^{93}
bandint_shift_145:
bandint_shift_LAT_145:
value: 0
latex: \Delta_{\rm band}^{145}
bandint_shift_225:
bandint_shift_LAT_225:
value: 0
latex: \Delta_{\rm band}^{225}
calT_93:
calT_LAT_93:
value: 1
latex: \mathrm{Cal}_{\rm T}^{93}
calE_93:
calE_LAT_93:
value: 1
latex: \mathrm{Cal}_{\rm E}^{93}
calT_145:
calT_LAT_145:
value: 1
latex: \mathrm{Cal}_{\rm T}^{145}
calE_145:
calE_LAT_145:
value: 1
latex: \mathrm{Cal}_{\rm E}^{145}
calT_225:
calT_LAT_225:
value: 1
latex: \mathrm{Cal}_{\rm T}^{225}
calE_225:
calE_LAT_225:
value: 1
latex: \mathrm{Cal}_{\rm E}^{225}
cal_LAT_93:
value: 1
latex: \mathrm{Cal}^{93}
cal_LAT_145:
value: 1
latex: \mathrm{Cal}^{145}
cal_LAT_225:
value: 1
latex: \mathrm{Cal}^{225}
calG_all:
value: 1
latex: \mathrm{Cal}_{\rm G}^{\rm All}
alpha_93:
alpha_LAT_93:
value: 0 #deg
latex: \alpha^{93}
alpha_145:
alpha_LAT_145:
value: 0 #deg
latex: \alpha^{145}
alpha_225:
alpha_LAT_225:
value: 0 #deg
latex: \alpha^{225}
64 changes: 30 additions & 34 deletions mflike/mflike.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ class MFLike(InstallableLikelihood):
data: dict
defaults: dict
foregrounds: dict
band_integration: dict
top_hat_band: dict
systematics_template: dict

def initialize(self):
Expand Down Expand Up @@ -63,8 +63,9 @@ def initialize(self):
self.prepare_data()

# State requisites to the theory code
self.lmax_theory = 9000
self.requested_cls = ["tt", "te", "ee"]
self.lmax_theory = self.lmax_theory or 9000
self.log.debug(f"Maximum multipole value: {self.lmax_theory}")

self.expected_params_fg = [
"a_tSZ",
Expand All @@ -84,9 +85,10 @@ def initialize(self):
]

self.expected_params_nuis = ["calG_all"]
for f in self.freqs:
for f in self.experiments:
self.expected_params_nuis += [
f"bandint_shift_{f}",
f"cal_{f}",
f"calT_{f}",
f"calE_{f}",
f"alpha_{f}",
Expand Down Expand Up @@ -126,7 +128,7 @@ def loglike(self, cl, **params_values_nocosmo):
)
return logp

def prepare_data(self, verbose=False):
def prepare_data(self):
import sacc

data = self.data
Expand Down Expand Up @@ -163,24 +165,19 @@ def prepare_data(self, verbose=False):
"BB": "bb",
}

def xp_nu(xp, nu):
return f"{xp}_{nu}"

def get_cl_meta(spec):
# For each of the entries of the `spectra` section of the
# yaml file, extract the relevant information: experiments,
# frequencies, polarization combinations, scale cuts and
# polarization combinations, scale cuts and
# whether TE should be symmetrized.
# Experiments/frequencies
exp_1, exp_2 = spec["experiments"]
freq_1, freq_2 = spec["frequencies"]
# Read off polarization channel combinations
pols = spec.get("polarizations", default_cuts["polarizations"]).copy()
# Read off scale cuts
scls = spec.get("scales", default_cuts["scales"]).copy()

# For the same two channels, do not include ET and TE, only TE
if (exp_1 == exp_2) and (freq_1 == freq_2):
if exp_1 == exp_2:
if "ET" in pols:
pols.remove("ET")
if "TE" not in pols:
Expand All @@ -194,16 +191,16 @@ def get_cl_meta(spec):
else:
symm = False

return exp_1, exp_2, freq_1, freq_2, pols, scls, symm
return exp_1, exp_2, pols, scls, symm

def get_sacc_names(pol, exp_1, exp_2, freq_1, freq_2):
# Translate the polarization combination, experiment
# and frequency names of a given entry in the `spectra`
def get_sacc_names(pol, exp_1, exp_2):
# Translate the polarization combination and experiment
# names of a given entry in the `spectra`
# part of the input yaml file into the names expected
# in the SACC files.
tname_1 = exp_1
tname_2 = exp_2
p1, p2 = pol
tname_1 = xp_nu(exp_1, freq_1)
tname_2 = xp_nu(exp_2, freq_2)
if p1 in ["E", "B"]:
tname_1 += "_s2"
else:
Expand All @@ -227,9 +224,9 @@ def get_sacc_names(pol, exp_1, exp_2, freq_1, freq_2):
# Length of the final data vector
len_compressed = 0
for spectrum in data["spectra"]:
(exp_1, exp_2, freq_1, freq_2, pols, scls, symm) = get_cl_meta(spectrum)
exp_1, exp_2, pols, scls, symm = get_cl_meta(spectrum)
for pol in pols:
tname_1, tname_2, dtype = get_sacc_names(pol, exp_1, exp_2, freq_1, freq_2)
tname_1, tname_2, dtype = get_sacc_names(pol, exp_1, exp_2)
lmin, lmax = scls[pol]
ind = s.indices(
dtype, # Power spectrum type
Expand All @@ -249,8 +246,7 @@ def get_sacc_names(pol, exp_1, exp_2, freq_1, freq_2):
else:
len_compressed += ind.size

if verbose:
print(tname_1, tname_2, dtype, ind.shape, lmin, lmax)
self.log.debug(f"{tname_1} {tname_2} {dtype} {ind.shape} {lmin} {lmax}")

# Get rid of all the unselected power spectra.
# Sacc takes care of performing the same cuts in the
Expand All @@ -267,18 +263,16 @@ def get_sacc_names(pol, exp_1, exp_2, freq_1, freq_2):
# symmetrization option, for which SACC doesn't have native support.
mat_compress = np.zeros([len_compressed, len_full])
mat_compress_b = np.zeros([len_compressed, len_full])
bands = {}

self.lcuts = {k: c[1] for k, c in default_cuts["scales"].items()}
index_sofar = 0

for spectrum in data["spectra"]:
(exp_1, exp_2, freq_1, freq_2, pols, scls, symm) = get_cl_meta(spectrum)
bands[xp_nu(exp_1, freq_1)] = freq_1
bands[xp_nu(exp_2, freq_2)] = freq_2
exp_1, exp_2, pols, scls, symm = get_cl_meta(spectrum)
for k in scls.keys():
self.lcuts[k] = max(self.lcuts[k], scls[k][1])
for pol in pols:
tname_1, tname_2, dtype = get_sacc_names(pol, exp_1, exp_2, freq_1, freq_2)
tname_1, tname_2, dtype = get_sacc_names(pol, exp_1, exp_2)
# The only reason why we need indices is the symmetrization.
# Otherwise all of this could have been done in the previous
# loop over data["spectra"].
Expand Down Expand Up @@ -325,10 +319,8 @@ def get_sacc_names(pol, exp_1, exp_2, freq_1, freq_2):
"pol": ppol_dict[pol],
"hasYX_xsp": pol
in ["ET", "BE", "BT"], # This is necessary for handling symmetrization
"t1": xp_nu(exp_1, freq_1), #
"t2": xp_nu(exp_2, freq_2), #
"nu1": freq_1,
"nu2": freq_2,
"t1": exp_1,
"t2": exp_2,
"leff": ls, #
"cl_data": cls, #
"bpw": ws,
Expand All @@ -344,15 +336,19 @@ def get_sacc_names(pol, exp_1, exp_2, freq_1, freq_2):
self.logp_const = np.log(2 * np.pi) * (-len(self.data_vec) / 2)
self.logp_const -= 0.5 * np.linalg.slogdet(self.cov)[1]

# TODO: we should actually be using bandpass integration
self.bands = sorted(bands)
self.freqs = np.array([bands[b] for b in self.bands])
self.experiments = data["experiments"]
self.bands = {
name: {"nu": tracer.nu, "bandpass": tracer.bandpass}
for name, tracer in s.tracers.items()
}

# Put lcuts in a format that is recognisable by CAMB.
self.lcuts = {k.lower(): c for k, c in self.lcuts.items()}
if "et" in self.lcuts:
del self.lcuts["et"]

self.log.info(f"Number of bins used: {self.data_vec.size}")

def _get_power_spectra(self, cl, **params_values_nocosmo):
# Get Cl's from the theory code
Dls = {s: cl[s][self.l_bpws] for s, _ in self.lcuts.items()}
Expand All @@ -363,7 +359,7 @@ def _get_power_spectra(self, cl, **params_values_nocosmo):
p = m["pol"]
i = m["ids"]
w = m["bpw"].weight.T
clt = np.dot(w, DlsObs[p, m["nu1"], m["nu2"]])
clt = w @ DlsObs[p, m["t1"], m["t2"]]
ps_vec[i] = clt

return ps_vec
Loading

0 comments on commit 1ff870d

Please sign in to comment.