From 9a80163da3bb5dada70217811d0a4d53aa391a41 Mon Sep 17 00:00:00 2001 From: Ian Harrison Date: Mon, 4 Oct 2021 17:06:39 +0100 Subject: [PATCH] flake8 for codestyle (#30) * added flake8 configs, started on some code style * added back codestyle workflow * code style passes :-) * restored imports in mflike test * made codestyle checks coherent with cobaya --- .github/workflows/codestyle.yml | 22 ++ setup.cfg | 5 + setup.py | 2 +- soliket/__init__.py | 18 +- soliket/ccl.py | 101 ++++--- soliket/clusters/__init__.py | 2 +- soliket/clusters/clusters.py | 28 +- soliket/clusters/massfunc.py | 6 +- soliket/clusters/survey.py | 61 ++-- soliket/clusters/sz_utils.py | 97 ++++--- soliket/clusters/tinker.py | 4 +- soliket/cross_correlation.py | 27 +- soliket/gaussian.py | 6 +- soliket/gaussian_data.py | 15 +- soliket/lensing/__init__.py | 2 +- soliket/lensing/lensing.py | 52 ++-- soliket/mflike.py | 47 +-- soliket/poisson_data.py | 18 +- soliket/ps.py | 3 +- soliket/tests/test_cross_correlation.py | 20 +- soliket/tests/test_gaussian.py | 2 +- soliket/tests/test_lensing.py | 5 +- soliket/tests/test_lensing_lite.py | 4 +- soliket/tests/test_mflike.py | 37 ++- soliket/tests/test_multi.py | 5 +- soliket/tests/test_ps.py | 10 +- soliket/tests/test_runs.py | 16 +- soliket/theoryforge_MFLike.py | 366 +++++++++++++----------- soliket/utils.py | 6 +- tox.ini | 6 + 30 files changed, 585 insertions(+), 408 deletions(-) create mode 100644 .github/workflows/codestyle.yml create mode 100644 setup.cfg create mode 100644 tox.ini diff --git a/.github/workflows/codestyle.yml b/.github/workflows/codestyle.yml new file mode 100644 index 00000000..a03354e9 --- /dev/null +++ b/.github/workflows/codestyle.yml @@ -0,0 +1,22 @@ +name: Code Style +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] +jobs: + flake8: + runs-on: ubuntu-latest + steps: + - name: Checkout Repository + uses: actions/checkout@v2 + - name: Install Python 3.x + uses: actions/setup-python@v2 + with: + python-version: 3.x + - name: Install Dependencies + run: | + pip install tox + - name: Check Code Style + run: | + tox -e codestyle \ No newline at end of file diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..8b38282d --- /dev/null +++ b/setup.cfg @@ -0,0 +1,5 @@ +[flake8] +select = E713,E704,E703,E714,E741,E10,E11,E20,E22,E23,E25,E27,E301,E302,E304,E9, + F405,F406,F5,F6,F7,F8,W1,W2,W3,W6,E501 +max-line-length = 90 +exclude = .tox,build \ No newline at end of file diff --git a/setup.py b/setup.py index a77130b2..9075ccc5 100644 --- a/setup.py +++ b/setup.py @@ -25,7 +25,7 @@ "cobaya", "sacc", "pyccl", - "fgspectra @ git+https://github.com/simonsobs/fgspectra@act_sz_x_cib#egg=fgspectra", + "fgspectra @ git+https://github.com/simonsobs/fgspectra@act_sz_x_cib#egg=fgspectra", # noqa E501 "mflike @ git+https://github.com/simonsobs/lat_mflike@master" ], test_suite="soliket.tests", diff --git a/soliket/__init__.py b/soliket/__init__.py index d74d421b..d5d524ce 100644 --- a/soliket/__init__.py +++ b/soliket/__init__.py @@ -1,12 +1,12 @@ -from .lensing import LensingLiteLikelihood, LensingLikelihood -from .gaussian import GaussianLikelihood, MultiGaussianLikelihood -from .ps import PSLikelihood, BinnedPSLikelihood -from .clusters import ClusterLikelihood -from .mflike import MFLike +from .lensing import LensingLiteLikelihood, LensingLikelihood # noqa: F401 +from .gaussian import GaussianLikelihood, MultiGaussianLikelihood # noqa: F401 +from .ps import PSLikelihood, BinnedPSLikelihood # noqa: F401 +from .clusters import 
ClusterLikelihood # noqa: F401 +from .mflike import MFLike # noqa: F401 try: - import pyccl as ccl - from .ccl import CCL - from .cross_correlation import CrossCorrelationLikelihood + import pyccl as ccl # noqa: F401 + from .ccl import CCL # noqa: F401 + from .cross_correlation import CrossCorrelationLikelihood # noqa: F401 except ImportError: print('Skipping CCL module as pyCCL is not installed') - pass \ No newline at end of file + pass diff --git a/soliket/ccl.py b/soliket/ccl.py index 47da5226..6f490a0e 100644 --- a/soliket/ccl.py +++ b/soliket/ccl.py @@ -1,35 +1,40 @@ """ Simple CCL wrapper with function to return CCL cosmo object, and (optional) result of -calling various custom methods on the ccl object. The idea is this is included with the CCL -package, so it can easily be used as a Cobaya component whenever CCL is installed, here for now. +calling various custom methods on the ccl object. The idea is this is included with the +CCL package, so it can easily be used as a Cobaya component whenever CCL is installed, +here for now. First version by AL. Untested example of usage at https://github.com/cmbant/SZCl_like/blob/methods/szcl_like/szcl_like.py -get_CCL results a dictionary of results, where results['cosmo'] is the CCL cosmology object. +get_CCL returns a dictionary of results, where results['cosmo'] is the CCL cosmology +object. Classes that need other CCL-computed results (without additional free parameters), should pass them in the requirements list. -e.g. a Likelihood with get_requirements() returning {'CCL': {'methods:{'name': self.method}}} -[where self is the Theory instance] will have results['name'] set to the result -of self.method(cosmo) being called with the CCL cosmo object. +e.g. a Likelihood with get_requirements() returning +{'CCL': {'methods': {'name': self.method}}} [where self is the Theory instance] will have +results['name'] set to the result of self.method(cosmo) being called with the CCL cosmo +object. -The Likelihood class can therefore handle for itself which results specifically it needs from CCL, -and just give the method to return them (to be called and cached by Cobaya with the right -parameters at the appropriate time). +The Likelihood class can therefore handle for itself which results specifically it needs +from CCL, and just give the method to return them (to be called and cached by Cobaya with +the right parameters at the appropriate time). -Alternatively the Likelihood can compute what it needs from results['cosmo'], however in this -case it will be up to the Likelihood to cache the results appropriately itself. +Alternatively the Likelihood can compute what it needs from results['cosmo'], however in +this case it will be up to the Likelihood to cache the results appropriately itself. -Note that this approach preclude sharing results other than the cosmo object itself between different likelihoods. +Note that this approach precludes sharing results other than the cosmo object itself +between different likelihoods. -Also note lots of things still cannot be done consistently in CCL, so this is far from general. +Also note lots of things still cannot be done consistently in CCL, so this is far from +general. April 2021: ----------- -Second version by PL. Using CCL's newly implemented cosmology calculator. +Second version by PL. Using CCL's newly implemented cosmology calculator. """ # For Cobaya docs see @@ -41,6 +46,7 @@ from cobaya.theory import Theory import pyccl as ccl + class CCL(Theory): # Options for Pk. 
# Default options can be set globally, and updated from requirements as needed @@ -74,7 +80,8 @@ def must_provide(self, **requirements): self.kmax = max(self.kmax, options.get('kmax', self.kmax)) self.z = np.unique(np.concatenate( - (np.atleast_1d(options.get("z", self._default_z_sampling)), np.atleast_1d(self.z)))) + (np.atleast_1d(options.get("z", self._default_z_sampling)), + np.atleast_1d(self.z)))) # Dictionary of the things CCL needs from CAMB/CLASS needs = {} @@ -115,7 +122,7 @@ def calculate(self, state, want_derived=True, **params_values_dict): distance = self.provider.get_comoving_radial_distance(self.z) hubble_z = self.provider.get_Hubble(self.z) H0 = hubble_z[0] - h = H0/100 + h = H0 / 100 E_of_z = hubble_z / H0 Omega_c = self.provider.get_param('omch2') / h ** 2 @@ -130,9 +137,8 @@ def calculate(self, state, want_derived=True, **params_values_dict): # Array z is sorted in ascending order. CCL requires an ascending scale # factor as input a = 1. / (1 + self.z[::-1]) - #growth = ccl.background.growth_factor(cosmo, a) - #fgrowth = ccl.background.growth_rate(cosmo, a) - + # growth = ccl.background.growth_factor(cosmo, a) + # fgrowth = ccl.background.growth_rate(cosmo, a) if self.kmax: for pair in self._var_pairs: @@ -141,38 +147,47 @@ def calculate(self, state, want_derived=True, **params_values_dict): Pk_lin = np.flip(Pk_lin, axis=0) if self.nonlinear: - _, z, Pk_nonlin = self.provider.get_Pk_grid(var_pair=pair, nonlinear=True) + _, z, Pk_nonlin = self.provider.get_Pk_grid(var_pair=pair, + nonlinear=True) Pk_nonlin = np.flip(Pk_nonlin, axis=0) - # Create a CCL cosmology object. Because we are giving it background - # quantities, it should not depend on the cosmology parameters given + # Create a CCL cosmology object. Because we are giving it background + # quantities, it should not depend on the cosmology parameters given cosmo = ccl.CosmologyCalculator( - Omega_c=Omega_c, Omega_b=Omega_b, h=h, sigma8=0.8, n_s=0.96, - background={'a': a, - 'chi': distance, - 'h_over_h0': E_of_z}, - pk_linear={'a': a, - 'k': k, - 'delta_matter:delta_matter': Pk_lin}, - pk_nonlin={'a': a, - 'k': k, - 'delta_matter:delta_matter': Pk_nonlin} - ) - + Omega_c=Omega_c, + Omega_b=Omega_b, + h=h, + sigma8=0.8, + n_s=0.96, + background={'a': a, + 'chi': distance, + 'h_over_h0': E_of_z}, + pk_linear={'a': a, + 'k': k, + 'delta_matter:delta_matter': Pk_lin}, # noqa E501 + pk_nonlin={'a': a, + 'k': k, + 'delta_matter:delta_matter': Pk_nonlin} # noqa E501 + ) + else: cosmo = ccl.CosmologyCalculator( - Omega_c=Omega_c, Omega_b=Omega_b, h=h, sigma8=0.8, n_s=0.96, - background={'a': a, - 'chi': distance, - 'h_over_h0': E_of_z}, - pk_linear={'a': a, - 'k': k, - 'delta_matter:delta_matter': Pk_lin} - ) + Omega_c=Omega_c, + Omega_b=Omega_b, + h=h, + sigma8=0.8, + n_s=0.96, + background={'a': a, + 'chi': distance, + 'h_over_h0': E_of_z}, + pk_linear={'a': a, + 'k': k, + 'delta_matter:delta_matter': Pk_lin} # noqa E501 + ) state['CCL'] = {'cosmo': cosmo} for required_result, method in self._required_results.items(): state['CCL'][required_result] = method(cosmo) def get_CCL(self): - return self._current_state['CCL'] \ No newline at end of file + return self._current_state['CCL'] diff --git a/soliket/clusters/__init__.py b/soliket/clusters/__init__.py index 1dfbb1c3..d790c894 100644 --- a/soliket/clusters/__init__.py +++ b/soliket/clusters/__init__.py @@ -1 +1 @@ -from .clusters import ClusterLikelihood +from .clusters import ClusterLikelihood # noqa: F401 diff --git a/soliket/clusters/clusters.py 
b/soliket/clusters/clusters.py index fc25b630..a7c3bfdb 100644 --- a/soliket/clusters/clusters.py +++ b/soliket/clusters/clusters.py @@ -52,9 +52,10 @@ def get_requirements(self): def _get_sz_model(self, cosmo): model = SZModel() model.hmf = ccl.halos.MassFuncTinker08(cosmo, mass_def=self.mdef) - model.hmb = ccl.halos.HaloBiasTinker10(cosmo, mass_def=self.mdef, mass_def_strict=False) + model.hmb = ccl.halos.HaloBiasTinker10(cosmo, + mass_def=self.mdef, mass_def_strict=False) model.hmc = ccl.halos.HMCalculator(cosmo, model.hmf, model.hmb, self.mdef) - model.szk = SZTracer(cosmo) + # model.szk = SZTracer(cosmo) return model def _get_catalog(self): @@ -77,7 +78,8 @@ def _get_om(self): ) def _get_ob(self): - return (self.theory.get_param("ombh2")) / ((self.theory.get_param("H0") / 100.0) ** 2) + return (self.theory.get_param("ombh2")) \ + / ((self.theory.get_param("H0") / 100.0) ** 2) def _get_Ez(self): return self.theory.get_Hubble(self.zarr) / self.theory.get_param("H0") @@ -94,12 +96,13 @@ def _get_DAz_interpolator(self): def _get_HMF(self): h = self.theory.get_param("H0") / 100.0 - Pk_interpolator = self.theory.get_Pk_interpolator(("delta_nonu", "delta_nonu"), nonlinear=False).P + Pk_interpolator = self.theory.get_Pk_interpolator(("delta_nonu", "delta_nonu"), + nonlinear=False).P pks = Pk_interpolator(self.zarr, self.k) # pkstest = Pk_interpolator(0.125, self.k ) # print (pkstest * h**3 ) - Ez = self._get_Ez() # self.theory.get_Hubble(self.zarr) / self.theory.get_param("H0") + Ez = self._get_Ez() om = self._get_om() hmf = mf.HMF(om, Ez, pk=pks * h ** 3, kh=self.k / h, zarr=self.zarr) @@ -113,7 +116,8 @@ def _get_param_vals(self): H0 = self.theory.get_param("H0") ob = self._get_ob() om = self._get_om() - param_vals = {"om": om, "ob": ob, "H0": H0, "B0": B0, "scat": scat, "massbias": massbias} + param_vals = {"om": om, "ob": ob, "H0": H0, "B0": B0, "scat": scat, + "massbias": massbias} return param_vals def _get_rate_fn(self, **kwargs): @@ -152,7 +156,8 @@ def _get_dVdz(self): """ DA_z = self.theory.get_angular_diameter_distance(self.zarr) - dV_dz = DA_z ** 2 * (1.0 + self.zarr) ** 2 / (self.theory.get_Hubble(self.zarr) / C_KM_S) + dV_dz = DA_z ** 2 * (1.0 + self.zarr) ** 2\ + / (self.theory.get_Hubble(self.zarr) / C_KM_S) # dV_dz *= (self.theory.get_param("H0") / 100.0) ** 3.0 # was h0 return dV_dz @@ -175,13 +180,16 @@ def _get_n_expected(self, **kwargs): for Yt, frac in zip(self.survey.Ythresh, self.survey.frac_of_survey): Pfunc = self.szutils.PfuncY(Yt, HMF.M, z_arr, param_vals, Ez_fn, DA_fn) - N_z = np.trapz(dn_dzdm * Pfunc, dx=np.diff(HMF.M[:, None] / h, axis=0), axis=0) - Ntot += np.trapz(N_z * dVdz, x=z_arr) * 4.0 * np.pi * self.survey.fskytotal * frac + N_z = np.trapz(dn_dzdm * Pfunc, dx=np.diff(HMF.M[:, None] / h, axis=0), + axis=0) + Ntot += np.trapz(N_z * dVdz, x=z_arr) \ + * 4.0 * np.pi * self.survey.fskytotal * frac # To test Mass function against Nemo. # Pfunc = 1. 
# N_z = np.trapz(dn_dzdm * Pfunc, dx=np.diff(HMF.M[:, None]/h, axis=0), axis=0) - # Ntot = np.trapz(N_z * dVdz, x=z_arr) * 4.0 * np.pi * (600./(4*np.pi * (180/np.pi)**2)) + # Ntot = np.trapz(N_z * dVdz, x=z_arr) \ + # * 4.0 * np.pi * (600./(4*np.pi * (180/np.pi)**2)) # print("Ntot", Ntot) return Ntot diff --git a/soliket/clusters/massfunc.py b/soliket/clusters/massfunc.py index dd4b0d75..d8bc13ef 100644 --- a/soliket/clusters/massfunc.py +++ b/soliket/clusters/massfunc.py @@ -30,7 +30,8 @@ def __init__(self, om, Ez, pk=None, kh=None, zarr=None): # Initialize rho critical values for usage self.om = om - self.rho_crit0H100 = (3. / (8. * np.pi) * (100 * 1.e5) ** 2.) / G_CGS * MPC2CM / MSUN_CGS + self.rho_crit0H100 = (3. / (8. * np.pi) * (100 * 1.e5) ** 2.) \ + / G_CGS * MPC2CM / MSUN_CGS self.rhoc0om = self.rho_crit0H100 * self.om if pk is None: @@ -60,7 +61,8 @@ def dn_dM(self, M, delta): M here is in MDeltam but we can convert """ delts = self.critdensThreshold(delta) - dn_dlnm = dn_dlogM(M, self.zarr, self.rhoc0om, delts, self.kh, self.pk, 'comoving') + dn_dlnm = dn_dlogM(M, self.zarr, self.rhoc0om, delts, self.kh, self.pk, + 'comoving') dn_dm = dn_dlnm / M[:, None] return dn_dm diff --git a/soliket/clusters/survey.py b/soliket/clusters/survey.py index f1443bc5..a254c0f6 100644 --- a/soliket/clusters/survey.py +++ b/soliket/clusters/survey.py @@ -5,6 +5,7 @@ import astropy.io.fits as pyfits # from astLib import astWCS from astropy.io import fits +from astropy.wcs import WCS import astropy.table as atpy @@ -35,25 +36,27 @@ def read_mock_cat(fitsfile, qmin): def read_matt_mock_cat(fitsfile, qmin): list = fits.open(fitsfile) data = list[1].data - ra = data.field("RADeg") - dec = data.field("decDeg") + # ra = data.field("RADeg") + # dec = data.field("decDeg") z = data.field("redshift") zerr = data.field("redshiftErr") Y0 = data.field("fixed_y_c") Y0err = data.field("fixed_err_y_c") SNR = data.field("fixed_SNR") - M = data.field("true_M500") + # M = data.field("true_M500") ind = np.where(SNR >= qmin)[0] return z[ind], zerr[ind], Y0[ind], Y0err[ind] def loadAreaMask(extName, DIR): - """Loads the survey area mask (i.e., after edge-trimming and point source masking, produced by nemo). + """Loads the survey area mask + (i.e., after edge-trimming and point source masking, produced by nemo). Returns map array, wcs """ areaImg = pyfits.open(os.path.join(DIR, "areaMask%s.fits.gz" % (extName))) areaMap = areaImg[0].data - wcs = astWCS.WCS(areaImg[0].header, mode="pyfits") + # wcs = astWCS.WCS(areaImg[0].header, mode="pyfits") + wcs = WCS(areaImg[0].header) areaImg.close() return areaMap, wcs @@ -63,9 +66,11 @@ def loadRMSmap(extName, DIR): """Loads the survey RMS map (produced by nemo). Returns map array, wcs """ - areaImg = pyfits.open(os.path.join(DIR, "RMSMap_Arnaud_M2e14_z0p4%s.fits.gz" % (extName))) + areaImg = pyfits.open(os.path.join(DIR, + "RMSMap_Arnaud_M2e14_z0p4%s.fits.gz" % (extName))) areaMap = areaImg[0].data - wcs = astWCS.WCS(areaImg[0].header, mode="pyfits") + # wcs = astWCS.WCS(areaImg[0].header, mode="pyfits") + wcs = WCS(areaImg[0].header) areaImg.close() return areaMap, wcs @@ -74,13 +79,14 @@ def loadRMSmap(extName, DIR): def loadQ(source, tileNames=None): """Load the filter mismatch function Q as a dictionary of spline fits. Args: - source (NemoConfig or str): Either the path to a .fits table (containing Q fits for all tiles - this - is normally selFn/QFit.fits), or a NemoConfig object (from which the path and tiles to use will - be inferred). 
- tileNames (optional, list): A list of tiles for which the Q function will be extracted. If - source is a NemoConfig object, this should be set to None. + source (NemoConfig or str): Either the path to a .fits table (containing Q fits + for all tiles - this is normally selFn/QFit.fits), or a NemoConfig object + (from which the path and tiles to use will be inferred). + tileNames (optional, list): A list of tiles for which the Q function will be + extracted. If source is a NemoConfig object, this should be set to None. Returns: - A dictionary (with tile names as keys), containing spline knots for the Q function for each tile. + A dictionary (with tile names as keys), containing spline knots for the Q + function for each tile. """ if type(source) == str: combinedQTabFileName = source @@ -93,20 +99,24 @@ def loadQ(source, tileNames=None): combinedQTab = atpy.Table().read(combinedQTabFileName) for key in combinedQTab.keys(): if key != "theta500Arcmin": - tckDict[key] = interpolate.splrep(combinedQTab["theta500Arcmin"], combinedQTab[key]) + tckDict[key] = interpolate.splrep(combinedQTab["theta500Arcmin"], + combinedQTab[key]) else: if tileNames is None: raise Exception( - "If source does not point to a complete QFit.fits file, you need to supply tileNames." + "If source does not point to a complete QFit.fits file,\ + you need to supply tileNames." ) for tileName in tileNames: - tab = atpy.Table().read(combinedQTabFileName.replace(".fits", "#%s.fits" % (tileName))) + tab = atpy.Table().read(combinedQTabFileName.replace(".fits", + "#%s.fits" % (tileName))) tckDict[tileName] = interpolate.splrep(tab["theta500Arcmin"], tab["Q"]) return tckDict class SurveyData: - def __init__(self, nemoOutputDir, ClusterCat, qmin=5.6, szarMock=False, tiles=False, num_noise_bins=20): + def __init__(self, nemoOutputDir, ClusterCat, qmin=5.6, szarMock=False, tiles=False, + num_noise_bins=20): self.nemodir = nemoOutputDir self.tckQFit = loadQ(self.nemodir + "/QFit.fits") @@ -116,17 +126,19 @@ def __init__(self, nemoOutputDir, ClusterCat, qmin=5.6, szarMock=False, tiles=Fa if szarMock: print("mock catalog") - self.clst_z, self.clst_zerr, self.clst_y0, self.clst_y0err = read_mock_cat(ClusterCat, self.qmin) + self.clst_z, self.clst_zerr, self.clst_y0, self.clst_y0err = \ + read_mock_cat(ClusterCat, self.qmin) else: print("real catalog") - self.clst_z, self.clst_zerr, self.clst_y0, self.clst_y0err = read_clust_cat( - ClusterCat, self.qmin - ) + self.clst_z, self.clst_zerr, self.clst_y0, self.clst_y0err = \ + read_clust_cat(ClusterCat, self.qmin) if tiles: self.filetile = self.nemodir + "tileAreas.txt" - self.tilenames = np.loadtxt(self.filetile, dtype=np.str, usecols=0, unpack=True) - self.tilearea = np.loadtxt(self.filetile, dtype=np.float, usecols=1, unpack=True) + self.tilenames = np.loadtxt(self.filetile, dtype=np.str, + usecols=0, unpack=True) + self.tilearea = np.loadtxt(self.filetile, dtype=np.float, + usecols=1, unpack=True) self.fsky = [] self.mask = [] @@ -152,7 +164,8 @@ def __init__(self, nemoOutputDir, ClusterCat, qmin=5.6, szarMock=False, tiles=Fa self.rmstotal = self.rms[self.rms > 0] self.fskytotal = 987.5 / 41252.9612 - count_temp, bin_edge = np.histogram(np.log10(self.rmstotal), bins=self.num_noise_bins) + count_temp, bin_edge = np.histogram(np.log10(self.rmstotal), + bins=self.num_noise_bins) self.frac_of_survey = count_temp * 1.0 / np.sum(count_temp) self.Ythresh = 10 ** ((bin_edge[:-1] + bin_edge[1:]) / 2.0) diff --git a/soliket/clusters/sz_utils.py b/soliket/clusters/sz_utils.py index 
828abb64..45f4e205 100644 --- a/soliket/clusters/sz_utils.py +++ b/soliket/clusters/sz_utils.py @@ -1,6 +1,6 @@ import numpy as np from scipy import interpolate -from astropy.cosmology import FlatLambdaCDM +# from astropy.cosmology import FlatLambdaCDM # from nemo import signals from .massfunc import MPC2CM as Mpc_in_cm @@ -9,14 +9,16 @@ # from .clusters import C_KM_S as C_in_kms -rho_crit0H100 = (3. / (8. * np.pi) * (100. * 1.e5) ** 2.) / G_in_cgs * Mpc_in_cm / MSun_in_g +rho_crit0H100 = (3. / (8. * np.pi) * (100. * 1.e5) ** 2.) \ + / G_in_cgs * Mpc_in_cm / MSun_in_g def gaussian(xx, mu, sig, noNorm=False): if noNorm: return np.exp(-1.0 * (xx - mu) ** 2 / (2.0 * sig ** 2.0)) else: - return 1.0 / (sig * np.sqrt(2 * np.pi)) * np.exp(-1.0 * (xx - mu) ** 2 / (2.0 * sig ** 2.0)) + return 1.0 / (sig * np.sqrt(2 * np.pi)) \ + * np.exp(-1.0 * (xx - mu) ** 2 / (2.0 * sig ** 2.0)) class szutils: @@ -24,7 +26,8 @@ def __init__(self, Survey): self.LgY = np.arange(-6, -2.5, 0.01) self.Survey = Survey - # self.rho_crit0H100 = (3. / (8. * np.pi) * (100. * 1.e5)**2.) / G_in_cgs * Mpc_in_cm / MSun_in_g + # self.rho_crit0H100 = (3. / (8. * np.pi) * \ + # (100. * 1.e5)**2.) / G_in_cgs * Mpc_in_cm / MSun_in_g def P_Yo(self, LgY, M, z, param_vals, Ez_fn, Da_fn): H0 = param_vals["H0"] @@ -50,7 +53,8 @@ def P_Yo(self, LgY, M, z, param_vals, Ez_fn, Da_fn): numer = -1.0 * (np.log(Y / Ytilde)) ** 2 ans = ( - 1.0 / (param_vals["scat"] * np.sqrt(2 * np.pi)) * np.exp(numer / (2.0 * param_vals["scat"] ** 2)) + 1.0 / (param_vals["scat"] * np.sqrt(2 * np.pi)) * + np.exp(numer / (2.0 * param_vals["scat"] ** 2)) ) return ans @@ -74,7 +78,8 @@ def P_Yo_vec(self, LgY, M, z, param_vals, Ez_fn, Da_fn): numer = -1.0 * (np.log(Y / Ytilde)) ** 2 ans = ( - 1.0 / (param_vals["scat"] * np.sqrt(2 * np.pi)) * np.exp(numer / (2.0 * param_vals["scat"] ** 2)) + 1.0 / (param_vals["scat"] * np.sqrt(2 * np.pi)) * + np.exp(numer / (2.0 * param_vals["scat"] ** 2)) ) return ans @@ -88,7 +93,8 @@ def P_of_gt_SN(self, LgY, MM, zz, Ynoise, param_vals, Ez_fn, Da_fn): Y = 10 ** LgY sig_tr = np.outer(np.ones([MM.shape[0], MM.shape[1]]), self.Y_erf(Y, Ynoise)) - sig_thresh = np.reshape(sig_tr, (MM.shape[0], MM.shape[1], len(self.Y_erf(Y, Ynoise)))) + sig_thresh = np.reshape(sig_tr, + (MM.shape[0], MM.shape[1], len(self.Y_erf(Y, Ynoise)))) LgYa = np.outer(np.ones([MM.shape[0], MM.shape[1]]), LgY) LgYa2 = np.reshape(LgYa, (MM.shape[0], MM.shape[1], len(LgY))) @@ -154,7 +160,7 @@ def Pfunc_per_parallel(self, Marr, zarr, Y_c, Y_err, param_vals, Ez_fn, Da_fn): P_Y_sig = self.Y_prob(Y_c, self.LgY, Y_err) P_Y = np.nan_to_num(self.P_Yo(self.LgY, Marr, zarr, param_vals, Ez_fn, Da_fn)) - ans = np.trapz(P_Y * P_Y_sig, x=LgY, axis=2) + ans = np.trapz(P_Y * P_Y_sig, x=self.LgY, axis=2) return ans @@ -176,15 +182,16 @@ def Pfunc_per_zarr(self, MM, z_c, Y_c, Y_err, int_HMF, param_vals): """Routines from nemo (author: Matt Hilton ) to limit dependencies""" -# ------------------------------------------------------------------------------------------------------------ +# ---------------------------------------------------------------------------------------- def calcR500Mpc(z, M500, Ez_fn, H0): """Given z, M500 (in MSun), returns R500 in Mpc, with respect to critical density. 
- + """ if type(M500) == str: raise Exception( - "M500 is a string - check M500MSun in your .yml config file: use, e.g., 1.0e+14 (not 1e14 or 1e+14)" + "M500 is a string - check M500MSun in your .yml config file:\ + use, e.g., 1.0e+14 (not 1e14 or 1e+14)" ) Ez = Ez_fn(z) @@ -195,10 +202,11 @@ def calcR500Mpc(z, M500, Ez_fn, H0): return R500Mpc -# ------------------------------------------------------------------------------------------------------------ +# ---------------------------------------------------------------------------------------- def calcTheta500Arcmin(z, M500, Ez_fn, Da_fn, H0): - """Given z, M500 (in MSun), returns angular size equivalent to R500, with respect to critical density. - + """Given z, M500 (in MSun), returns angular size equivalent to R500, with respect to + critical density. + """ R500Mpc = calcR500Mpc(z, M500, Ez_fn, H0) @@ -209,10 +217,10 @@ def calcTheta500Arcmin(z, M500, Ez_fn, Da_fn, H0): return theta500Arcmin -# ------------------------------------------------------------------------------------------------------------ +# ---------------------------------------------------------------------------------------- def calcQ(theta500Arcmin, tck): """Returns Q, given theta500Arcmin, and a set of spline fit knots for (theta, Q). - + """ # Q=np.poly1d(coeffs)(theta500Arcmin) @@ -221,19 +229,21 @@ def calcQ(theta500Arcmin, tck): return Q -# ------------------------------------------------------------------------------------------------------------ +# ---------------------------------------------------------------------------------------- def calcFRel(z, M500, obsFreqGHz=148.0, Ez_fn=None): - """Calculates relativistic correction to SZ effect at specified frequency, given z, M500 in MSun. - - This assumes the Arnaud et al. (2005) M-T relation, and applies formulae of Itoh et al. (1998) - + """Calculates relativistic correction to SZ effect at specified frequency, given z, + M500 in MSun. + + This assumes the Arnaud et al. (2005) M-T relation, and applies formulae of + Itoh et al. (1998) + As for H13, we return fRel = 1 + delta_SZE (see also Marriage et al. 2011) """ # NOTE: we should define constants somewhere else... 
h = 6.63e-34 kB = 1.38e-23 - sigmaT = 6.6524586e-29 + # sigmaT = 6.6524586e-29 me = 9.11e-31 e = 1.6e-19 c = 3e8 @@ -271,7 +281,9 @@ def calcFRel(z, M500, obsFreqGHz=148.0, Ez_fn=None): - (44 / 5.0) * Xtw ** 4 + (11 / 30.0) * Xtw ** 5 + np.power(Stw, 2) - * (-(434 / 5.0) + (658 / 5.0) * Xtw - (242 / 5.0) * Xtw ** 2 + (143 / 30.0) * Xtw ** 3) + * (-(434 / 5.0) + (658 / 5.0) * Xtw + - (242 / 5.0) * Xtw ** 2 + + (143 / 30.0) * Xtw ** 3) + np.power(Stw, 4) * (-(44 / 5.0) + (187 / 60.0) * Xtw) ) @@ -294,7 +306,8 @@ def calcFRel(z, M500, obsFreqGHz=148.0, Ez_fn=None): + (64 / 7.0) * Xtw ** 5 ) + np.power(Stw, 4) - * (-(18594 / 35.0) + (205003 / 280.0) * Xtw - (1920 / 7.0) * Xtw ** 2 + (1024 / 35.0) * Xtw ** 3) + * (-(18594 / 35.0) + (205003 / 280.0) * Xtw + - (1920 / 7.0) * Xtw ** 2 + (1024 / 35.0) * Xtw ** 3) + np.power(Stw, 6) * (-(544 / 21.0) + (992 / 105.0) * Xtw) ) @@ -330,7 +343,8 @@ def calcFRel(z, M500, obsFreqGHz=148.0, Ez_fn=None): + (18689 / 140.0) * Xtw ** 5 ) + np.power(Stw, 6) - * (-(70414 / 21.0) + (465992 / 105.0) * Xtw - (11792 / 7.0) * Xtw ** 2 + (19778 / 105.0) * Xtw ** 3) + * (-(70414 / 21.0) + (465992 / 105.0) * Xtw + - (11792 / 7.0) * Xtw ** 2 + (19778 / 105.0) * Xtw ** 3) + np.power(Stw, 8) * (-(682 / 7.0) + (7601 / 210.0) * Xtw) ) @@ -345,7 +359,7 @@ def calcFRel(z, M500, obsFreqGHz=148.0, Ez_fn=None): return fRel -# ------------------------------------------------------------------------------------------------------------ +# ---------------------------------------------------------------------------------------- def y0FromLogM500( log10M500, z, @@ -359,24 +373,27 @@ def y0FromLogM500( Ez_fn=None, Da_fn=None ): - """Predict y0~ given logM500 (in MSun) and redshift. Default scaling relation parameters are A10 (as in - H13). - + """Predict y0~ given logM500 (in MSun) and redshift. Default scaling relation + parameters are A10 (as in H13). + Use cosmoModel (astropy.cosmology object) to change/specify cosmological parameters. - - fRelWeightsDict is used to account for the relativistic correction when y0~ has been constructed - from multi-frequency maps. Weights should sum to 1.0; keys are observed frequency in GHz. - + + fRelWeightsDict is used to account for the relativistic correction when y0~ has been + constructed from multi-frequency maps. Weights should sum to 1.0; keys are observed + frequency in GHz. + Returns y0~, theta500Arcmin, Q - + """ if type(Mpivot) == str: raise Exception( - "Mpivot is a string - check Mpivot in your .yml config file: use, e.g., 3.0e+14 (not 3e14 or 3e+14)" + "Mpivot is a string - check Mpivot in your .yml config file:\ + use, e.g., 3.0e+14 (not 3e14 or 3e+14)" ) - # Filtering/detection was performed with a fixed fiducial cosmology... so we don't need to recalculate Q + # Filtering/detection was performed with a fixed fiducial cosmology... so we don't + # need to recalculate Q. 
# We just need to recalculate theta500Arcmin and E(z) only M500 = np.power(10, log10M500) theta500Arcmin = calcTheta500Arcmin(z, M500, Ez_fn, Da_fn, H0) @@ -384,8 +401,9 @@ def y0FromLogM500( Ez = Ez_fn(z) - # Relativistic correction: now a little more complicated, to account for fact y0~ maps are weighted sum - # of individual frequency maps, and relativistic correction size varies with frequency + # Relativistic correction: now a little more complicated, to account for fact y0~ maps + # are weighted sum of individual frequency maps, and relativistic correction size + # varies with frequency fRels = [] freqWeights = [] for obsFreqGHz in fRelWeightsDict.keys(): @@ -395,7 +413,8 @@ def y0FromLogM500( # UPP relation according to H13 # NOTE: m in H13 is M/Mpivot - # NOTE: this goes negative for crazy masses where the Q polynomial fit goes -ve, so ignore those + # NOTE: this goes negative for crazy masses where the Q polynomial fit goes -ve, so + # ignore those y0pred = tenToA0 * np.power(Ez, 2) * np.power(M500 / Mpivot, 1 + B0) * Q * fRel return y0pred, theta500Arcmin, Q diff --git a/soliket/clusters/tinker.py b/soliket/clusters/tinker.py index 6a63a7f4..389f187d 100644 --- a/soliket/clusters/tinker.py +++ b/soliket/clusters/tinker.py @@ -7,7 +7,7 @@ tinker_data = np.transpose([[float(x) for x in line.split()] for line in - """200 0.186 1.47 2.57 1.19 + """200 0.186 1.47 2.57 1.19 300 0.200 1.52 2.25 1.27 400 0.212 1.56 2.05 1.34 600 0.218 1.61 1.87 1.45 @@ -26,7 +26,7 @@ def tinker_params_spline(delta, z=None): tinker_splines = [] D, data = np.log(tinker_data[0]), tinker_data[1:] for y in data: - # Extend to large Delta + # Extend to large Delta p = np.polyfit(D[-2:], y[-2:], 1) x = np.hstack((D, D[-1] + 3.)) y = np.hstack((y, np.polyval(p, x[-1]))) diff --git a/soliket/cross_correlation.py b/soliket/cross_correlation.py index 8db06ba2..c2f1e333 100644 --- a/soliket/cross_correlation.py +++ b/soliket/cross_correlation.py @@ -1,6 +1,6 @@ """ Simple likelihood for CMB-galaxy cross-correlations using the cobaya -CCL module. +CCL module. 
First version by Pablo Lemos """ @@ -9,11 +9,12 @@ from .gaussian import GaussianData, GaussianLikelihood import pyccl as ccl + class CrossCorrelationLikelihood(GaussianLikelihood): def initialize(self): self.dndz = np.loadtxt(self.dndz_file) - x,y,dy = self._get_data() + x, y, dy = self._get_data() cov = np.diag(dy**2) self.data = GaussianData("CrossCorrelation", x, y, cov) @@ -32,7 +33,7 @@ def _get_data(self, **params_values): self.ell_cross = data_cross[0] cl_cross = data_cross[1] - cl_cross_err = data_cross[2] + cl_cross_err = data_cross[2] x = np.concatenate([self.ell_auto, self.ell_cross]) y = np.concatenate([cl_auto, cl_cross]) @@ -43,17 +44,21 @@ def _get_data(self, **params_values): def _get_theory(self, **params_values): cosmo = self.provider.get_CCL()['cosmo'] - tracer_g = ccl.NumberCountsTracer(cosmo, has_rsd=False, dndz = self.dndz.T, - bias =(self.dndz[:,0], params_values['b1']*np.ones(len(self.dndz[:,0]))), - mag_bias = (self.dndz[:,0], params_values['s1']*np.ones(len(self.dndz[:,0]))) - ) - tracer_k = ccl.CMBLensingTracer(cosmo, z_source = 1060) + tracer_g = ccl.NumberCountsTracer(cosmo, has_rsd=False, dndz=self.dndz.T, + bias=(self.dndz[:, 0], + params_values['b1'] * + np.ones(len(self.dndz[:, 0]))), + mag_bias=(self.dndz[:, 0], + params_values['s1'] * + np.ones(len(self.dndz[:, 0]))) + ) + tracer_k = ccl.CMBLensingTracer(cosmo, z_source=1060) - cl_gg = ccl.cls.angular_cl(cosmo, tracer_g, tracer_g, self.ell_auto) #+ 1e-7 - cl_kg = ccl.cls.angular_cl(cosmo, tracer_k, tracer_g, self.ell_cross) + cl_gg = ccl.cls.angular_cl(cosmo, tracer_g, tracer_g, self.ell_auto) # + 1e-7 + cl_kg = ccl.cls.angular_cl(cosmo, tracer_k, tracer_g, self.ell_cross) return np.concatenate([cl_gg, cl_kg]) def logp(self, **params_values): theory = self._get_theory(**params_values) - return self.data.loglike(theory) \ No newline at end of file + return self.data.loglike(theory) diff --git a/soliket/gaussian.py b/soliket/gaussian.py index 5a1ab68c..425104eb 100644 --- a/soliket/gaussian.py +++ b/soliket/gaussian.py @@ -55,7 +55,8 @@ class MultiGaussianLikelihood(GaussianLikelihood): def __init__(self, info=empty_dict, **kwargs): if 'components' in info: - self.likelihoods = [get_likelihood(*kv) for kv in zip(info['components'], info['options'])] + self.likelihoods = [get_likelihood(*kv) for kv in zip(info['components'], + info['options'])] default_info = merge_info(*[like.get_defaults() for like in self.likelihoods]) default_info.update(info) @@ -87,7 +88,8 @@ def _get_theory(self, **kwargs): def get_requirements(self): - # Reqs with arguments like 'lmax', etc. may have to be carefully treated here to merge + # Reqs with arguments like 'lmax', etc. may have to be carefully treated here to + # merge reqs = {} for like in self.likelihoods: new_reqs = like.get_requirements() diff --git a/soliket/gaussian_data.py b/soliket/gaussian_data.py index c50dfd6d..94578996 100644 --- a/soliket/gaussian_data.py +++ b/soliket/gaussian_data.py @@ -22,7 +22,8 @@ def __init__(self, name, x, y, cov): self.name = str(name) if not (len(x) == len(y) and cov.shape == (len(x), len(x))): - raise ValueError(f"Incompatible shapes! x={x.shape}, y={y.shape}, cov={cov.shape}") + raise ValueError(f"Incompatible shapes! 
x={x.shape}, y={y.shape}, \ + cov={cov.shape}") self.x = x self.y = y @@ -38,7 +39,8 @@ def __len__(self): return len(self.x) def loglike(self, theory): - return multivariate_normal_logpdf(theory, self.y, self.cov, self.inv_cov, self.log_det) + return multivariate_normal_logpdf(theory, self.y, self.cov, self.inv_cov, + self.log_det) class MultiGaussianData(GaussianData): @@ -71,7 +73,8 @@ def __init__(self, data_list, cross_covs=None): cov = cross_covs[key] if not cov.shape == (len(d1), len(d2)): raise ValueError( - f"Cross-covariance (for {d1.name} x {d2.name}) has wrong shape: {cov.shape}!" + f"Cross-covariance (for {d1.name} x {d2.name}) \ + has wrong shape: {cov.shape}!" ) elif rev_key in cross_covs: cross_covs[key] = cross_covs[rev_key].T @@ -112,7 +115,8 @@ def log_det(self): @property def labels(self): - return [x for y in [[name] * len(d) for name, d in zip(self.names, self.data_list)] for x in y] + return [x for y in [[name] * len(d) for + name, d in zip(self.names, self.data_list)] for x in y] def _index_range(self, name): if name not in self.names: @@ -152,4 +156,5 @@ def plot_cov(self, **kwargs): for j, lj in zip(range(len(self.data)), self.labels) ] - return hv.HeatMap(data).opts(tools=["hover"], width=800, height=800, invert_yaxis=True, xrotation=90) + return hv.HeatMap(data).opts(tools=["hover"], width=800, height=800, + invert_yaxis=True, xrotation=90) diff --git a/soliket/lensing/__init__.py b/soliket/lensing/__init__.py index 3b59cb2e..d69562fc 100644 --- a/soliket/lensing/__init__.py +++ b/soliket/lensing/__init__.py @@ -1 +1 @@ -from .lensing import LensingLiteLikelihood, LensingLikelihood +from .lensing import LensingLiteLikelihood, LensingLikelihood # noqa: F401 diff --git a/soliket/lensing/lensing.py b/soliket/lensing/lensing.py index ffd946e5..24851594 100644 --- a/soliket/lensing/lensing.py +++ b/soliket/lensing/lensing.py @@ -59,40 +59,42 @@ def initialize(self): else: raise LoggedError( self.log, - "The 'data_folder' directory does not exist. " "Check the given path [%s].", + "The 'data_folder' directory does not exist. 
"\ + "Check the given path [%s].", self.data_folder, ) # Set files where data/covariance are loaded from self.datapath = os.path.join(self.data_folder, self.data_filename) self.covpath = os.path.join(self.data_folder, self.cov_filename) - self.binning_matrix_path = os.path.join(self.data_folder, self.binning_matrix_filename) + self.binning_matrix_path = os.path.join(self.data_folder, + self.binning_matrix_filename) - cov = np.loadtxt(self.covpath) + # cov = np.loadtxt(self.covpath) # Initialize fiducial PS Cls = self._get_fiducial_Cls() # Set the fiducial spectra self.ls = np.arange(0, self.lmax) - self.fcltt = Cls["tt"][0 : self.lmax] - self.fclpp = Cls["pp"][0 : self.lmax] - self.fclee = Cls["ee"][0 : self.lmax] - self.fclte = Cls["te"][0 : self.lmax] - self.fclbb = Cls["bb"][0 : self.lmax] + self.fcltt = Cls["tt"][0: self.lmax] + self.fclpp = Cls["pp"][0: self.lmax] + self.fclee = Cls["ee"][0: self.lmax] + self.fclte = Cls["te"][0: self.lmax] + self.fclbb = Cls["bb"][0: self.lmax] self.thetaclkk = self.fclpp * (self.ls * (self.ls + 1)) ** 2 * 0.25 # load the correction terms generate from the script n1so.py - self.N0cltt = np.loadtxt(os.path.join(self.data_folder, "n0mvdcltt1.txt")).transpose() - self.N0clte = np.loadtxt(os.path.join(self.data_folder, "n0mvdclte1.txt")).transpose() - self.N0clee = np.loadtxt(os.path.join(self.data_folder, "n0mvdclee1.txt")).transpose() - self.N0clbb = np.loadtxt(os.path.join(self.data_folder, "n0mvdclbb1.txt")).transpose() - self.N1clpp = np.loadtxt(os.path.join(self.data_folder, "n1mvdclkk1.txt")).transpose() - self.N1cltt = np.loadtxt(os.path.join(self.data_folder, "n1mvdcltte1.txt")).transpose() - self.N1clte = np.loadtxt(os.path.join(self.data_folder, "n1mvdcltee1.txt")).transpose() - self.N1clee = np.loadtxt(os.path.join(self.data_folder, "n1mvdcleee1.txt")).transpose() - self.N1clbb = np.loadtxt(os.path.join(self.data_folder, "n1mvdclbbe1.txt")).transpose() + self.N0cltt = np.loadtxt(os.path.join(self.data_folder, "n0mvdcltt1.txt")).T + self.N0clte = np.loadtxt(os.path.join(self.data_folder, "n0mvdclte1.txt")).T + self.N0clee = np.loadtxt(os.path.join(self.data_folder, "n0mvdclee1.txt")).T + self.N0clbb = np.loadtxt(os.path.join(self.data_folder, "n0mvdclbb1.txt")).T + self.N1clpp = np.loadtxt(os.path.join(self.data_folder, "n1mvdclkk1.txt")).T + self.N1cltt = np.loadtxt(os.path.join(self.data_folder, "n1mvdcltte1.txt")).T + self.N1clte = np.loadtxt(os.path.join(self.data_folder, "n1mvdcltee1.txt")).T + self.N1clee = np.loadtxt(os.path.join(self.data_folder, "n1mvdcleee1.txt")).T + self.N1clbb = np.loadtxt(os.path.join(self.data_folder, "n1mvdclbbe1.txt")).T self.n0 = np.loadtxt(os.path.join(self.data_folder, "n0mv.txt")) super().initialize() @@ -130,17 +132,17 @@ def _get_data(self): def _get_theory(self, **params_values): cl = self.provider.get_Cl(ell_factor=False) - Cl_theo = cl["pp"][0 : self.lmax] - Cl_tt = cl["tt"][0 : self.lmax] - Cl_ee = cl["ee"][0 : self.lmax] - Cl_te = cl["te"][0 : self.lmax] - Cl_bb = cl["bb"][0 : self.lmax] + Cl_theo = cl["pp"][0: self.lmax] + Cl_tt = cl["tt"][0: self.lmax] + Cl_ee = cl["ee"][0: self.lmax] + Cl_te = cl["te"][0: self.lmax] + Cl_bb = cl["bb"][0: self.lmax] ls = self.ls Clkk_theo = (ls * (ls + 1)) ** 2 * Cl_theo * 0.25 Clkk_binned = self.binning_matrix.dot(Clkk_theo) - Cltt_binned = self.binning_matrix.dot(Cl_tt) + # Cltt_binned = self.binning_matrix.dot(Cl_tt) correction = ( 2 @@ -169,5 +171,5 @@ class LensingLiteLikelihood(BinnedPSLikelihood): lmax: int = 3000 datapath: str = 
resource_filename("soliket", "lensing/data/binnedauto.txt") covpath: str = resource_filename("soliket", "lensing/data/binnedcov.txt") - binning_matrix_path: str = resource_filename("soliket", "lensing/data/binningmatrix.txt") - + binning_matrix_path: str = resource_filename("soliket", + "lensing/data/binningmatrix.txt") diff --git a/soliket/mflike.py b/soliket/mflike.py index 8dfa1b8f..d99d6b3b 100644 --- a/soliket/mflike.py +++ b/soliket/mflike.py @@ -17,6 +17,7 @@ from .gaussian import GaussianData, GaussianLikelihood + class MFLike(GaussianLikelihood, InstallableLikelihood): _url = "https://portal.nersc.gov/cfs/sobs/users/MFLike_data" _release = "v0.6" @@ -52,7 +53,8 @@ def initialize(self): else: raise LoggedError( self.log, - "The 'data_folder' directory does not exist. " "Check the given path [%s].", + "The 'data_folder' directory does not exist. "\ + "Check the given path [%s].", self.data_folder, ) @@ -63,20 +65,19 @@ def initialize(self): self.lmax_theory = 9000 self.requested_cls = ["tt", "te", "ee"] - self.expected_params_fg = ["a_tSZ", "a_kSZ", "a_p", "beta_p", - "a_c", "beta_c", "a_s", "a_gtt", "a_gte", "a_gee", - "a_psee", "a_pste", "xi", "T_d"] - + "a_c", "beta_c", "a_s", "a_gtt", "a_gte", "a_gee", + "a_psee", "a_pste", "xi", "T_d"] + self.expected_params_nuis = ["bandint_shift_93", "bandint_shift_145", "bandint_shift_225", - "calT_93","calE_93", - "calT_145","calE_145", - "calT_225","calE_225", + "calT_93", "calE_93", + "calT_145", "calE_145", + "calT_225", "calE_225", "calG_all", - "alpha_93","alpha_145","alpha_225", - ] + "alpha_93", "alpha_145", "alpha_225", + ] self.ThFo = TheoryForge_MFLike(self) self.log.info("Initialized!") @@ -92,11 +93,12 @@ def initialize_with_params(self): differences) def get_requirements(self): - return dict(Cl={k: max(c, self.lmax_theory+1) for k, c in self.lcuts.items()}) + return dict(Cl={k: max(c, self.lmax_theory + 1) for k, c in self.lcuts.items()}) def _get_theory(self, **params_values): cl = self.provider.get_Cl(ell_factor=True) - params_values_nocosmo = {k: params_values[k] for k in self.expected_params_fg + self.expected_params_nuis} + params_values_nocosmo = {k: params_values[k] for + k in self.expected_params_fg + self.expected_params_nuis} return self._get_power_spectra(cl, **params_values_nocosmo) def loglike(self, cl, **params_values_nocosmo): @@ -159,10 +161,10 @@ def get_cl_meta(spec): freq_1, freq_2 = spec["frequencies"] # Read off polarization channel combinations pols = spec.get("polarizations", - default_cuts["polarizations"]).copy() + default_cuts["polarizations"]).copy() # Read off scale cuts scls = spec.get("scales", - default_cuts["scales"]).copy() + default_cuts["scales"]).copy() # For the same two channels, do not include ET and TE, only TE if (exp_1 == exp_2) and (freq_1 == freq_2): @@ -176,7 +178,7 @@ def get_cl_meta(spec): # Symmetrization if ("TE" in pols) and ("ET" in pols): symm = spec.get("symmetrize", - default_cuts["symmetrize"]) + default_cuts["symmetrize"]) else: symm = False @@ -319,13 +321,13 @@ def get_sacc_names(pol, exp_1, exp_2, freq_1, freq_2): np.arange(cls.size, dtype=int)), "pol": ppol_dict[pol], - "hasYX_xsp": pol in ["ET","BE","BT"], #This is necessary for handling symmetrization - "t1": xp_nu(exp_1, freq_1), # - "t2": xp_nu(exp_2, freq_2), # + "hasYX_xsp": pol in ["ET", "BE", "BT"], # For symm + "t1": xp_nu(exp_1, freq_1), + "t2": xp_nu(exp_2, freq_2), "nu1": freq_1, "nu2": freq_2, - "leff": ls, # - "cl_data": cls, # + "leff": ls, + "cl_data": cls, "bpw": ws}) index_sofar += cls.size if not 
cbbl_extra: @@ -355,18 +357,17 @@ def get_sacc_names(pol, exp_1, exp_2, freq_1, freq_2): self.data = GaussianData("mflike", self.ell_vec, self.data_vec, self.cov) - def _get_power_spectra(self, cl, **params_values_nocosmo): # Get Cl's from the theory code Dls = {s: cl[s][self.l_bpws] for s, _ in self.lcuts.items()} - DlsObs = self.ThFo.get_modified_theory(Dls,**params_values_nocosmo) + DlsObs = self.ThFo.get_modified_theory(Dls, **params_values_nocosmo) ps_vec = np.zeros_like(self.data_vec) for m in self.spec_meta: p = m["pol"] i = m["ids"] w = m["bpw"].weight.T - clt = np.dot(w, DlsObs[p, m["nu1"], m["nu2"]]) + clt = np.dot(w, DlsObs[p, m["nu1"], m["nu2"]]) ps_vec[i] = clt return ps_vec diff --git a/soliket/poisson_data.py b/soliket/poisson_data.py index f5f5f1a0..e1ed72d9 100644 --- a/soliket/poisson_data.py +++ b/soliket/poisson_data.py @@ -13,8 +13,8 @@ class PoissonData: Columns of catalog relevant for computing poisson rate. samples : dict, optional Each entry is an N_cat x N_samples array of posterior samples; - plus, should have a 'prior' entry of the same shape that is the value of the interim - prior for each sample. + plus, should have a 'prior' entry of the same shape that is the value of the + interim prior for each sample. """ def __init__(self, name, catalog, columns, samples=None): @@ -26,12 +26,12 @@ def __init__(self, name, catalog, columns, samples=None): if samples is not None: for c in columns: if c not in samples: - raise ValueError( - "If providing samples, must have samples for all columns: {}".format(columns) - ) + raise ValueError("If providing samples, must have samples \ + for all columns: {}".format(columns)) if "prior" not in samples: - raise ValueError('Must provide value of interim prior for all samples, under "prior" key!') + raise ValueError('Must provide value of interim prior \ + for all samples, under "prior" key!') assert all( [samples[k].shape == samples["prior"].shape for k in samples] @@ -59,7 +59,8 @@ def loglike(self, rate_fn, n_expected, broadcastable=False): # Simple case; no uncertainties if self.samples is None: if broadcastable: - rate_densities = rate_fn(**{c: self.catalog[c].values for c in self.columns}) + rate_densities = rate_fn(**{c: self.catalog[c].values for + c in self.columns}) else: rate_densities = np.array( [ @@ -72,7 +73,8 @@ def loglike(self, rate_fn, n_expected, broadcastable=False): else: # Eqn (11) of DFM, Hogg & Morton (https://arxiv.org/pdf/1406.3020.pdf) - summand = rate_fn(**{c: self.samples[c] for c in self.columns}) / self.samples["prior"] + summand = rate_fn(**{c: self.samples[c] for + c in self.columns}) / self.samples["prior"] l_k = 1 / self.N_k * summand.sum(axis=1) assert l_k.shape == (self._len,) return -n_expected + sum(np.log(l_k)) diff --git a/soliket/ps.py b/soliket/ps.py index 3e1ddf34..e03fae5b 100644 --- a/soliket/ps.py +++ b/soliket/ps.py @@ -25,7 +25,8 @@ class BinnedPSLikelihood(PSLikelihood): def initialize(self): self.binning_matrix = self._get_binning_matrix() - self.bin_centers = self.binning_matrix.dot(np.arange(self.binning_matrix.shape[1])) + self.bin_centers = \ + self.binning_matrix.dot(np.arange(self.binning_matrix.shape[1])) super().initialize() @classmethod diff --git a/soliket/tests/test_cross_correlation.py b/soliket/tests/test_cross_correlation.py index 6e4d13db..191a950f 100644 --- a/soliket/tests/test_cross_correlation.py +++ b/soliket/tests/test_cross_correlation.py @@ -2,7 +2,8 @@ from soliket.ccl import CCL from soliket.cross_correlation import CrossCorrelationLikelihood from 
cobaya.model import get_model -from cobaya.likelihood import Likelihood +# from cobaya.likelihood import Likelihood + def test_cross_correlation(): cosmo_params = { @@ -13,13 +14,13 @@ def test_cross_correlation(): } info = {"params": {"omch2": cosmo_params['Omega_c'] * cosmo_params['h'] ** 2., - "ombh2": cosmo_params['Omega_b'] * cosmo_params['h'] ** 2., - "H0": cosmo_params['h'] * 100, - "ns": cosmo_params['n_s'], - "As": 2.2e-9, - "tau": 0, - "b1": 1, - "s1": 0.4}, + "ombh2": cosmo_params['Omega_b'] * cosmo_params['h'] ** 2., + "H0": cosmo_params['h'] * 100, + "ns": cosmo_params['n_s'], + "As": 2.2e-9, + "tau": 0, + "b1": 1, + "s1": 0.4}, "likelihood": {"CrossCorrelationLikelihood": CrossCorrelationLikelihood}, "theory": { "camb": None, @@ -29,5 +30,4 @@ def test_cross_correlation(): model = get_model(info) loglikes, derived = model.loglikes() - assert np.isclose(loglikes[0], 88.2, atol = .2, rtol = 0.) - + assert np.isclose(loglikes[0], 88.2, atol=.2, rtol=0.) diff --git a/soliket/tests/test_gaussian.py b/soliket/tests/test_gaussian.py index 94a74a96..0fc227e2 100644 --- a/soliket/tests/test_gaussian.py +++ b/soliket/tests/test_gaussian.py @@ -1,4 +1,4 @@ -import unittest +# import unittest import numpy as np from sklearn.datasets import make_spd_matrix diff --git a/soliket/tests/test_lensing.py b/soliket/tests/test_lensing.py index 2c75bebb..cee56ef8 100644 --- a/soliket/tests/test_lensing.py +++ b/soliket/tests/test_lensing.py @@ -11,6 +11,7 @@ tempfile.gettempdir(), "lensing_packages" ) + def get_demo_lensing_model(theory): if theory == "camb": info_yaml = r""" @@ -31,7 +32,7 @@ def get_demo_lensing_model(theory): H0: prior: min: 40 - max: 100 + max: 100 """ elif theory == "classy": info_yaml = r""" @@ -53,7 +54,7 @@ def get_demo_lensing_model(theory): H0: prior: min: 40 - max: 100 + max: 100 """ diff --git a/soliket/tests/test_lensing_lite.py b/soliket/tests/test_lensing_lite.py index dbef13ea..b41d974a 100644 --- a/soliket/tests/test_lensing_lite.py +++ b/soliket/tests/test_lensing_lite.py @@ -25,7 +25,7 @@ def get_demo_lensing_model(theory): H0: prior: min: 40 - max: 100 + max: 100 """ elif theory == "classy": info_yaml = r""" @@ -47,7 +47,7 @@ def get_demo_lensing_model(theory): H0: prior: min: 40 - max: 100 + max: 100 """ diff --git a/soliket/tests/test_mflike.py b/soliket/tests/test_mflike.py index 22ab27ca..491010d5 100644 --- a/soliket/tests/test_mflike.py +++ b/soliket/tests/test_mflike.py @@ -7,9 +7,8 @@ from distutils.version import LooseVersion import camb -import mflike -import soliket - +import mflike # noqa +import soliket # noqa packages_path = os.environ.get("COBAYA_PACKAGES_PATH") or os.path.join( tempfile.gettempdir(), "LAT_packages" @@ -40,9 +39,9 @@ "a_psee": 0, "a_pste": 0, "xi": 0, - "bandint_shift_93" : 0, - "bandint_shift_145" : 0, - "bandint_shift_225" : 0, + "bandint_shift_93": 0, + "bandint_shift_145": 0, + "bandint_shift_225": 0, "calT_93": 1, "calE_93": 1, "calT_145": 1, @@ -57,9 +56,15 @@ if LooseVersion(camb.__version__) < LooseVersion('1.3'): - chi2s = {"tt": 1384.5669, "te": 1400.2760, "ee": 1428.7597, "tt-te-et-ee": 2412.9275} + chi2s = {"tt": 1384.5669, + "te": 1400.2760, + "ee": 1428.7597, + "tt-te-et-ee": 2412.9275} else: - chi2s = {"tt": 737.8571537677649, "te-et": 998.2730263280033, "ee": 716.4015196388742, "tt-te-et-ee": 2459.7250} + chi2s = {"tt": 737.8571537677649, + "te-et": 998.2730263280033, + "ee": 716.4015196388742, + "tt-te-et-ee": 2459.7250} pre = "data_sacc_" @@ -70,7 +75,8 @@ class MFLikeTest(unittest.TestCase): def setUp(self): 
from cobaya.install import install - install({"likelihood": {"mflike.MFLike": None}}, path=packages_path, skip_global=True) + install({"likelihood": {"mflike.MFLike": None}}, + path=packages_path, skip_global=True) def get_mflike_type(self, as_string=False): if self.orig: @@ -84,16 +90,17 @@ def get_mflike_type(self, as_string=False): return eval(t) def test_mflike(self): - # As of now, there is not a mechanism - # in soliket to ensure there is .loglike that can be called like this - # w/out cobaya + # As of now, there is not a mechanism + # in soliket to ensure there is .loglike that can be called like this + # w/out cobaya camb_cosmo = cosmo_params.copy() camb_cosmo.update({"lmax": 9000, "lens_potential_accuracy": 1}) pars = camb.set_params(**camb_cosmo) results = camb.get_results(pars) powers = results.get_cmb_power_spectra(pars, CMB_unit="muK") - cl_dict = {k: powers["total"][:, v] for k, v in {"tt": 0, "ee": 1, "te": 3}.items()} + cl_dict = {k: powers["total"][:, v] for + k, v in {"tt": 0, "ee": 1, "te": 3}.items()} for select, chi2 in chi2s.items(): MFLike = self.get_mflike_type() @@ -114,7 +121,7 @@ def test_mflike(self): }, } ) - + loglike = my_mflike.loglike(cl_dict, **nuisance_params) self.assertAlmostEqual(-2 * (loglike - my_mflike.logp_const), chi2, 2) @@ -122,7 +129,7 @@ def test_mflike(self): def test_cobaya(self): mflike_type = self.get_mflike_type(as_string=True) - params = dict(cosmo_params) + # params = dict(cosmo_params) # params['a_tSZ'] = 3.3 info = { diff --git a/soliket/tests/test_multi.py b/soliket/tests/test_multi.py index ccf63aaa..db56e722 100644 --- a/soliket/tests/test_multi.py +++ b/soliket/tests/test_multi.py @@ -1,5 +1,5 @@ import numpy as np -import pytest +# import pytest from soliket.tests.test_mflike import cosmo_params, nuisance_params @@ -16,7 +16,8 @@ def test_multi(): camb_options = {"extra_args": {"lens_potential_accuracy": 1}} - fg_params = {"a_tSZ": {"prior": {"min": 3.0, "max": 3.6}}, "a_kSZ": {"prior": {"min": 1.4, "max": 1.8}}} + fg_params = {"a_tSZ": {"prior": {"min": 3.0, "max": 3.6}}, + "a_kSZ": {"prior": {"min": 1.4, "max": 1.8}}} mflike_params = {**cosmo_params, **nuisance_params} mflike_params.update(fg_params) diff --git a/soliket/tests/test_ps.py b/soliket/tests/test_ps.py index 68515744..e2754ea8 100644 --- a/soliket/tests/test_ps.py +++ b/soliket/tests/test_ps.py @@ -3,10 +3,10 @@ import numpy as np from sklearn.datasets import make_spd_matrix -from scipy.stats import multivariate_normal +# from scipy.stats import multivariate_normal from soliket.gaussian import GaussianData, CrossCov -from soliket import GaussianLikelihood, MultiGaussianLikelihood +from soliket import MultiGaussianLikelihood from soliket import PSLikelihood from soliket.utils import get_likelihood @@ -75,5 +75,7 @@ def test_toy(): like2 = get_likelihood(lhood, info2) like3 = get_likelihood(lhood, info3) - assert np.isclose(multilike1.logp(), sum([l.logp() for l in [like1, like2, like3]])) - assert not np.isclose(multilike2.logp(), sum([l.logp() for l in [like1, like2, like3]])) + assert np.isclose(multilike1.logp(), sum([likex.logp() for + likex in [like1, like2, like3]])) + assert not np.isclose(multilike2.logp(), sum([likex.logp() for + likex in [like1, like2, like3]])) diff --git a/soliket/tests/test_runs.py b/soliket/tests/test_runs.py index 2b5b3b57..239a7f61 100644 --- a/soliket/tests/test_runs.py +++ b/soliket/tests/test_runs.py @@ -4,7 +4,13 @@ from cobaya.yaml import yaml_load from cobaya.run import run -@pytest.mark.parametrize("lhood", ["mflike", 
"lensing", "lensing_lite", "multi", "cross_correlation"]) + +@pytest.mark.parametrize("lhood", + ["mflike", + "lensing", + "lensing_lite", + "multi", + "cross_correlation"]) def test_evaluate(lhood): info = yaml_load(pkgutil.get_data("soliket", f"tests/test_{lhood}.yaml")) info["force"] = True @@ -12,7 +18,13 @@ def test_evaluate(lhood): updated_info, sampler = run(info) -@pytest.mark.parametrize("lhood", ["mflike", "lensing", "lensing_lite", "multi", "cross_correlation"]) + +@pytest.mark.parametrize("lhood", + ["mflike", + "lensing", + "lensing_lite", + "multi", + "cross_correlation"]) def test_mcmc(lhood): info = yaml_load(pkgutil.get_data("soliket", f"tests/test_{lhood}.yaml")) info["force"] = True diff --git a/soliket/theoryforge_MFLike.py b/soliket/theoryforge_MFLike.py index 3af346dc..25ae860e 100644 --- a/soliket/theoryforge_MFLike.py +++ b/soliket/theoryforge_MFLike.py @@ -2,15 +2,17 @@ import os -#Converts from cmb units to brightness. Numerical factors not included, it needs proper normalization when used. +# Converts from cmb units to brightness. +# Numerical factors not included, it needs proper normalization when used. def _cmb2bb(nu): - # NB: numerical factors not included + # NB: numerical factors not included from scipy import constants T_CMB = 2.72548 - x = nu*constants.h * 1e9 / constants.k / T_CMB - return np.exp(x)*(nu * x / np.expm1(x))**2 + x = nu * constants.h * 1e9 / constants.k / T_CMB + return np.exp(x) * (nu * x / np.expm1(x))**2 -#Provides the frequency value given the bandpass name. To be modified - it is ACT based!!!!!! + +# Provides the frequency value given the bandpass name. To be modified - it is ACT based!! def _get_fr(array): a = array.split("_")[0] if a == 'PA1' or a == 'PA2': @@ -22,7 +24,7 @@ def _get_fr(array): class TheoryForge_MFLike: - def __init__(self,mflike): + def __init__(self, mflike): self.data_folder = mflike.data_folder self.freqs = mflike.freqs @@ -31,36 +33,36 @@ def __init__(self,mflike): self.requested_cls = mflike.requested_cls self.expected_params_fg = mflike.expected_params_fg self.expected_params_nuis = mflike.expected_params_nuis - self.spec_meta = mflike.spec_meta + self.spec_meta = mflike.spec_meta self.defaults_cuts = mflike.defaults - #Initialize foreground model + # Initialize foreground model self._init_foreground_model() - #Parameters for template from file + # Parameters for template from file self.systematics_template = mflike.systematics_template - #Initialize template for marginalization, if needed + # Initialize template for marginalization, if needed if(self.systematics_template["has_file"]): self._init_template_from_file() - #Parameters for band integration + # Parameters for band integration self.bandint_nsteps = mflike.band_integration["nsteps"] self.bandint_width = mflike.band_integration["bandwidth"] self.bandint_external_bandpass = mflike.band_integration["external_bandpass"] - - #Bandpass construction for band integration + + # Bandpass construction for band integration if self.bandint_external_bandpass: path = os.path.normpath(os.path.join(self.data_folder, - '/bp_int/')) + '/bp_int/')) arrays = os.listdir(path) self._init_external_bandpass_construction(arrays) - def get_modified_theory(self,Dls,**params): - + def get_modified_theory(self, Dls, **params): + fg_params = {k: params[k] for k in self.expected_params_fg} nuis_params = {k: params[k] for k in self.expected_params_nuis} - #Bandpass construction for band integration + # Bandpass construction for band integration if self.bandint_external_bandpass: 
self.bandint_freqs = self._external_bandpass_construction(**nuis_params)
         else:
@@ -69,57 +71,56 @@ def get_modified_theory(self,Dls,**params):
         fg_dict = self._get_foreground_model(**fg_params)
 
         cmbfg_dict = {}
-        #Sum CMB and FGs
+        # Sum CMB and FGs
         for f1 in self.freqs:
             for f2 in self.freqs:
                 for s in self.requested_cls:
-                    cmbfg_dict[s,f1,f2] = Dls[s]+fg_dict[s,'all',f1,f2]
+                    cmbfg_dict[s, f1, f2] = Dls[s] + fg_dict[s, 'all', f1, f2]
 
+        # Apply alm based calibration factors
+        cmbfg_dict = self._get_calibrated_spectra(cmbfg_dict, **nuis_params)
-        #Apply alm based calibration factors
-        cmbfg_dict = self._get_calibrated_spectra(cmbfg_dict,**nuis_params)
+        # Introduce spectra rotations
+        cmbfg_dict = self._get_rotated_spectra(cmbfg_dict, **nuis_params)
-        #Introduce spectra rotations
-        cmbfg_dict = self._get_rotated_spectra(cmbfg_dict,**nuis_params)
-
-        #Introduce templates of systematics from file, if needed
+        # Introduce templates of systematics from file, if needed
         if(self.systematics_template['has_file']):
-            cmbfg_dict = self._get_template_from_file(cmbfg_dict,**nuis_params)
+            cmbfg_dict = self._get_template_from_file(cmbfg_dict, **nuis_params)
 
-        #Built theory
+        # Build theory
         dls_dict = {}
         for m in self.spec_meta:
             p = m['pol']
-            if p in ['tt','ee','bb']:
+            if p in ['tt', 'ee', 'bb']:
                 dls_dict[p, m['nu1'], m['nu2']] = cmbfg_dict[p, m['nu1'], m['nu2']]
-            else: #['te','tb','eb']
-                if m['hasYX_xsp']: #not symmetrizing
+            else:  # ['te','tb','eb']
+                if m['hasYX_xsp']:  # not symmetrizing
                     dls_dict[p, m['nu1'], m['nu2']] = cmbfg_dict[p, m['nu2'], m['nu1']]
                 else:
                     dls_dict[p, m['nu1'], m['nu2']] = cmbfg_dict[p, m['nu1'], m['nu2']]
 
-                if self.defaults_cuts['symmetrize']: #we average TE and ET (as we do for data)
+                if self.defaults_cuts['symmetrize']:  # we average TE and ET (as for data)
                     dls_dict[p, m['nu1'], m['nu2']] += cmbfg_dict[p, m['nu2'], m['nu1']]
                     dls_dict[p, m['nu1'], m['nu2']] *= 0.5
-
         return dls_dict
 
 ###########################################################################
-## This part deals with foreground construction and bandpass integration ##
+# This part deals with foreground construction and bandpass integration ##
 ###########################################################################
 
-    #Initializes the foreground model. It sets the SED and reads the templates
+    # Initializes the foreground model. It sets the SED and reads the templates
     def _init_foreground_model(self):
         from fgspectra import cross as fgc
         from fgspectra import frequency as fgf
         from fgspectra import power as fgp
-
-        template_path = os.path.join(os.path.dirname(os.path.abspath(fgp.__file__)),'data')
-        cibc_file = os.path.join(template_path,'cl_cib_Choi2020.dat')
-
-        #set pivot freq and multipole
+
+        template_path = os.path.join(os.path.dirname(os.path.abspath(fgp.__file__)),
+                                     'data')
+        cibc_file = os.path.join(template_path, 'cl_cib_Choi2020.dat')
+
+        # set pivot freq and multipole
         self.fg_nu_0 = self.foregrounds["normalisation"]["nu_0"]
         self.fg_ell_0 = self.foregrounds["normalisation"]["ell_0"]
 
@@ -129,77 +130,110 @@ def _init_foreground_model(self):
         self.cibp = fgc.FactorizedCrossSpectrum(fgf.ModifiedBlackBody(), fgp.PowerLaw())
         self.radio = fgc.FactorizedCrossSpectrum(fgf.PowerLaw(), fgp.PowerLaw())
         self.tsz = fgc.FactorizedCrossSpectrum(fgf.ThermalSZ(), fgp.tSZ_150_bat())
-        self.cibc = fgc.FactorizedCrossSpectrum(fgf.CIB(), fgp.PowerSpectrumFromFile(cibc_file))
+        self.cibc = fgc.FactorizedCrossSpectrum(fgf.CIB(),
+                                                fgp.PowerSpectrumFromFile(cibc_file))
         self.dust = fgc.FactorizedCrossSpectrum(fgf.ModifiedBlackBody(), fgp.PowerLaw())
         self.tSZ_and_CIB = fgc.SZxCIB_Choi2020()
 
         components = self.foregrounds["components"]
         self.fg_component_list = {s: components[s] for s in self.requested_cls}
-
-    #Gets the actual power spectrum of foregrounds given the passed parameters
-    def _get_foreground_model(self,ell = None,**fg_params):
-        #if ell = None, it uses the l_bpws, otherwise the ell array provided
-        #useful to make tests at different l_max than the data
-        if not hasattr(ell,'__len__'):
+    # Gets the actual power spectrum of foregrounds given the passed parameters
+    def _get_foreground_model(self, ell=None, **fg_params):
+        # if ell is None, it uses the l_bpws; otherwise the ell array provided
+        # useful to make tests at a different l_max than the data
+        if not hasattr(ell, '__len__'):
            ell = self.l_bpws
         ell_0 = self.fg_ell_0
         nu_0 = self.fg_nu_0
 
         # Normalisation of radio sources
-        ell_clp = ell*(ell+1.)
-        ell_0clp = ell_0*(ell_0+1.)
+        ell_clp = ell * (ell + 1.)
+        ell_0clp = ell_0 * (ell_0 + 1.)
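For orientation, `ell_clp`/`ell_0clp` encode the usual point-source convention: Poisson power is flat in C_ell, so in D_ell units it scales as ell(ell+1), and a power law with `alpha = 1` over `ell_clp/ell_0clp` reproduces exactly that. A standalone sketch of the normalisation, with a hypothetical amplitude (`a_s = 3.1` is made up):

import numpy as np

ell = np.arange(2, 4001)
ell_0 = 3000.0  # pivot multipole, as in the normalisation block above

ell_clp = ell * (ell + 1.)
ell_0clp = ell_0 * (ell_0 + 1.)

a_s = 3.1  # hypothetical radio amplitude in D_ell units at the pivot
d_ell_radio = a_s * (ell_clp / ell_0clp)

print(d_ell_radio[ell == 3000])  # [3.1]: the amplitude is defined at ell_0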
model = {} - model["tt", "kSZ"] = fg_params["a_kSZ"] * self.ksz( - {"nu": self.bandint_freqs}, - {"ell": ell, "ell_0": ell_0}) - model["tt", "cibp"] = fg_params["a_p"] * self.cibp( - {"nu": self.bandint_freqs, "nu_0": nu_0, - "temp": fg_params["T_d"], "beta": fg_params["beta_p"]}, - {"ell": ell_clp, "ell_0": ell_0clp, "alpha": 1}) - model["tt", "radio"] = fg_params["a_s"] * self.radio( - {"nu": self.bandint_freqs, "nu_0": nu_0, "beta": -0.5 - 2.}, - {"ell": ell_clp, "ell_0": ell_0clp,"alpha":1}) - model["tt", "tSZ"] = fg_params["a_tSZ"] * self.tsz( - {"nu": self.bandint_freqs, "nu_0": nu_0}, - {"ell": ell, "ell_0": ell_0}) - model["tt", "cibc"] = fg_params["a_c"] * self.cibc( - {"nu": self.bandint_freqs, "nu_0": nu_0, - "temp": fg_params["T_d"], "beta": fg_params["beta_c"]}, - {'ell':ell, 'ell_0':ell_0}) - model["tt", "dust"] = fg_params["a_gtt"] * self.dust( - {"nu": self.bandint_freqs, "nu_0": nu_0, - "temp": 19.6, "beta": 1.5}, - {"ell": ell, "ell_0": 500., "alpha": -0.6}) - model["tt","tSZ_and_CIB"] = self.tSZ_and_CIB( - {'kwseq': ( - {'nu': self.bandint_freqs, 'nu_0': nu_0}, - {'nu': self.bandint_freqs, 'nu_0': nu_0, 'temp': fg_params['T_d'], 'beta': fg_params["beta_c"]} - )}, - {'kwseq': ( - {'ell':ell, 'ell_0': ell_0, - 'amp': fg_params['a_tSZ']}, - {'ell':ell, 'ell_0': ell_0, 'amp': fg_params['a_c']}, - {'ell':ell, 'ell_0': ell_0, - 'amp': - fg_params['xi'] * np.sqrt(fg_params['a_tSZ'] * fg_params['a_c'])} - )}) - - model["ee", "radio"] = fg_params["a_psee"] * self.radio( - {"nu": self.bandint_freqs, "nu_0": nu_0, "beta": -0.5 - 2.}, - {"ell": ell_clp, "ell_0": ell_0clp,"alpha":1}) - model["ee", "dust"] = fg_params["a_gee"] * self.dust( - {"nu": self.bandint_freqs, "nu_0": nu_0, - "temp": 19.6, "beta": 1.5}, - {"ell": ell, "ell_0": 500., "alpha": -0.4}) - - model["te", "radio"] = fg_params["a_pste"] * self.radio( - {"nu": self.bandint_freqs, "nu_0": nu_0, "beta": -0.5 - 2.}, - {"ell": ell_clp, "ell_0": ell_0clp,"alpha":1}) - model["te", "dust"] = fg_params["a_gte"] * self.dust( - {"nu": self.bandint_freqs, "nu_0": nu_0, - "temp": 19.6, "beta": 1.5}, - {"ell": ell, "ell_0": 500., "alpha": -0.4}) + model["tt", "kSZ"] = fg_params["a_kSZ"] * self.ksz({"nu": self.bandint_freqs}, + {"ell": ell, + "ell_0": ell_0}) + + model["tt", "cibp"] = fg_params["a_p"] * self.cibp({"nu": self.bandint_freqs, + "nu_0": nu_0, + "temp": fg_params["T_d"], + "beta": fg_params["beta_p"]}, + {"ell": ell_clp, + "ell_0": ell_0clp, + "alpha": 1}) + + model["tt", "radio"] = fg_params["a_s"] * self.radio({"nu": self.bandint_freqs, + "nu_0": nu_0, + "beta": -0.5 - 2.}, + {"ell": ell_clp, + "ell_0": ell_0clp, + "alpha": 1}) + + model["tt", "tSZ"] = fg_params["a_tSZ"] * self.tsz({"nu": self.bandint_freqs, + "nu_0": nu_0}, + {"ell": ell, + "ell_0": ell_0}) + + model["tt", "cibc"] = fg_params["a_c"] * self.cibc({"nu": self.bandint_freqs, + "nu_0": nu_0, + "temp": fg_params["T_d"], + "beta": fg_params["beta_c"]}, + {'ell': ell, + 'ell_0': ell_0}) + + model["tt", "dust"] = fg_params["a_gtt"] * self.dust({"nu": self.bandint_freqs, + "nu_0": nu_0, + "temp": 19.6, + "beta": 1.5}, + {"ell": ell, + "ell_0": 500., + "alpha": -0.6}) + + model["tt", "tSZ_and_CIB"] = \ + self.tSZ_and_CIB({'kwseq': ({'nu': self.bandint_freqs, 'nu_0': nu_0}, + {'nu': self.bandint_freqs, 'nu_0': nu_0, + 'temp': fg_params['T_d'], + 'beta': fg_params["beta_c"]})}, + {'kwseq': ({'ell': ell, 'ell_0': ell_0, + 'amp': fg_params['a_tSZ']}, + {'ell': ell, 'ell_0': ell_0, + 'amp': fg_params['a_c']}, + {'ell': ell, 'ell_0': ell_0, + 'amp': - fg_params['xi'] \ 
+ * np.sqrt(fg_params['a_tSZ'] * + fg_params['a_c'])})}) + + model["ee", "radio"] = fg_params["a_psee"] * self.radio({"nu": self.bandint_freqs, + "nu_0": nu_0, + "beta": -0.5 - 2.}, + {"ell": ell_clp, + "ell_0": ell_0clp, + "alpha": 1}) + + model["ee", "dust"] = fg_params["a_gee"] * self.dust({"nu": self.bandint_freqs, + "nu_0": nu_0, + "temp": 19.6, + "beta": 1.5}, + {"ell": ell, + "ell_0": 500., + "alpha": -0.4}) + + model["te", "radio"] = fg_params["a_pste"] * self.radio({"nu": self.bandint_freqs, + "nu_0": nu_0, + "beta": -0.5 - 2.}, + {"ell": ell_clp, + "ell_0": ell_0clp, + "alpha": 1}) + + model["te", "dust"] = fg_params["a_gte"] * self.dust({"nu": self.bandint_freqs, + "nu_0": nu_0, + "temp": 19.6, + "beta": 1.5}, + {"ell": ell, + "ell_0": 500., + "alpha": -0.4}) fg_dict = {} for c1, f1 in enumerate(self.freqs): @@ -221,121 +255,129 @@ def _get_foreground_model(self,ell = None,**fg_params): fg_dict[s, "all", f1, f2] += fg_dict[s, comp, f1, f2] return fg_dict - - #Takes care of the bandpass construction. It returns a list of nu-transmittance for each frequency or an array with the effective freqs. - def _bandpass_construction(self,**params): + # Takes care of the bandpass construction. It returns a list of nu-transmittance for + # each frequency or an array with the effective freqs. + def _bandpass_construction(self, **params): if not hasattr(self.bandint_width, "__len__"): - self.bandint_width = np.full_like(self.freqs,self.bandint_width,dtype=np.float) + self.bandint_width = np.full_like(self.freqs, self.bandint_width, + dtype=np.float) if np.any(np.array(self.bandint_width) > 0): - assert self.bandint_nsteps > 1 , 'bandint_width and bandint_nsteps not coherent' - assert np.all(np.array(self.bandint_width) > 0), 'one band has width = 0, set a positive width and run again' + assert self.bandint_nsteps > 1, 'bandint_width and bandint_nsteps not \ + coherent' + assert np.all(np.array(self.bandint_width) > 0), 'one band has width = 0, \ + set a positive width and \ + run again' bandint_freqs = [] - for ifr,fr in enumerate(self.freqs): - bandpar = 'bandint_shift_'+str(fr) - bandlow = fr*(1-self.bandint_width[ifr]*.5) - bandhigh = fr*(1+self.bandint_width[ifr]*.5) - nubtrue = np.linspace(bandlow,bandhigh,self.bandint_nsteps,dtype=float) - nub = np.linspace(bandlow+params[bandpar],bandhigh+params[bandpar],self.bandint_nsteps,dtype=float) + for ifr, fr in enumerate(self.freqs): + bandpar = 'bandint_shift_' + str(fr) + bandlow = fr * (1 - self.bandint_width[ifr] * .5) + bandhigh = fr * (1 + self.bandint_width[ifr] * .5) + nubtrue = np.linspace(bandlow, bandhigh, self.bandint_nsteps, dtype=float) + nub = np.linspace(bandlow + params[bandpar], bandhigh + params[bandpar], + self.bandint_nsteps, dtype=float) tranb = _cmb2bb(nub) - tranb_norm = np.trapz(_cmb2bb(nubtrue),nubtrue) - bandint_freqs.append([nub,tranb/tranb_norm]) + tranb_norm = np.trapz(_cmb2bb(nubtrue), nubtrue) + bandint_freqs.append([nub, tranb / tranb_norm]) else: - bandint_freqs = np.empty_like(self.freqs,dtype=float) - for ifr,fr in enumerate(self.freqs): - bandpar = 'bandint_shift_'+str(fr) - bandint_freqs[ifr] = fr+params[bandpar] + bandint_freqs = np.empty_like(self.freqs, dtype=float) + for ifr, fr in enumerate(self.freqs): + bandpar = 'bandint_shift_' + str(fr) + bandint_freqs[ifr] = fr + params[bandpar] - return bandint_freqs + return bandint_freqs - def _init_external_bandpass_construction(self,arrays): + def _init_external_bandpass_construction(self, arrays): self.external_bandpass = [] for array in arrays: fr = 
_get_fr(array)
-            nu_ghz, bp = np.loadtxt(array,usecols=(0,1),unpack=True)
+            nu_ghz, bp = np.loadtxt(array, usecols=(0, 1), unpack=True)
             trans_norm = np.trapz(bp * _cmb2bb(nu_ghz), nu_ghz)
-            self.external_bandpass.append([fr,nu_ghz,bp/trans_norm])
+            self.external_bandpass.append([fr, nu_ghz, bp / trans_norm])
 
-    def _external_bandpass_construction(self,**params):
+    def _external_bandpass_construction(self, **params):
         bandint_freqs = []
-        for fr,nu_ghz,bp in self.external_bandpass:
-            bandpar = 'bandint_shift_'+str(fr)
+        for fr, nu_ghz, bp in self.external_bandpass:
+            bandpar = 'bandint_shift_' + str(fr)
             nub = nu_ghz + params[bandpar]
-            trans = bp * _cmb2bb(nub)
-            bandint_freqs.append([nub,trans])
+            trans = bp * _cmb2bb(nub)
+            bandint_freqs.append([nub, trans])
+
+        return bandint_freqs
 
-        return bandint_freqs
-
 ###########################################################################
-## This part deals with calibration factors
-## Here we implement an alm based calibration
-## Each field {T,E,B}{freq1,freq2,...,freqn} gets an independent
-## calibration factor, e.g. calT_145, calE_154, calT_225, etc..
-## A global calibration factor calG_all is also considered.
+# This part deals with calibration factors
+# Here we implement an alm based calibration
+# Each field {T,E,B}{freq1,freq2,...,freqn} gets an independent
+# calibration factor, e.g. calT_145, calE_145, calT_225, etc.
+# A global calibration factor calG_all is also considered.
 ###########################################################################
 
-    def _get_calibrated_spectra(self,dls_dict,**nuis_params):
+    def _get_calibrated_spectra(self, dls_dict, **nuis_params):
         from syslibrary import syslib_mflike as syl
 
-        cal_pars={}
+        cal_pars = {}
         if "tt" in self.requested_cls or "te" in self.requested_cls:
-            cal_pars["tt"]=(nuis_params["calG_all"] *
-                            np.array([nuis_params['calT_'+str(fr)] for fr in self.freqs]))
-
+            cal_pars["tt"] = (nuis_params["calG_all"] *
+                              np.array([nuis_params['calT_' + str(fr)] for
+                                        fr in self.freqs]))
         if "ee" in self.requested_cls or "te" in self.requested_cls:
-            cal_pars["ee"]=(nuis_params["calG_all"] *
-                            np.array([nuis_params['calE_'+str(fr)] for fr in self.freqs]))
+            cal_pars["ee"] = (nuis_params["calG_all"] *
+                              np.array([nuis_params['calE_' + str(fr)] for
+                                        fr in self.freqs]))
 
-        calib = syl.Calibration_alm(ell=self.l_bpws,spectra=dls_dict)
+        calib = syl.Calibration_alm(ell=self.l_bpws, spectra=dls_dict)
 
-        return calib(cal1=cal_pars,cal2=cal_pars,nu=self.freqs)
+        return calib(cal1=cal_pars, cal2=cal_pars, nu=self.freqs)
 
 ###########################################################################
-## This part deals with rotation of spectra
-## Each freq {freq1,freq2,...,freqn} gets a rotation angle alpha_93, alpha_145, etc..
+# This part deals with rotation of spectra
+# Each freq {freq1,freq2,...,freqn} gets a rotation angle alpha_93, alpha_145, etc.
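To make the calibration convention above concrete: `cal_pars` is the global factor times a per-frequency factor, and `Calibration_alm` applies one factor per leg of each cross-spectrum. A rough standalone sketch, assuming the multiplicative alm-level convention (the exact convention lives in syslibrary; frequencies and values here are illustrative):

import numpy as np

freqs = [93, 145, 225]  # illustrative LAT-like bands
nuis = {"calG_all": 1.0, "calT_93": 1.01, "calT_145": 0.99, "calT_225": 1.02}

# As in _get_calibrated_spectra: global times per-frequency factor
cal_tt = {f: nuis["calG_all"] * nuis["calT_" + str(f)] for f in freqs}

ells = np.arange(2, 100)
dls = {("tt", f1, f2): np.ones_like(ells, dtype=float)
       for f1 in freqs for f2 in freqs}

# alm-level calibration: one factor per leg of the cross-spectrum
dls_cal = {(s, f1, f2): d * cal_tt[f1] * cal_tt[f2]
           for (s, f1, f2), d in dls.items()}
print(dls_cal["tt", 93, 145][0])  # 1.01 * 0.99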
###########################################################################
 
-    def _get_rotated_spectra(self,dls_dict,**nuis_params):
+    def _get_rotated_spectra(self, dls_dict, **nuis_params):
         from syslibrary import syslib_mflike as syl
 
-        rot_pars=[nuis_params['alpha_'+str(fr)] for fr in self.freqs]
+        rot_pars = [nuis_params['alpha_' + str(fr)] for fr in self.freqs]
 
-        rot = syl.Rotation_alm(ell=self.l_bpws,spectra=dls_dict,cls=self.requested_cls)
+        rot = syl.Rotation_alm(ell=self.l_bpws, spectra=dls_dict, cls=self.requested_cls)
 
-        return rot(rot_pars,nu=self.freqs)
+        return rot(rot_pars, nu=self.freqs)
 
 ###########################################################################
-## This part deals with template marginalization
-## A dictionary of template dls is read from yaml (likely to be not efficient)
-## then rescaled and added to theory dls
+# This part deals with template marginalization
+# A dictionary of template dls is read from yaml (likely not efficient)
+# then rescaled and added to theory dls
 ###########################################################################
 
-    #Initializes the systematics templates
+    # Initializes the systematics templates
     # This is slow, but should be done only once
     def _init_template_from_file(self):
         from syslibrary import syslib_mflike as syl
 
-        #decide where to store systematics template.
-        #Currently stored inside syslibrary package
-        templ_from_file = syl.ReadTemplateFromFile(rootname=self.systematics_template["rootname"])
+        # decide where to store systematics template.
+        # Currently stored inside syslibrary package
+        templ_from_file = \
+            syl.ReadTemplateFromFile(rootname=self.systematics_template["rootname"])
         self.dltempl_from_file = templ_from_file(ell=self.l_bpws)
 
-    def _get_template_from_file(self,dls_dict,**nuis_params):
+    def _get_template_from_file(self, dls_dict, **nuis_params):
 
-        #templ_pars=[nuis_params['templ_'+str(fr)] for fr in self.freqs]
-        #templ_pars currently hard-coded
-        #but ideally should be passed as input nuisance
-        templ_pars={cls: np.zeros((len(self.freqs),len(self.freqs))) for cls in self.requested_cls}
+        # templ_pars=[nuis_params['templ_'+str(fr)] for fr in self.freqs]
+        # templ_pars currently hard-coded
+        # but ideally should be passed as input nuisance
+        templ_pars = {cls: np.zeros((len(self.freqs), len(self.freqs)))
+                      for cls in self.requested_cls}
 
         for cls in self.requested_cls:
-            for i1,f1 in enumerate(self.freqs):
-                for i2,f2 in enumerate(self.freqs):
-                    dls_dict[cls,f1,f2] += (templ_pars[cls][i1][i2]*
-                                            self.dltempl_from_file[cls,f1,f2])
+            for i1, f1 in enumerate(self.freqs):
+                for i2, f2 in enumerate(self.freqs):
+                    dls_dict[cls, f1, f2] += (templ_pars[cls][i1][i2] *
+                                              self.dltempl_from_file[cls, f1, f2])
 
         return dls_dict
diff --git a/soliket/utils.py b/soliket/utils.py
index b0f32ca8..bab60b04 100644
--- a/soliket/utils.py
+++ b/soliket/utils.py
@@ -34,4 +34,8 @@ class OneWithCls(one):
     lmax = 10000
 
     def get_requirements(self):
-        return {"Cl": {"pp": self.lmax, "tt": self.lmax, "te": self.lmax, "ee": self.lmax, "bb": self.lmax,}}
+        return {"Cl": {"pp": self.lmax,
+                       "tt": self.lmax,
+                       "te": self.lmax,
+                       "ee": self.lmax,
+                       "bb": self.lmax, }}
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..d61fc10a
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,6 @@
+[testenv:codestyle]
+skip_install = true
+changedir = .
+description = check code style, e.g. with flake8
+deps = flake8
+commands = flake8
\ No newline at end of file
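On `_get_template_from_file` above: it is an additive systematic, with each (cls, f1, f2) theory spectrum picking up an amplitude times the corresponding template, and the amplitudes currently pinned to zero. A minimal standalone sketch (the template shape and all names here are placeholders):

import numpy as np

freqs = [93, 145, 225]
requested_cls = ["tt"]
ells = np.arange(2, 100)

# Stand-ins for dls_dict and dltempl_from_file
dls = {("tt", f1, f2): np.zeros(ells.size) for f1 in freqs for f2 in freqs}
templ = {("tt", f1, f2): np.sin(ells / 10.) for f1 in freqs for f2 in freqs}

# Hard-coded to zero in the module, so the correction is a no-op until
# these are promoted to real input nuisance parameters:
templ_pars = {cls: np.zeros((len(freqs), len(freqs))) for cls in requested_cls}

for cls in requested_cls:
    for i1, f1 in enumerate(freqs):
        for i2, f2 in enumerate(freqs):
            dls[cls, f1, f2] += templ_pars[cls][i1][i2] * templ[cls, f1, f2]

print(dls["tt", 93, 145][:3])  # still zeros, since the amplitudes are zero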
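And stepping back to `_bandpass_construction` earlier in this file: with `bandint_width == 0` the band integral collapses to a delta function at a shifted effective frequency, which is all the `bandint_shift_*` nuisance parameters do in that branch. A standalone sketch (the shift values are invented):

import numpy as np

freqs = [93, 145, 225]
params = {"bandint_shift_93": 0.5, "bandint_shift_145": -1.0,
          "bandint_shift_225": 0.0}

# The zero-width branch of _bandpass_construction, in miniature:
bandint_freqs = np.empty(len(freqs), dtype=float)
for ifr, fr in enumerate(freqs):
    bandint_freqs[ifr] = fr + params["bandint_shift_" + str(fr)]

print(bandint_freqs)  # [ 93.5 144.  225. ]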