diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3e07a8e7..244d6c92 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -71,4 +71,12 @@ jobs: pytest -vvv --cov=${{ env.PROJECT_NAME }} --cov-report=xml --cov-report=term tests/ - name: Upload coverage reports to Codecov with GitHub Action + if: + ${{ matrix.python-version == '3.10' && matrix.os == 'ubuntu-latest'}} uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: ./coverage.xml + flags: unittests + name: codecov-umbrella + fail_ci_if_error: true diff --git a/.github/workflows/image_ci.yml b/.github/workflows/image_ci.yml index 9ae45c7f..4f0c8e53 100644 --- a/.github/workflows/image_ci.yml +++ b/.github/workflows/image_ci.yml @@ -98,7 +98,7 @@ jobs: uses: docker/setup-buildx-action@v3 - name: Build and export to Docker - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . file: ./resources/docker/Dockerfile diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7cf9b69d..145fbad3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -9,12 +9,12 @@ ci: repos: - repo: https://github.com/psf/black - rev: "24.4.2" + rev: "24.8.0" hooks: - id: black-jupyter - repo: https://github.com/asottile/blacken-docs - rev: "1.16.0" + rev: "1.18.0" hooks: - id: blacken-docs additional_dependencies: [black==23.7.0] @@ -23,6 +23,7 @@ repos: rev: "v4.6.0" hooks: - id: check-added-large-files + args: ["--maxkb=1000"] - id: check-case-conflict - id: check-merge-conflict - id: check-symlinks @@ -50,13 +51,13 @@ repos: args: [--prose-wrap=always] - repo: https://github.com/astral-sh/ruff-pre-commit - rev: "v0.4.10" + rev: "v0.5.6" hooks: - id: ruff args: ["--fix", "--show-fixes"] - repo: https://github.com/pre-commit/mirrors-mypy - rev: "v1.10.0" + rev: "v1.11.1" hooks: - id: mypy files: src @@ -70,11 +71,20 @@ repos: - id: codespell args: ["--write-changes", "--ignore-words", ".codespell-whitelist"] - - repo: https://github.com/kynan/nbstripout - rev: 0.7.1 + #- repo: https://github.com/kynan/nbstripout + #rev: 0.7.1 + #hooks: + #- id: nbstripout + #args: [--extra-keys=metadata.kernelspec metadata.language_info.version] + + - repo: local hooks: - - id: nbstripout - args: [--extra-keys=metadata.kernelspec metadata.language_info.version] + - id: strip-output-keep-html + name: Strip Jupyter Notebook output but keep HTML + entry: python strip_output_keep_html.py + language: python + additional_dependencies: [nbformat] + files: \.ipynb$ - repo: local hooks: diff --git a/README.md b/README.md index 63ddfba5..c4730fae 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,9 @@ [![Documentation Status](https://readthedocs.org/projects/caustics/badge/?version=latest)](https://caustics.readthedocs.io/en/latest/?badge=latest) [![PyPI version](https://badge.fury.io/py/caustics.svg)](https://pypi.org/project/caustics/) [![coverage](https://img.shields.io/codecov/c/github/Ciela-Institute/caustics)](https://app.codecov.io/gh/Ciela-Institute/caustics) -[![DOI](https://zenodo.org/badge/521722463.svg)](https://zenodo.org/doi/10.5281/zenodo.10806382) +[![status](https://joss.theoj.org/papers/995fa98462eb534a32952549ef2244f8/status.svg)](https://joss.theoj.org/papers/995fa98462eb534a32952549ef2244f8) +[![Zenodo](https://zenodo.org/badge/521722463.svg)](https://zenodo.org/doi/10.5281/zenodo.10806382) +[![arXiv](https://img.shields.io/badge/arXiv-2406.15542-b31b1b.svg)](https://arxiv.org/abs/2406.15542) # caustics @@ -45,7 +47,7 @@ x = 
torch.tensor([ 5.0, -0.2, 0.0, 0.8, 0.0, 1., 1.0, 10.0 ]) # fmt: skip -minisim = caustics.Lens_Source( +minisim = caustics.LensSource( lens=sie, source=src, lens_light=lnslt, pixelscale=0.05, pixels_x=100 ) plt.imshow(minisim(x, quad_level=3), origin="lower") diff --git a/docs/source/_toc.yml b/docs/source/_toc.yml index e1b6c431..d4a66389 100644 --- a/docs/source/_toc.yml +++ b/docs/source/_toc.yml @@ -9,11 +9,12 @@ chapters: - file: websim - file: tutorials/index sections: - - file: basicindex + - file: tutorials/Introduction + - file: interfaceindex sections: - - file: tutorials/BasicIntroduction_yaml - - file: tutorials/BasicIntroduction_oop - - file: tutorials/BasicIntroduction_func + - file: tutorials/InterfaceIntroduction_yaml + - file: tutorials/InterfaceIntroduction_oop + - file: tutorials/InterfaceIntroduction_func - file: tutorials/LensZoo - file: tutorials/VisualizeCaustics - file: tutorials/MultiplaneDemo diff --git a/docs/source/citation.rst b/docs/source/citation.rst index bb0c021e..f41aadd1 100644 --- a/docs/source/citation.rst +++ b/docs/source/citation.rst @@ -3,4 +3,23 @@ Citation ======== -Paper coming soon! We will put all the citation information here when its ready. +Paper is submitted to JOSS, in the meantime please cite the `arXiv version `_ of the paper: + +.. code-block:: bibtex + + @ARTICLE{2024arXiv240615542S, + author = {{Stone}, Connor and {Adam}, Alexandre and {Coogan}, Adam and {Yantovski-Barth}, M.~J. and {Filipp}, Andreas and {Setiawan}, Landung and {Core}, Cordero and {Legin}, Ronan and {Wilson}, Charles and {Missael Barco}, Gabriel and {Hezaveh}, Yashar and {Perreault-Levasseur}, Laurence}, + title = "{Caustics: A Python Package for Accelerated Strong Gravitational Lensing Simulations}", + journal = {arXiv e-prints}, + keywords = {Astrophysics - Instrumentation and Methods for Astrophysics, Astrophysics - Cosmology and Nongalactic Astrophysics}, + year = 2024, + month = jun, + eid = {arXiv:2406.15542}, + pages = {arXiv:2406.15542}, + doi = {10.48550/arXiv.2406.15542}, + archivePrefix = {arXiv}, + eprint = {2406.15542}, + primaryClass = {astro-ph.IM}, + adsurl = {https://ui.adsabs.harvard.edu/abs/2024arXiv240615542S}, + adsnote = {Provided by the SAO/NASA Astrophysics Data System} + } diff --git a/docs/source/examples/Example_ImageFit_LM.ipynb b/docs/source/examples/Example_ImageFit_LM.ipynb index a0fd7280..41f06858 100644 --- a/docs/source/examples/Example_ImageFit_LM.ipynb +++ b/docs/source/examples/Example_ImageFit_LM.ipynb @@ -56,10 +56,9 @@ "cosmology.to(dtype=torch.float32)\n", "\n", "upsample_factor = 1\n", - "thx, thy = caustics.utils.get_meshgrid(\n", + "thx, thy = caustics.utils.meshgrid(\n", " pixelscale / upsample_factor,\n", " upsample_factor * numPix,\n", - " upsample_factor * numPix,\n", " dtype=torch.float32,\n", ")\n", "z_l = torch.tensor(0.5, dtype=torch.float32)\n", @@ -110,7 +109,7 @@ ")\n", "\n", "# Image plane simulator\n", - "sim = caustics.Lens_Source(\n", + "sim = caustics.LensSource(\n", " lens=lens_mass_model,\n", " lens_light=lens_light_model,\n", " source=source_light_model,\n", diff --git a/docs/source/examples/Example_ImageFit_NUTS.ipynb b/docs/source/examples/Example_ImageFit_NUTS.ipynb index f11329ec..e53bcbdf 100644 --- a/docs/source/examples/Example_ImageFit_NUTS.ipynb +++ b/docs/source/examples/Example_ImageFit_NUTS.ipynb @@ -61,10 +61,9 @@ "cosmology.to(dtype=torch.float32)\n", "\n", "upsample_factor = 1\n", - "thx, thy = caustics.utils.get_meshgrid(\n", + "thx, thy = caustics.utils.meshgrid(\n", " pixelscale / 
upsample_factor,\n", " upsample_factor * numPix,\n", - " upsample_factor * numPix,\n", " dtype=torch.float32,\n", ")\n", "z_l = torch.tensor(0.5, dtype=torch.float32)\n", @@ -115,7 +114,7 @@ ")\n", "\n", "# Image plane simulator\n", - "sim = caustics.Lens_Source(\n", + "sim = caustics.LensSource(\n", " lens=lens_mass_model,\n", " lens_light=lens_light_model,\n", " source=source_light_model,\n", diff --git a/docs/source/examples/Example_QSOLensFit.ipynb b/docs/source/examples/Example_QSOLensFit.ipynb index 23b25ed5..d3db2af2 100644 --- a/docs/source/examples/Example_QSOLensFit.ipynb +++ b/docs/source/examples/Example_QSOLensFit.ipynb @@ -103,10 +103,9 @@ "res = 0.05\n", "upsample_factor = 1\n", "fov = res * n_pix\n", - "thx, thy = caustics.utils.get_meshgrid(\n", + "thx, thy = caustics.utils.meshgrid(\n", " res / upsample_factor,\n", " upsample_factor * n_pix,\n", - " upsample_factor * n_pix,\n", " dtype=torch.float32,\n", ")\n", "\n", diff --git a/docs/source/basicindex.rst b/docs/source/interfaceindex.rst similarity index 88% rename from docs/source/basicindex.rst rename to docs/source/interfaceindex.rst index 5dc0b729..64c1eb58 100644 --- a/docs/source/basicindex.rst +++ b/docs/source/interfaceindex.rst @@ -1,5 +1,5 @@ -Basic Introduction -================== +The three interfaces to Caustics +================================ There are three user interfaces to Caustics: configuration file, object-oriented, and functional. Here we have built the same tutorial three times over so you can see how the three interfaces compare. diff --git a/docs/source/intro.md b/docs/source/intro.md index ba9c6a94..2a438810 100644 --- a/docs/source/intro.md +++ b/docs/source/intro.md @@ -39,7 +39,7 @@ x = torch.tensor([ 5.0, -0.2, 0.0, 0.8, 0.0, 1., 1.0, 10.0 ]) # fmt: skip -minisim = caustics.Lens_Source( +minisim = caustics.LensSource( lens=sie, source=src, lens_light=lnslt, pixelscale=0.05, pixels_x=100 ) plt.imshow(minisim(x, quad_level=3), origin="lower") diff --git a/docs/source/tutorials/BasicIntroduction_func.ipynb b/docs/source/tutorials/InterfaceIntroduction_func.ipynb similarity index 98% rename from docs/source/tutorials/BasicIntroduction_func.ipynb rename to docs/source/tutorials/InterfaceIntroduction_func.ipynb index 8c97ccf7..8c90c395 100644 --- a/docs/source/tutorials/BasicIntroduction_func.ipynb +++ b/docs/source/tutorials/InterfaceIntroduction_func.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Welcome to Caustics! Functional\n", + "# Caustics Interface: Functional\n", "\n", "Caustics is a powerful gravitational lensing simulator that can support users from beginner to highly advanced. In this tutorial we will cover the basics of the caustics functional interface. This one is a bit different from the other two since we are building from the ground up." 
] @@ -59,7 +59,7 @@ "# First lets define the sampling grid\n", "npix = 100\n", "pixelscale = 0.05\n", - "thx, thy = caustics.utils.get_meshgrid(pixelscale, npix, npix)\n", + "thx, thy = caustics.utils.meshgrid(pixelscale, npix, npix)\n", "gqx, gqy, gqW = caustics.utils.gaussian_quadrature_grid(\n", " pixelscale, thx, thy, quad_level=3\n", ")" diff --git a/docs/source/tutorials/BasicIntroduction_oop.ipynb b/docs/source/tutorials/InterfaceIntroduction_oop.ipynb similarity index 94% rename from docs/source/tutorials/BasicIntroduction_oop.ipynb rename to docs/source/tutorials/InterfaceIntroduction_oop.ipynb index dac7a7bd..aaca80f7 100644 --- a/docs/source/tutorials/BasicIntroduction_oop.ipynb +++ b/docs/source/tutorials/InterfaceIntroduction_oop.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Welcome to Caustics! Object Oriented\n", + "# Caustics Interface: Object Oriented\n", "\n", "Caustics is a powerful gravitational lensing simulator that can support users from beginner to highly advanced. In this tutorial we will cover the basics of the caustics object oriented programming interface." ] @@ -128,7 +128,7 @@ "metadata": {}, "outputs": [], "source": [ - "sim = caustics.Lens_Source(\n", + "sim = caustics.LensSource(\n", " lens=sie, source=src, lens_light=lnslt, pixelscale=0.05, pixels_x=100\n", ")" ] @@ -244,7 +244,7 @@ "source": [ "### The Simulator Graph\n", "\n", - "Here we take a quick look at the simulator graph for the image we have produced. You will learn much more about what this means in the `Simulators` tutorial notebook, but let's cover the basics here. First, note that this is a Directed Acyclic Graph (DAG), this is how all simulator parameters are represented in Caustics. At the top of the graph is the `Lens_Source` object, you can see in brackets it has a name `sim` which is used as the identifier for it's node in the graph. At the next level is the `z_s` parameter for the redshift of the source. Next are the `SIE` lens, `Sersic` source, and `Sersic` lenslight objects which themselves each hold parameters. You will notice that all the parameters are in white boxes right now, this is because they are `dynamic` parameters which need values to be passed, grey boxes are used for parameters with fixed values." + "Here we take a quick look at the simulator graph for the image we have produced. You will learn much more about what this means in the `Simulators` tutorial notebook, but let's cover the basics here. First, note that this is a Directed Acyclic Graph (DAG), this is how all simulator parameters are represented in Caustics. At the top of the graph is the `LensSource` object, you can see in brackets it has a name `sim` which is used as the identifier for it's node in the graph. At the next level is the `z_s` parameter for the redshift of the source. Next are the `SIE` lens, `Sersic` source, and `Sersic` lenslight objects which themselves each hold parameters. You will notice that all the parameters are in white boxes right now, this is because they are `dynamic` parameters which need values to be passed, grey boxes are used for parameters with fixed values." 
] }, { @@ -254,7 +254,7 @@ "outputs": [], "source": [ "# Substitute sim with sim for the yaml method\n", - "sim.get_graph(True, True)" + "sim.graph(True, True)" ] }, { diff --git a/docs/source/tutorials/BasicIntroduction_yaml.ipynb b/docs/source/tutorials/InterfaceIntroduction_yaml.ipynb similarity index 94% rename from docs/source/tutorials/BasicIntroduction_yaml.ipynb rename to docs/source/tutorials/InterfaceIntroduction_yaml.ipynb index 9a5056b2..6eef1777 100644 --- a/docs/source/tutorials/BasicIntroduction_yaml.ipynb +++ b/docs/source/tutorials/InterfaceIntroduction_yaml.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Welcome to Caustics! YAML\n", + "# Caustics Interface: YAML\n", "\n", "Caustics is a powerful gravitational lensing simulator that can support users from beginner to highly advanced. In this tutorial we will cover the basics of the caustics `yaml` interface." ] @@ -181,7 +181,7 @@ "source": [ "### The Simulator Graph\n", "\n", - "Here we take a quick look at the simulator graph for the image we have produced. You will learn much more about what this means in the `Simulators` tutorial notebook, but let's cover the basics here. First, note that this is a Directed Acyclic Graph (DAG), this is how all simulator parameters are represented in Caustics. At the top of the graph is the `Lens_Source` object, you can see in brackets it has a name `sim` which is used as the identifier for it's node in the graph. At the next level is the `z_s` parameter for the redshift of the source. Next are the `SIE` lens, `Sersic` source, and `Sersic` lenslight objects which themselves each hold parameters. You will notice that all the parameters are in white boxes right now, this is because they are `dynamic` parameters which need values to be passed, grey boxes are used for parameters with fixed values." + "Here we take a quick look at the simulator graph for the image we have produced. You will learn much more about what this means in the `Simulators` tutorial notebook, but let's cover the basics here. First, note that this is a Directed Acyclic Graph (DAG), this is how all simulator parameters are represented in Caustics. At the top of the graph is the `LensSource` object, you can see in brackets it has a name `sim` which is used as the identifier for it's node in the graph. At the next level is the `z_s` parameter for the redshift of the source. Next are the `SIE` lens, `Sersic` source, and `Sersic` lenslight objects which themselves each hold parameters. You will notice that all the parameters are in white boxes right now, this is because they are `dynamic` parameters which need values to be passed, grey boxes are used for parameters with fixed values." ] }, { @@ -191,7 +191,7 @@ "outputs": [], "source": [ "# The simulator is internally represented as a directed acyclic graph of operations\n", - "sim.get_graph(True, True)" + "sim.graph(True, True)" ] }, { @@ -336,7 +336,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Lens_Source Simulator\n", + "### LensSource Simulator\n", "\n", "Next we pass our configuration to a `Simulator` in caustics, simulators perform the work of forward modelling various configurations and producing the desired outputs. Here we are interested in a common scenario of producing an image of a background source through a lens distribution. It is possible to make your own simulator to represent all sorts of situations. First, we pass the `lens` model and the `source` model defined above. 
Next we use `pixelscale` and `pixels_x` to define the grid of pixels that will be sampled. Finally, we pass the `z_s` redshift at which the source (`Sersic`) model should be placed; recall that light models don't use the cosmology model and so aren't aware of their placement in space.\n", "\n", "```yaml\n", "simulator:\n", " name: minisim\n", - " kind: Lens_Source\n", + " kind: LensSource\n", " init_kwargs:\n", " lens: *lens\n", " source: *src\n", @@ -359,7 +359,7 @@ "In this section:\n", "- `simulator:` is the key that starts the definition of the simulator.\n", "- `name: minisim` sets the name of the simulator to 'minisim'.\n", - "- `kind: Lens_Source` sets the kind of the simulator to 'Lens_Source', which indicates that this simulator will simulate a lens and a source.\n", + "- `kind: LensSource` sets the kind of the simulator to 'LensSource', which indicates that this simulator will simulate a lens and a source.\n", "- `init_kwargs:` is a key for additional parameters required for the simulator.\n", "- `lens: *lens` sets the lens for the simulator to the lens defined earlier in the YAML file.\n", "- `source: *src` sets the source for the simulator to the source defined earlier in the YAML file.\n", @@ -369,16 +369,14 @@ "\n", "This section defines the `simulator` that will be used to perform the simulation. It references the lens, source, and lens light definitions from earlier in the YAML file and sets additional parameters for the simulator." ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { + "kernelspec": { + "display_name": "caustic", + "language": "python", + "name": "python3" + }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", - "pygments_lexer": "ipython3" + "pygments_lexer": "ipython3", + "version": "3.9.18" } }, "nbformat": 4, diff --git a/docs/source/tutorials/Introduction.ipynb b/docs/source/tutorials/Introduction.ipynb new file mode 100644 index 00000000..ddf37bcc --- /dev/null +++ b/docs/source/tutorials/Introduction.ipynb @@ -0,0 +1,853 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-cell" + ] + }, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import torch\n", + "from torch import vmap\n", + "from torch.func import jacfwd" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Welcome to Caustics!\n", + "\n", + "In this introduction, we will showcase some of the features and design principles of Caustics. We will see:\n", + "1. How to get started from one of our pre-built `Simulator`\n", + "\n", + "2. Visualization of the `Simulator` graph (DAG of `caustics` modules)\n", + "\n", + "3. Distinction between **Static** and **Dynamic** parameters\n", + "\n", + "4. How to create a **batch** of simulations\n", + "\n", + "5. Semantic structure of the Simulator input\n", + "6. Taking gradients w.r.t. parameters with `Pytorch` autodiff functionalities\n", + "7. Swapping in flexible modules like the `Pixelated` representation for more advanced usage\n", + "8. 
How to create your own Simulator" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Getting started with the `LensSource` Simulator\n", + "\n", + "For this first introduction, we use the simplest modules in caustics for the lens and source, namely the `SIE` and the `Sersic` modules. We also assume a `FlatLambdaCDM` cosmology. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from caustics import LensSource, SIE, Sersic, FlatLambdaCDM\n", + "\n", + "# Define parameters of the camera pixel grid\n", + "pixelscale = 0.04 # arcsec/pixel\n", + "pixels = 100\n", + "\n", + "# Instantiate modules for the simulator\n", + "cosmo = FlatLambdaCDM(name=\"cosmo\")\n", + "lens = SIE(cosmology=cosmo, name=\"lens\")\n", + "source = Sersic(name=\"source\")\n", + "simulator = LensSource(lens, source, pixelscale=pixelscale, pixels_x=pixels)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Generating a simulation of a strong gravitational lens" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-cell" + ] + }, + "outputs": [], + "source": [ + "z_s = torch.tensor([1.0])\n", + "lens_params = torch.tensor(\n", + " [\n", + " 0.5, # z_l\n", + " 0.0, # x_0\n", + " 0.0, # y_0\n", + " 0.9, # q\n", + " 0.4, # phi\n", + " 1.0, # b (Einstein radius)\n", + " ]\n", + ")\n", + "\n", + "source_params = torch.tensor(\n", + " [\n", + " 0.0, # x_0\n", + " 0.0, # y_0\n", + " 0.5, # q\n", + " 0.9, # phi\n", + " 1.0, # n\n", + " 0.1, # Re\n", + " 10.0, # I_e\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Generate a lensed image\n", + "y = simulator([z_s, lens_params, source_params])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "fig, axs = plt.subplots(1, 3, figsize=(15, 4))\n", + "\n", + "# A meshgrid to show the source\n", + "x = torch.linspace(-0.5, 0.5, 100)\n", + "X, Y = torch.meshgrid(x, x, indexing=\"xy\")\n", + "\n", + "ax = axs[0]\n", + "ax.set_title(r\"Sérsic source\")\n", + "source_im = source.brightness(X, Y, source_params)\n", + "ax.imshow(source_im, origin=\"lower\", extent=(-0.5, 0.5, -0.5, 0.5), cmap=\"gray\")\n", + "ax.set_ylabel(r\"$\\beta_y$ ['']\")\n", + "ax.set_xlabel(r\"$\\beta_x$ ['']\")\n", + "\n", + "ax = axs[1]\n", + "ax.set_title(r\"SIE mass distribution\")\n", + "lens_im = lens.convergence(X * 2, Y * 2, z_s, lens_params)\n", + "ax.imshow(lens_im, origin=\"lower\", extent=(-1, 1, -1, 1), cmap=\"hot\")\n", + "ax.set_ylabel(r\"$\\theta_y$ ['']\")\n", + "ax.set_xlabel(r\"$\\theta_x$ ['']\")\n", + "\n", + "ax = axs[2]\n", + "ax.set_title(r\"Lensed image\")\n", + "ax.imshow(y, origin=\"lower\", extent=(-1, 1, -1, 1), cmap=\"gray\")\n", + "ax.set_ylabel(r\"$\\theta_y$ ['']\")\n", + "ax.set_xlabel(r\"$\\theta_x$ ['']\");" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualization of the `Simulator` DAG \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "simulator.graph(True, True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## **Static** vs **Dynamic** parameters\n", + "\n", + "In the DAG shown above, \n", + "\n", + "- **Dynamic parameters** are shown in white boxes\n", + "\n", + "- **Static parameters** are shown in grey 
boxes \n", + "\n", + "The distinction between the two types can be summarized as follows:\n", + "\n", + "- **Dynamic parameters** are fed as input to the simulator and can be batched over (data parallelism)\n", + "\n", + "- **Static parameters** have fixed values. Their values are stored in the internal DAG and will be broadcast over when batching computations\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Making a parameter static\n", + "simulator.z_s = 1.0\n", + "simulator.graph(False, True) # z_s turns grey" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Making a parameter dynamic\n", + "simulator.z_s = None\n", + "simulator.graph(\n", + " False, True\n", + ") # z_s turns white, which makes it disappear when we don't show the dynamic parameters (first option False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Simulating a batch of observations\n", + "\n", + "We use `vmap` over the simulator to create a batch of simulations from a batch of parameters. In this example, we create a batch of examples that only differ by their Einstein radius. To do this, we turn all the other parameters into static parameters. This is done in the hidden cell below" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-cell" + ] + }, + "outputs": [], + "source": [ + "# Make all parameters static except the Einstein radius\n", + "simulator.lens.x0 = 0.0\n", + "simulator.lens.y0 = 0.0\n", + "simulator.lens.q = 0.9\n", + "simulator.lens.phi = 0.4\n", + "simulator.lens.b = None # Make sure this one stays Dynamic\n", + "simulator.lens.z_l = 0.5\n", + "\n", + "simulator.source.x0 = 0.0\n", + "simulator.source.y0 = 0.0\n", + "simulator.source.q = 0.5\n", + "simulator.source.phi = 0.5\n", + "simulator.source.n = 1.0\n", + "simulator.source.Re = 0.1\n", + "simulator.source.Ie = 10.0\n", + "\n", + "simulator.z_s = 1.0" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from torch import vmap\n", + "\n", + "# Create a grid of Einstein radii\n", + "b = torch.linspace(0.5, 1.5, 5).view(-1, 1) # Shape is [B, 1]\n", + "ys = vmap(simulator)(b)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "fig, axs = plt.subplots(1, 5, figsize=(20, 4))\n", + "\n", + "for i, ax in enumerate(axs.flatten()):\n", + " ax.axis(\"off\")\n", + " ax.imshow(ys[i], cmap=\"gray\")\n", + " ax.set_title(f\"$b = {b[i].item():.2f}$\")\n", + "plt.subplots_adjust(wspace=0, hspace=0)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Semantic structure of the input\n", + "\n", + "The simulator's input takes different formats to allow different use-case scenarios:\n", + "1. Flattened tensor for deep neural networks like in [Hezaveh et al. (2017)](https://arxiv.org/abs/1708.08842)\n", + "\n", + "2. Semantic list to separate the input in terms of high-level modules like Lens and Source\n", + "3. Low-level Dictionary to decompose the parameters at the level of the leaves of the DAG\n", + "\n", + "Below, we illustrate how to use all of these structures. For completeness, we also use `vmap`."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Make some parameters dynamic for this example\n", + "simulator.source.Ie = None\n", + "simulator.lens.b = None" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Flattened Tensor\n", + "To make sure the order of the parameter is correct, print the simulator. Order of dynamic parameters is shown in the `x_order` field" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "simulator" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "B = 5 # Batch dimension\n", + "b = torch.rand(B, 1)\n", + "Ie = torch.rand(B, 1)\n", + "x = torch.concat([b, Ie], dim=1) # Concat along the feature dimension\n", + "\n", + "# Now we can use vmap to simulate multiple images at once\n", + "ys = vmap(simulator)(x)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Semantic lists\n", + "\n", + "A semantic list is simply a list over module parameters like the one we used earlier: `[z_s, lens_params, source_params]`. Note that we could also include cosmological parameters in that list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-cell" + ] + }, + "outputs": [], + "source": [ + "# Make some parameters dynamic for this example\n", + "simulator.source.Ie = None\n", + "simulator.lens.b = None\n", + "simulator.lens.x0 = None\n", + "simulator.lens.cosmology.h0 = None\n", + "\n", + "simulator" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "B = 5\n", + "lens_params = torch.randn(B, 2) # x0 and b\n", + "source_params = torch.rand(B, 1) # Ie\n", + "cosmo_params = torch.rand(B, 1) # h0\n", + "\n", + "x = [lens_params, cosmo_params, source_params]\n", + "ys = vmap(simulator)(x)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Low-level Dictionary" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-cell" + ] + }, + "outputs": [], + "source": [ + "B = 5\n", + "x0 = torch.randn(B, 1)\n", + "b = torch.randn(B, 1)\n", + "Ie = torch.rand(B, 1)\n", + "h0 = torch.rand(B, 1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "x = {\n", + " \"lens\": {\n", + " \"x0\": x0,\n", + " \"b\": b,\n", + " },\n", + " \"source\": {\n", + " \"Ie\": Ie,\n", + " },\n", + " \"cosmo\": {\n", + " \"h0\": h0,\n", + " },\n", + "}\n", + "ys = vmap(simulator)(x)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Computing gradients with automatic differentiation\n", + "\n", + "Computing gradients is particularly useful for optimization. Since taking gradients w.r.t. list or dictionary inputs is not possible with `torch.func.grad`, we will need a small wrapper around the simulator. For optimisation, the wrapper will often be a log likelihood function. For now we use a generic `lambda` wrapper. \n", + "\n", + "In the case of the semantic list input, the wrapper has the general form\n", + "```python\n", + "lambda *x: simulator(x)\n", + "```\n", + "\n", + "The low-level dictionary input is a bit more involved but can be worked out on a case by case basis. \n", + "\n", + "**Note**: apply `vmap` around the gradient function (e.g. 
`jacfwd` or `grad`) to handle batched computation\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-cell" + ] + }, + "outputs": [], + "source": [ + "# Choose some sensible values to compute the gradient\n", + "lens_params = torch.tensor([0.0, 1.0]) # x0 and b\n", + "source_params = torch.tensor([10.0]) # Ie\n", + "cosmo_params = torch.tensor([0.7]) # h0" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`jacfwd` will return a list of 3 tensors of shape [B, pixels, pixels, D], where D is the number of parameters in that module" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from torch.func import jacfwd\n", + "\n", + "jac = jacfwd(lambda *x: simulator(x), argnums=(0, 1, 2))(\n", + " lens_params, cosmo_params, source_params\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "fig, axs = plt.subplots(1, 4, figsize=(20, 4))\n", + "\n", + "titles = [\n", + " r\"$\\nabla_{x_0} f(\\mathbf{x})$\",\n", + " r\"$\\nabla_{b} f(\\mathbf{x})$\",\n", + " r\"$\\nabla_{h_0} f(\\mathbf{x})$\",\n", + " r\"$\\nabla_{I_e} f(\\mathbf{x})$\",\n", + "]\n", + "jacs = torch.concat(jac, dim=-1)\n", + "for i, ax in enumerate(axs.flatten()):\n", + " ax.axis(\"off\")\n", + " ax.imshow(jacs[..., i], cmap=\"seismic\", vmin=-10, vmax=10)\n", + " ax.set_title(titles[i], fontsize=18);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Pixelated representations\n", + "\n", + "The examples above made use of very simplistic modules. Here, we will showcase how easily we can swap-in flexible representations to represent more realistic systems. \n", + "\n", + "- `Pixelated` is the module used to represent the background source with a grid of pixels\n", + "\n", + "- `PixelatedConvergence` is the module used to represent the convergence of the lens with a grid of pixels\n", + "\n", + "For this example, we will use source samples from the PROBES dataset ([Stone et al., 2019](https://iopscience.iop.org/article/10.3847/1538-4357/ab3126/meta#:~:text=The%20intrinsic%20scatter%20of%20the%20baryonic%20RAR%20is%20predicted%20by,null%20value%20reported%20by%20L17.)) and convergence maps sampled from Illustris TNG ([Nelson et al., 2019](https://comp-astrophys-cosmol.springeropen.com/articles/10.1186/s40668-019-0028-x), see [Adam et al., 2023](https://iopscience.iop.org/article/10.3847/1538-4357/accf84/meta) for preprocessing, or use this [link](https://zenodo.org/records/6555463/files/hkappa128hst_TNG100_rau_trainset.h5?download=1) to download the maps). 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from caustics import Pixelated, PixelatedConvergence\n", + "\n", + "# Some static parameters for the simulator\n", + "pixelscale = 0.07\n", + "source_pixelscale = 0.25 * pixelscale\n", + "z_l = 0.5\n", + "z_s = 1.0\n", + "x0 = 0\n", + "y0 = 0\n", + "\n", + "# Construct the Simulator with Pixelated and PixalatedConvergence modules\n", + "cosmo = FlatLambdaCDM(name=\"cosmo\")\n", + "source = Pixelated(\n", + " name=\"source\", shape=(256, 256), pixelscale=source_pixelscale, x0=x0, y0=y0\n", + ")\n", + "lens = PixelatedConvergence(\n", + " cosmology=cosmo,\n", + " name=\"lens\",\n", + " pixelscale=pixelscale,\n", + " shape=(128, 128),\n", + " z_l=z_l,\n", + ")\n", + "simulator = LensSource(lens, source, pixelscale=pixelscale, pixels_x=pixels, z_s=z_s)\n", + "\n", + "simulator.graph(True, True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the hidden cell below, we load the maps from a dataset. If you downloaded the datasets mentioned above, you can use the code below to load maps from them. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-cell" + ] + }, + "outputs": [], + "source": [ + "# import h5py\n", + "\n", + "# B = 10\n", + "# path_to_kappa_maps = \"/path/to/hkappa128hst_TNG100_rau_trainset.h5\" # modify this to your system path\n", + "# index = [250] + sorted(list(np.random.randint(251, 1000, size=B-1)))\n", + "# kappa_map = torch.tensor(h5py.File(path_to_kappa_maps, \"r\")[\"kappa\"][index])\n", + "\n", + "# path_to_source_maps = \"/path/to/probes.h5\" # modify this to your system path\n", + "# index = [101] + sorted(list(np.random.randint(251, 1000, size=B-1)))\n", + "# filter_ = 0 # grz filters: 0 is g, etc.\n", + "# source_map = torch.tensor(\n", + "# h5py.File(path_to_source_maps, \"r\")[\"galaxies\"][index, ..., filter_]\n", + "# )\n", + "\n", + "# Load saved assets for demonstration\n", + "kappa_maps = torch.tensor(\n", + " np.load(\"assets/kappa_maps.npz\", allow_pickle=True)[\"kappa_maps\"]\n", + ")\n", + "source_maps = torch.tensor(\n", + " np.load(\"assets/source_maps.npz\", allow_pickle=True)[\"source_maps\"]\n", + ")\n", + "\n", + "# Cherry picked example\n", + "source_map = source_maps[0]\n", + "kappa_map = kappa_maps[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Make a simulation by feeding the maps as input to the simulator (using semantic list inputs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "y = simulator([kappa_map, source_map])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "fig, axs = plt.subplots(1, 3, figsize=(15, 4))\n", + "\n", + "beta_extent = [\n", + " -source_pixelscale * source_map.shape[0] / 2,\n", + " source_pixelscale * source_map.shape[0] / 2,\n", + "] * 2\n", + "\n", + "ax = axs[0]\n", + "ax.set_title(r\"Source map\")\n", + "ax.imshow(source_map, origin=\"lower\", cmap=\"gray\", extent=beta_extent)\n", + "ax.set_ylabel(r\"$\\beta_y$ ['']\")\n", + "ax.set_xlabel(r\"$\\beta_x$ ['']\")\n", + "\n", + "theta_extent = [-pixelscale * pixels / 2, pixelscale * pixels / 2] * 2\n", + "\n", + "ax = axs[1]\n", + "ax.set_title(r\"Convergence map\")\n", + "ax.imshow(\n", + " kappa_map,\n", + " origin=\"lower\",\n", + " cmap=\"hot\",\n", + " 
extent=theta_extent,\n", + " norm=plt.cm.colors.LogNorm(vmin=1e-1, vmax=10),\n", + ")\n", + "ax.set_ylabel(r\"$\\theta_y$ ['']\")\n", + "ax.set_xlabel(r\"$\\theta_x$ ['']\")\n", + "ax.set_title(r\"Convergence map\")\n", + "\n", + "ax = axs[2]\n", + "ax.set_title(r\"Lensed image\")\n", + "ax.imshow(y, origin=\"lower\", extent=theta_extent, cmap=\"gray\")\n", + "ax.set_ylabel(r\"$\\theta_y$ ['']\")\n", + "ax.set_xlabel(r\"$\\theta_x$ ['']\")\n", + "ax.set_title(r\"Lensed image\");" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Of course, batching works the same way as before and is super fast. Below, we show the time it takes to make 4 batched simulations on a laptop." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%timeit\n", + "\n", + "ys = vmap(simulator)([kappa_maps, source_maps])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "fig, axs = plt.subplots(3, 3, figsize=(9, 9))\n", + "\n", + "ys = vmap(simulator)([kappa_maps, source_maps])\n", + "for i in range(3):\n", + " ax = axs[i, 0]\n", + " ax.axis(\"off\")\n", + " ax.imshow(\n", + " source_maps[len(ys) - 1 - i],\n", + " origin=\"lower\",\n", + " cmap=\"gray\",\n", + " norm=plt.cm.colors.LogNorm(vmin=1e-2, vmax=1, clip=True),\n", + " )\n", + "\n", + " ax = axs[i, 1]\n", + " ax.axis(\"off\")\n", + " ax.imshow(\n", + " kappa_maps[len(ys) - 1 - i],\n", + " origin=\"lower\",\n", + " cmap=\"hot\",\n", + " norm=plt.cm.colors.LogNorm(vmin=1e-1, vmax=10),\n", + " )\n", + "\n", + " ax = axs[i, 2]\n", + " ax.axis(\"off\")\n", + " ax.imshow(\n", + " ys[len(ys) - 1 - i],\n", + " origin=\"lower\",\n", + " cmap=\"gray\",\n", + " norm=plt.cm.colors.LogNorm(vmin=1e-2, vmax=1, clip=True),\n", + " )\n", + "axs[0, 0].set_title(r\"Source map\")\n", + "axs[0, 1].set_title(r\"Convergence map\")\n", + "axs[0, 2].set_title(r\"Lensed image\")\n", + "plt.subplots_adjust(wspace=0, hspace=0);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating your own Simulator\n", + "\n", + "Here, we only introduce the general design principles to create a Simulator. Worked examples can be found in [this notebook](./Simulators.ipynb). \n", + "\n", + "### A Simulator is very much like a neural network in Pytorch\n", + "A simulator inherits from the super class `Simulator`, similar to how a neural network inherits from the `nn.Module` class in `Pytorch`\n", + "\n", + "```python\n", + "from caustics import Simulator\n", + "\n", + "class MySim(Simulator):\n", + " def __init__(self):\n", + " super().__init__()\n", + "\n", + " def forward(self, x):\n", + " ...\n", + "```\n", + "\n", + "- The `init` method constructs the computation graph, initialize the `caustics` modules, and can prepare or store variables for the `forward` method. \n", + "- The `forward` method is where the actual simulation happens.\n", + "- `x` generally denotes a set of parameters which affect the computations in the simulator graph. \n", + "\n", + "### How to use a Simulator in your workflow\n", + "Like a neural network, `MySim` (and in general any `caustics` modules), must be instantiated **outside** the main workload. This is because `caustics` builds a graph internally every time a module is created. Ideally, this happens only once to avoid overhead. 
In general, you can use the following code pattern:\n", + "```python\n", + "\n", + "# Instantiation\n", + "simulator = MySim()\n", + "\n", + "# Heavy workload\n", + "for n in range(N):\n", + " y = vmap(simulator)(x)\n", + "```\n", + "This allows you to perform expensive computations that only need to happen once in the `__init__` method, while keeping your forward method lightweight.\n", + "\n", + "### How to feed parameters to the different modules\n", + "\n", + "This is probably the easiest part of building a Simulator. Just pass `x` as the last argument of each method call, and you are done. \n", + " \n", + " Here is a minimal example that shows how to feed the parameters `x` to different modules in the `forward` method:\n", + " ```python\n", + " def forward(self, x):\n", + " alpha_x, alpha_y = self.lens.reduced_deflection_angle(self.theta_x, self.theta_y, self.z_s, x)\n", + " beta_x = self.theta_x - alpha_x # lens equation\n", + " ...\n", + " lensed_image = self.source.brightness(beta_x, beta_y, x)\n", + " ``` \n", + "\n", + "You might worry that `x` can have a relatively complex structure (flattened tensor, semantic list, low-level dictionary). \n", + "`caustics` handles this complexity for you. \n", + "You only need to make sure that `x` contains all the **dynamic** parameters required by your custom simulator. \n", + "This design works for every `caustics` module and each of its methods, meaning that `x` is always the last argument in a `caustics` method call signature. \n", + " \n", + "The only details that you need to handle explicitly in your own simulator are things like the camera pixel positions (`theta_x` and `theta_y`) and the source redshift (`z_s`). Those are often constructed in the `__init__` method because they can be assumed fixed. Thus, the example above assumes that they can be retrieved from the `self` registry. A Simulator is often an abstraction of an instrument with many fixed variables to describe it, or aimed at a specific observation. \n", + "\n", + "Of course, you could have more complex workflows for which this assumption is not true. For example, you might want to infer the PSF parameters of your instrument and need to feed this to the simulator as a dynamic parameter. \n", + "The next section has what you need to fully customize your simulator.\n", + "\n", + "### Creating your own variables as leaves in the DAG\n", + "\n", + "You can register new variables in the DAG for custom calculations as follows" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from caustics import Simulator\n", + "\n", + "\n", + "class MySim(Simulator):\n", + " def __init__(self):\n", + " super().__init__() # Don't forget to use super!!\n", + " # shape has to be a tuple, e.g. shape=(1,). 
This can be any shape you need.\n", + " self.add_param(\n", + " \"my_dynamic_arg\", value=None, shape=(1,)\n", + " ) # register a dynamic parameter in the DAG\n", + " self.add_param(\n", + " \"my_static_arg\", value=1.0, shape=(1,)\n", + " ) # register a static parameter in the DAG\n", + "\n", + " def forward(self, x):\n", + " my_arg = x[\"MySim\"][0] # retrieve your arguments\n", + " my_arg2 = x[\"MySim\"][1]\n", + "\n", + " # My very complex workflow\n", + " ...\n", + "\n", + "\n", + "sim = MySim()\n", + "sim.graph(True, True)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "caustic", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.18" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/source/tutorials/InvertLensEquation.ipynb b/docs/source/tutorials/InvertLensEquation.ipynb index 777ec174..a8da032c 100644 --- a/docs/source/tutorials/InvertLensEquation.ipynb +++ b/docs/source/tutorials/InvertLensEquation.ipynb @@ -48,7 +48,7 @@ "res = 0.05\n", "upsample_factor = 1\n", "fov = res * n_pix\n", - "thx, thy = caustics.utils.get_meshgrid(\n", + "thx, thy = caustics.utils.meshgrid(\n", " res / upsample_factor,\n", " upsample_factor * n_pix,\n", " upsample_factor * n_pix,\n", diff --git a/docs/source/tutorials/LensZoo.ipynb b/docs/source/tutorials/LensZoo.ipynb index 8bf0b44b..a5b1e8ba 100644 --- a/docs/source/tutorials/LensZoo.ipynb +++ b/docs/source/tutorials/LensZoo.ipynb @@ -54,10 +54,9 @@ "res = 0.05\n", "upsample_factor = 2\n", "fov = res * n_pix\n", - "thx, thy = caustics.utils.get_meshgrid(\n", + "thx, thy = caustics.utils.meshgrid(\n", " res / upsample_factor,\n", " upsample_factor * n_pix,\n", - " upsample_factor * n_pix,\n", " dtype=torch.float32,\n", ")\n", "z_l = torch.tensor(0.5, dtype=torch.float32)\n", @@ -91,7 +90,7 @@ " y0=0.0,\n", " th_ein=1.0,\n", ")\n", - "sim = caustics.Lens_Source(\n", + "sim = caustics.LensSource(\n", " lens=lens,\n", " source=base_sersic,\n", " pixelscale=res,\n", @@ -132,7 +131,7 @@ " y0=0.0,\n", " th_ein=1.0,\n", ")\n", - "sim = caustics.Lens_Source(\n", + "sim = caustics.LensSource(\n", " lens=lens,\n", " source=base_sersic,\n", " pixelscale=res,\n", @@ -178,7 +177,7 @@ " phi=np.pi / 2,\n", " b=1.0,\n", ")\n", - "sim = caustics.Lens_Source(\n", + "sim = caustics.LensSource(\n", " lens=lens,\n", " source=base_sersic,\n", " pixelscale=res,\n", @@ -225,7 +224,7 @@ " b=1.0,\n", " t=0.5,\n", ")\n", - "sim = caustics.Lens_Source(\n", + "sim = caustics.LensSource(\n", " lens=lens,\n", " source=base_sersic,\n", " pixelscale=res,\n", @@ -272,7 +271,7 @@ " m=1e13,\n", " c=20.0,\n", ")\n", - "sim = caustics.Lens_Source(\n", + "sim = caustics.LensSource(\n", " lens=lens,\n", " source=base_sersic,\n", " pixelscale=res,\n", @@ -322,7 +321,7 @@ " scale_radius=1.0,\n", " tau=3.0,\n", ")\n", - "sim = caustics.Lens_Source(\n", + "sim = caustics.LensSource(\n", " lens=lens,\n", " source=base_sersic,\n", " pixelscale=res,\n", @@ -372,7 +371,7 @@ " core_radius=5e-1,\n", " scale_radius=15.0,\n", ")\n", - "sim = caustics.Lens_Source(\n", + "sim = caustics.LensSource(\n", " lens=lens,\n", " source=base_sersic,\n", " pixelscale=res,\n", @@ -417,7 +416,7 @@ " gamma_1=1.0,\n", " gamma_2=-1.0,\n", ")\n", - "sim = caustics.Lens_Source(\n", + "sim = caustics.LensSource(\n", " lens=lens,\n", " 
source=base_sersic,\n", " pixelscale=res,\n", @@ -459,7 +458,7 @@ " y0=0.0,\n", " surface_density=1.5,\n", ")\n", - "sim = caustics.Lens_Source(\n", + "sim = caustics.LensSource(\n", " lens=lens,\n", " source=base_sersic,\n", " pixelscale=res,\n", diff --git a/docs/source/tutorials/MultiplaneDemo.ipynb b/docs/source/tutorials/MultiplaneDemo.ipynb index e96aa09a..c3c41b9f 100644 --- a/docs/source/tutorials/MultiplaneDemo.ipynb +++ b/docs/source/tutorials/MultiplaneDemo.ipynb @@ -46,10 +46,9 @@ "res = 0.5\n", "upsample_factor = 2\n", "fov = res * n_pix\n", - "thx, thy = caustics.utils.get_meshgrid(\n", + "thx, thy = caustics.utils.meshgrid(\n", " res / upsample_factor,\n", " upsample_factor * n_pix,\n", - " upsample_factor * n_pix,\n", " dtype=torch.float32,\n", ")\n", "z_s = torch.tensor(1.5, dtype=torch.float32)" @@ -244,7 +243,7 @@ "\n", "plt.imshow(TD.detach().cpu().numpy(), origin=\"lower\")\n", "plt.colorbar()\n", - "plt.title(\"Time Delay (seconds)\")\n", + "plt.title(\"Time Delay (days)\")\n", "plt.show()" ] }, diff --git a/docs/source/tutorials/Parameters.ipynb b/docs/source/tutorials/Parameters.ipynb index ddf2fcda..ed88a1f6 100644 --- a/docs/source/tutorials/Parameters.ipynb +++ b/docs/source/tutorials/Parameters.ipynb @@ -61,7 +61,7 @@ "lens_light = caustics.Sersic(name=\"lenslight\", x0=0.0, phi=1.3, Re=1.0)\n", "\n", "# A simulator which captures all these parameters into a single DAG\n", - "sim = caustics.Lens_Source(\n", + "sim = caustics.LensSource(\n", " lens=lens, source=source, lens_light=lens_light, pixelscale=0.05, pixels_x=100\n", ")" ] @@ -81,7 +81,7 @@ "metadata": {}, "outputs": [], "source": [ - "sim.get_graph(True, True)" + "sim.graph(True, True)" ] }, { @@ -93,7 +93,7 @@ "source": [ "# Accessing a parameter and giving it a value will turn it into a static parameter\n", "sim.SIE.phi = 0.4\n", - "sim.get_graph(True, True)" + "sim.graph(True, True)" ] }, { @@ -105,7 +105,7 @@ "source": [ "# Accessing a parameter and setting it to None will turn it into a dynamic parameter\n", "sim.lenslight.x0 = None\n", - "sim.get_graph(True, True)" + "sim.graph(True, True)" ] }, { diff --git a/docs/source/tutorials/Playground.ipynb b/docs/source/tutorials/Playground.ipynb index f4e49419..185f7439 100644 --- a/docs/source/tutorials/Playground.ipynb +++ b/docs/source/tutorials/Playground.ipynb @@ -41,10 +41,9 @@ "res = 0.05\n", "upsample_factor = 2\n", "fov = res * n_pix\n", - "thx, thy = caustics.utils.get_meshgrid(\n", + "thx, thy = caustics.utils.meshgrid(\n", " res / upsample_factor,\n", " upsample_factor * n_pix,\n", - " upsample_factor * n_pix,\n", " dtype=torch.float32,\n", ")\n", "z_l = torch.tensor(0.5, dtype=torch.float32)\n", diff --git a/docs/source/tutorials/Simulators.ipynb b/docs/source/tutorials/Simulators.ipynb index 1174a4a1..8edc7e4f 100644 --- a/docs/source/tutorials/Simulators.ipynb +++ b/docs/source/tutorials/Simulators.ipynb @@ -41,10 +41,9 @@ "res = 0.05\n", "upsample_factor = 2\n", "fov = res * n_pix\n", - "thx, thy = caustics.utils.get_meshgrid(\n", + "thx, thy = caustics.utils.meshgrid(\n", " res / upsample_factor,\n", " upsample_factor * n_pix,\n", - " upsample_factor * n_pix,\n", " dtype=torch.float32,\n", ")\n", "z_l = torch.tensor(0.5, dtype=torch.float32)\n", @@ -104,7 +103,7 @@ "\n", "sim = Simple_Sim(sie, src, torch.tensor(0.8))\n", "\n", - "sim.get_graph(True, True)" + "sim.graph(True, True)" ] }, { @@ -153,7 +152,7 @@ "\n", "sim2 = Simple_Sim(sie, src, torch.tensor(0.8))\n", "\n", - "sim2.get_graph(True, True)" + "sim2.graph(True, True)" ] }, { @@ 
-217,7 +216,7 @@ "\n", "simf = Simple_Sim(sief, srcf, z_s=torch.tensor(0.8))\n", "\n", - "simf.get_graph(True, True)" + "simf.graph(True, True)" ] }, { diff --git a/docs/source/tutorials/VisualizeCaustics.ipynb b/docs/source/tutorials/VisualizeCaustics.ipynb index 51345450..1d594c4b 100644 --- a/docs/source/tutorials/VisualizeCaustics.ipynb +++ b/docs/source/tutorials/VisualizeCaustics.ipynb @@ -49,10 +49,9 @@ "res = 0.05\n", "upsample_factor = 2\n", "fov = res * n_pix\n", - "thx, thy = caustics.utils.get_meshgrid(\n", + "thx, thy = caustics.utils.meshgrid(\n", " res / upsample_factor,\n", " upsample_factor * n_pix,\n", - " upsample_factor * n_pix,\n", " dtype=torch.float32,\n", ")\n", "z_l = torch.tensor(0.5, dtype=torch.float32)\n", diff --git a/docs/source/tutorials/assets/kappa_maps.npz b/docs/source/tutorials/assets/kappa_maps.npz new file mode 100644 index 00000000..16528c01 Binary files /dev/null and b/docs/source/tutorials/assets/kappa_maps.npz differ diff --git a/docs/source/tutorials/assets/source_maps.npz b/docs/source/tutorials/assets/source_maps.npz new file mode 100644 index 00000000..de1fa908 Binary files /dev/null and b/docs/source/tutorials/assets/source_maps.npz differ diff --git a/docs/source/tutorials/example.yml b/docs/source/tutorials/example.yml index 8d83cdeb..c9a457f9 100644 --- a/docs/source/tutorials/example.yml +++ b/docs/source/tutorials/example.yml @@ -18,7 +18,7 @@ lnslt: &lnslt simulator: name: minisim - kind: Lens_Source + kind: LensSource init_kwargs: # Single lense lens: *lens diff --git a/docs/source/websim.rst b/docs/source/websim.rst index 0f6ecf79..f74e1e27 100644 --- a/docs/source/websim.rst +++ b/docs/source/websim.rst @@ -6,3 +6,12 @@ Here is a little online simulator run using caustics! It's slow because it's run Pro tip: check out the "Pixelated" source to lens any image you want! `Launch the simulator! `_ + +For frequent simulator users (e.g., if you plan on exploring the parameter space of a lens), we recommend installing the simulator locally and running it in your browser. Follow the steps below: + +1. Install Caustics. Please follow the instructions on the `install page `_. +2. ``pip install streamlit`` +3. ``git clone https://github.com/Ciela-Institute/caustics-webapp.git`` +4. Move into the ``caustics-webapp/gui/`` directory and run the following command: ``streamlit run streamlit_app.py --server.enableXsrfProtection=false`` + +If you were successful in installing the simulator, Step 4 should automatically open the simulator in your default browser. diff --git a/src/caustics/__init__.py b/src/caustics/__init__.py index 5e514a5d..ef1d231f 100644 --- a/src/caustics/__init__.py +++ b/src/caustics/__init__.py @@ -13,6 +13,7 @@ EPL, ExternalShear, PixelatedConvergence, + PixelatedPotential, Multiplane, NFW, Point, @@ -22,22 +23,22 @@ SinglePlane, MassSheet, TNFW, + EnclosedMass, ) from .light import ( Source, Pixelated, PixelatedTime, Sersic, -) # PROBESDataset conflicts with .data -from .data import HDF5Dataset, IllustrisKappaDataset, PROBESDataset +) from . import utils -from .sims import Lens_Source, Simulator +from .sims import LensSource, Microlens, Simulator from .tests import test from .models.api import build_simulator from . 
import func __version__ = VERSION -__author__ = "Ciela" +__author__ = "Ciela Institute" __all__ = [ "Cosmology", @@ -50,6 +51,7 @@ "EPL", "ExternalShear", "PixelatedConvergence", + "PixelatedPotential", "Multiplane", "NFW", "Point", @@ -59,15 +61,14 @@ "SinglePlane", "MassSheet", "TNFW", + "EnclosedMass", "Source", "Pixelated", "PixelatedTime", "Sersic", - "HDF5Dataset", - "IllustrisKappaDataset", - "PROBESDataset", "utils", - "Lens_Source", + "LensSource", + "Microlens", "Simulator", "test", "build_simulator", diff --git a/src/caustics/constants.py b/src/caustics/constants.py index 5ac51fa3..3153d611 100644 --- a/src/caustics/constants.py +++ b/src/caustics/constants.py @@ -21,4 +21,5 @@ G_over_c2 = float((_G_astropy / _c_astropy**2).to("Mpc/solMass").value) # type: ignore c_Mpc_s = float(_c_astropy.to("Mpc/s").value) km_to_Mpc = 3.2407792896664e-20 # TODO: use astropy +days_to_seconds = 24.0 * 60.0 * 60.0 # fmt: on diff --git a/src/caustics/data/__init__.py b/src/caustics/data/__init__.py deleted file mode 100644 index 5702b575..00000000 --- a/src/caustics/data/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .hdf5dataset import HDF5Dataset -from .illustris_kappa import IllustrisKappaDataset -from .probes import PROBESDataset - -__all__ = ["HDF5Dataset", "IllustrisKappaDataset", "PROBESDataset"] diff --git a/src/caustics/data/hdf5dataset.py b/src/caustics/data/hdf5dataset.py deleted file mode 100644 index 11c43311..00000000 --- a/src/caustics/data/hdf5dataset.py +++ /dev/null @@ -1,58 +0,0 @@ -from typing import Dict, List, Union - -import h5py -import torch -from torch import Tensor -from torch.utils.data import Dataset - -__all__ = ("HDF5Dataset",) - - -class HDF5Dataset(Dataset): - """ - Light-weight HDF5 dataset that reads all the data into tensors. Assumes all - groups in dataset have the same length. - """ - - def __init__( - self, - path: str, - keys: List[str], - device: torch.device = torch.device("cpu"), - dtypes: Union[Dict[str, torch.dtype], torch.dtype] = torch.float32, - ): - """ - Parameters - ---------- - path: string - location of dataset. - keys: List[str] - dataset keys to read. - device: torch.deviec - the device for torch - dtypes: torch.dtype - either a numpy datatype to which the items will be converted - or a dictionary specifying the datatype corresponding to each key. - """ - super().__init__() - self.keys = keys - self.dtypes = dtypes - with h5py.File(path, "r") as f: - if isinstance(dtypes, dict): - self.data = { - k: torch.tensor(f[k][:], device=device, dtype=dtypes[k]) - for k in keys - } - else: - self.data = { - k: torch.tensor(f[k][:], device=device, dtype=dtypes) for k in keys - } - - def __len__(self): - return len(self.data[self.keys[0]]) - - def __getitem__(self, i: Union[int, slice]) -> Dict[str, Tensor]: - """ - Retrieves the data at index `i` for each key. 
- """ - return {k: self.data[k][i] for k in self.keys} diff --git a/src/caustics/data/illustris_kappa.py b/src/caustics/data/illustris_kappa.py deleted file mode 100644 index b5d94da3..00000000 --- a/src/caustics/data/illustris_kappa.py +++ /dev/null @@ -1,28 +0,0 @@ -from typing import Union - -import torch -from torch import Tensor - -from .hdf5dataset import HDF5Dataset - -__all__ = ("IllustrisKappaDataset",) - - -class IllustrisKappaDataset: - def __init__( - self, - path: str, - device: torch.device = torch.device("cpu"), - dtype: torch.dtype = torch.float32, - ): - super().__init__() - self.key = "kappa" - self.ds = HDF5Dataset(path, [self.key], device, dtype) - assert self[0].shape[-1] == self[0].shape[-2] - self.n_pix = self[0].shape[-1] - - def __len__(self): - return len(self.ds) - - def __getitem__(self, i: Union[int, slice]) -> Tensor: - return self.ds[i][self.key] diff --git a/src/caustics/data/probes.py b/src/caustics/data/probes.py deleted file mode 100644 index 05df1304..00000000 --- a/src/caustics/data/probes.py +++ /dev/null @@ -1,32 +0,0 @@ -from typing import Union - -import torch -from torch import Tensor - -from .hdf5dataset import HDF5Dataset - -__all__ = ("PROBESDataset",) - - -class PROBESDataset: - def __init__( - self, - path: str, - device: torch.device = torch.device("cpu"), - dtype: torch.dtype = torch.float32, - ): - super().__init__() - self.key = "galaxies" - self.ds = HDF5Dataset(path, [self.key], device, dtype) - - def __len__(self): - return len(self.ds) - - def __getitem__(self, i: Union[int, slice]) -> Tensor: - """ - Returns - ------- - Tensor - image `i` with channel as first dimension. - """ - return self.ds[i][self.key].movedim(-1, 0) diff --git a/src/caustics/func.py b/src/caustics/func.py index 0adc394f..9d04281a 100644 --- a/src/caustics/func.py +++ b/src/caustics/func.py @@ -2,6 +2,7 @@ forward_raytrace, physical_from_reduced_deflection_angle, reduced_from_physical_deflection_angle, + time_delay_arcsec2_to_days, reduced_deflection_angle_sie, potential_sie, convergence_sie, @@ -53,6 +54,7 @@ "forward_raytrace", "physical_from_reduced_deflection_angle", "reduced_from_physical_deflection_angle", + "time_delay_arcsec2_to_days", "reduced_deflection_angle_sie", "potential_sie", "convergence_sie", diff --git a/src/caustics/lenses/__init__.py b/src/caustics/lenses/__init__.py index 77cbe3c4..411ef2fc 100644 --- a/src/caustics/lenses/__init__.py +++ b/src/caustics/lenses/__init__.py @@ -2,6 +2,7 @@ from .epl import EPL from .external_shear import ExternalShear from .pixelated_convergence import PixelatedConvergence +from .pixelated_potential import PixelatedPotential from .nfw import NFW from .point import Point from .pseudo_jaffe import PseudoJaffe @@ -11,6 +12,7 @@ from .mass_sheet import MassSheet from .tnfw import TNFW from .multiplane import Multiplane +from .enclosed_mass import EnclosedMass __all__ = [ @@ -19,6 +21,7 @@ "EPL", "ExternalShear", "PixelatedConvergence", + "PixelatedPotential", "Multiplane", "NFW", "Point", @@ -28,4 +31,5 @@ "SinglePlane", "MassSheet", "TNFW", + "EnclosedMass", ] diff --git a/src/caustics/lenses/base.py b/src/caustics/lenses/base.py index 1d24edfd..11673e3b 100644 --- a/src/caustics/lenses/base.py +++ b/src/caustics/lenses/base.py @@ -7,10 +7,9 @@ import torch from torch import Tensor -from ..constants import arcsec_to_rad, c_Mpc_s from ..cosmology import Cosmology from ..parametrized import Parametrized, unpack -from .utils import get_magnification +from .utils import magnification from ..packed import Packed from . 
import func @@ -121,7 +120,7 @@ def magnification( *Unit: unitless* """ - return get_magnification(partial(self.raytrace, params=params), x, y, z_s) + return magnification(partial(self.raytrace, params=params), x, y, z_s) @unpack def forward_raytrace( @@ -1006,15 +1005,15 @@ def raytrace( ax, ay = self.reduced_deflection_angle(x, y, z_s, params, **kwargs) return x - ax, y - ay - @staticmethod - def _arcsec2_to_time(z_l, z_s, cosmology, params): + def _arcsec2_to_days(self, z_l, z_s, params): """ - This method is used by :func:`caustics.lenses.ThinLens.time_delay` to convert arcsec^2 to seconds in the context of gravitational time delays. + This method is used by :func:`caustics.lenses.ThinLens.time_delay` to + convert arcsec^2 to days in the context of gravitational time delays. """ - d_l = cosmology.angular_diameter_distance(z_l, params) - d_s = cosmology.angular_diameter_distance(z_s, params) - d_ls = cosmology.angular_diameter_distance_z1z2(z_l, z_s, params) - return (1 + z_l) / c_Mpc_s * d_s * d_l / d_ls * arcsec_to_rad**2 + d_l = self.cosmology.angular_diameter_distance(z_l, params) + d_s = self.cosmology.angular_diameter_distance(z_s, params) + d_ls = self.cosmology.angular_diameter_distance_z1z2(z_l, z_s, params) + return func.time_delay_arcsec2_to_days(d_l, d_s, d_ls, z_l) @unpack def time_delay( @@ -1032,13 +1031,15 @@ def time_delay( """ Computes the gravitational time delay for light passing through the lens at given coordinates. - This time delay is induced by the photons travelling through a gravitational potential well (Shapiro time delay) plus the effect of the increased path length that the photons must traverse (geometric time delay). - The main equation involved here is the following:: + This time delay is induced by the photons traveling through a gravitational potential well (Shapiro time delay) plus the effect of the increased path length that the photons must traverse (geometric time delay). + The main equation involved here is the following: - \Delta t = \frac{1 + z_l}{c} \frac{D_s}{D_l D_{ls}} \left[ \frac{1}{2}|\vec{\alpha}(\vec{\theta})|^2 - \psi(\vec{\theta}) \right] + .. math:: - where :math:`\vec{\alpha}(\vec{\theta})` is the deflection angle, - :math:`\psi(\vec{\theta})` is the lensing potential, + \\Delta t = \\frac{1 + z_l}{c} \\frac{D_s}{D_l D_{ls}} \\left[ \\frac{1}{2}|\\vec{\\alpha}(\\vec{\\theta})|^2 - \psi(\\vec{\\theta}) \\right] + + where :math:`\\vec{\\alpha}(\\vec{\\theta})` is the deflection angle, + :math:`\\psi(\\vec{\\theta})` is the lensing potential, :math:`D_l` is the comoving distance to the lens, :math:`D_s` is the comoving distance to the source, and :math:`D_{ls}` is the comoving distance between the lens and the source. In the above equation, the first term is the geometric time delay and the second term is the gravitational time delay. @@ -1079,7 +1080,7 @@ def time_delay( Tensor Time delay at the given coordinates. 
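Note on the hunk above: the arcsec²-to-time conversion now lives in `func.time_delay_arcsec2_to_days` and the result is reported in days rather than seconds. A minimal sketch of the factor it computes, assuming the constants imported later in this diff (`c_Mpc_s`, `arcsec_to_rad`, `days_to_seconds`) are available from `caustics.constants`:

```python
# Sketch of the conversion factor used by ThinLens.time_delay (see func/base.py below).
# d_l, d_s, d_ls: angular diameter distances in Mpc; z_l: lens redshift.
from caustics.constants import arcsec_to_rad, c_Mpc_s, days_to_seconds


def time_delay_factor_days(d_l, d_s, d_ls, z_l):
    # (1 + z_l) / c * D_l * D_s / D_ls converts arcsec^2 to seconds;
    # dividing by days_to_seconds expresses the delay in days.
    return (1 + z_l) / c_Mpc_s * d_s * d_l / d_ls * arcsec_to_rad**2 / days_to_seconds
```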
- *Unit: seconds* + *Unit: days* References ---------- @@ -1090,13 +1091,13 @@ def time_delay( if shapiro_time_delay: potential = self.potential(x, y, z_s, params) - TD -= potential + TD = TD - potential if geometric_time_delay: ax, ay = self.physical_deflection_angle(x, y, z_s, params) fp = 0.5 * (ax**2 + ay**2) - TD += fp + TD = TD + fp - factor = self._arcsec2_to_time(z_l, z_s, self.cosmology, params) + factor = self._arcsec2_to_days(z_l, z_s, params) return factor * TD diff --git a/src/caustics/lenses/enclosed_mass.py b/src/caustics/lenses/enclosed_mass.py new file mode 100644 index 00000000..f37c37f7 --- /dev/null +++ b/src/caustics/lenses/enclosed_mass.py @@ -0,0 +1,280 @@ +# mypy: disable-error-code="operator,union-attr,dict-item" +from typing import Optional, Union, Annotated, Callable + +from torch import Tensor + +from .base import ThinLens, CosmologyType, NameType, ZLType +from ..parametrized import unpack +from ..packed import Packed +from .func import physical_deflection_angle_enclosed_mass, convergence_enclosed_mass + +__all__ = ("EnclosedMass",) + + +class EnclosedMass(ThinLens): + """ + A class for representing a lens with an enclosed mass profile. This generic + lens profile can represent any lens with a mass distribution that can be + described by a function that returns the enclosed mass as a function of + radius. + """ + + _null_params = { + "x0": 0.0, + "y0": 0.0, + "q": 1.0, + "phi": 0.0, + "p": 1.0, + } + + def __init__( + self, + cosmology: CosmologyType, + enclosed_mass: Callable, + z_l: ZLType = None, + x0: Annotated[ + Optional[Union[Tensor, float]], "The x-coordinate of the lens center", True + ] = None, + y0: Annotated[ + Optional[Union[Tensor, float]], "The y-coordinate of the lens center", True + ] = None, + q: Annotated[ + Optional[Union[Tensor, float]], "The axis ratio of the lens", True + ] = None, + phi: Annotated[ + Optional[Union[Tensor, float]], "The position angle of the lens", True + ] = None, + p: Annotated[ + Optional[Union[Tensor, list[float]]], + "parameters for the enclosed mass function", + True, + ] = None, + s: Annotated[ + float, "Softening parameter to prevent numerical instabilities" + ] = 0.0, + name: NameType = None, + **kwargs, + ): + """ + Initialize an enclosed mass lens. + + Parameters + ---------- + name : str + The name of the lens. + + cosmology : Cosmology + The cosmology object that describes the Universe. + + enclosed_mass : callable + A function that takes a radius and a set of parameters and returns the enclosed mass. Should be of the form ``enclosed_mass(r, p) -> M`` and returns units of Msun. + + *Unit: Msun* + + z_l : float + The redshift of the lens. + + *Unit: unitless* + + x0 : float or Tensor, optional + The x-coordinate of the lens center. + + *Unit: arcsec* + + y0 : float or Tensor, optional + The y-coordinate of the lens center. + + *Unit: arcsec* + + q : float or Tensor, optional + The axis ratio of the lens. ratio of semi-minor to semi-major axis (b/a). + + *Unit: unitless* + + phi : float or Tensor, optional + The position angle of the lens. + + *Unit: radians* + + p : list[float] or Tensor, optional + The parameters for the enclosed mass function. + + *Unit: user-defined* + + s : float, optional + Softening parameter to prevent numerical instabilities. 
+ + *Unit: arcsec* + """ + super().__init__(cosmology, z_l, name=name, **kwargs) + self.enclosed_mass = enclosed_mass + + self.add_param("x0", x0) + self.add_param("y0", y0) + self.add_param("q", q) + self.add_param("phi", phi) + self.add_param("p", p) + + self.s = s + + @unpack + def physical_deflection_angle( + self, + x: Tensor, + y: Tensor, + z_s: Tensor, + *args, + params: Optional[Packed] = None, + z_l: Optional[Tensor] = None, + x0: Optional[Tensor] = None, + y0: Optional[Tensor] = None, + q: Optional[Tensor] = None, + phi: Optional[Tensor] = None, + p: Optional[Tensor] = None, + **kwargs, + ) -> tuple[Tensor, Tensor]: + """ + Calculate the physical deflection angle of the lens at a given position. + + Parameters + ---------- + name : str + The name of the lens. + + cosmology : Cosmology + The cosmology object that describes the Universe. + + z_l : float + The redshift of the lens. + + *Unit: unitless* + + x0 : float or Tensor, optional + The x-coordinate of the lens center. + + *Unit: arcsec* + + y0 : float or Tensor, optional + The y-coordinate of the lens center. + + *Unit: arcsec* + + q : float or Tensor, optional + The axis ratio of the lens. ratio of semi-minor to semi-major axis (b/a). + + *Unit: unitless* + + phi : float or Tensor, optional + The position angle of the lens. + + *Unit: radians* + + p : list[float] or Tensor, optional + The parameters for the enclosed mass function. + + *Unit: user-defined* + + Returns + ------- + The physical deflection angle at the given position. [Tensor, Tensor] + + *Unit: arcsec* + """ + return physical_deflection_angle_enclosed_mass( + x0, y0, q, phi, lambda r: self.enclosed_mass(r, p), x, y, self.s + ) + + @unpack + def potential( + self, + x: Tensor, + y: Tensor, + z_s: Tensor, + *args, + params: Optional[Packed] = None, + z_l: Optional[Tensor] = None, + x0: Optional[Tensor] = None, + y0: Optional[Tensor] = None, + p: Optional[Tensor] = None, + **kwargs, + ) -> Tensor: + raise NotImplementedError( + "Potential is not implemented for enclosed mass profiles." + ) + + @unpack + def convergence( + self, + x: Tensor, + y: Tensor, + z_s: Tensor, + *args, + params: Optional[Packed] = None, + z_l: Optional[Tensor] = None, + x0: Optional[Tensor] = None, + y0: Optional[Tensor] = None, + q: Optional[Tensor] = None, + phi: Optional[Tensor] = None, + p: Optional[Tensor] = None, + **kwargs, + ) -> Tensor: + """ + Calculate the dimensionless convergence of the lens at a given position. + + Parameters + ---------- + name : str + The name of the lens. + + cosmology : Cosmology + The cosmology object that describes the Universe. + + z_l : float + The redshift of the lens. + + *Unit: unitless* + + x0 : float or Tensor, optional + The x-coordinate of the lens center. + + *Unit: arcsec* + + y0 : float or Tensor, optional + The y-coordinate of the lens center. + + *Unit: arcsec* + + q : float or Tensor, optional + The axis ratio of the lens. ratio of semi-minor to semi-major axis (b/a). + + *Unit: unitless* + + phi : float or Tensor, optional + The position angle of the lens. + + *Unit: radians* + + p : list[float] or Tensor, optional + The parameters for the enclosed mass function. + + *Unit: user-defined* + + Returns + ------- + The dimensionless convergence at the given position. 
[Tensor] + + *Unit: unitless* + """ + + csd = self.cosmology.critical_surface_density(z_l, z_s, params) + return convergence_enclosed_mass( + x0, + y0, + q, + phi, + lambda r: self.enclosed_mass(r, p), + x, + y, + csd, + self.s, + ) diff --git a/src/caustics/lenses/func/__init__.py b/src/caustics/lenses/func/__init__.py index 6357d21f..f9bfc03c 100644 --- a/src/caustics/lenses/func/__init__.py +++ b/src/caustics/lenses/func/__init__.py @@ -2,6 +2,7 @@ forward_raytrace, physical_from_reduced_deflection_angle, reduced_from_physical_deflection_angle, + time_delay_arcsec2_to_days, ) from .sie import reduced_deflection_angle_sie, potential_sie, convergence_sie from .point import reduced_deflection_angle_point, potential_point, convergence_point @@ -52,11 +53,16 @@ M0_totmass_tnfw, concentration_tnfw, ) +from .enclosed_mass import ( + physical_deflection_angle_enclosed_mass, + convergence_enclosed_mass, +) __all__ = ( "forward_raytrace", "physical_from_reduced_deflection_angle", "reduced_from_physical_deflection_angle", + "time_delay_arcsec2_to_days", "reduced_deflection_angle_sie", "potential_sie", "convergence_sie", @@ -102,4 +108,6 @@ "M0_scalemass_tnfw", "M0_totmass_tnfw", "concentration_tnfw", + "physical_deflection_angle_enclosed_mass", + "convergence_enclosed_mass", ) diff --git a/src/caustics/lenses/func/base.py b/src/caustics/lenses/func/base.py index 3a67716f..e86a579b 100644 --- a/src/caustics/lenses/func/base.py +++ b/src/caustics/lenses/func/base.py @@ -1,6 +1,7 @@ import torch from ...utils import batch_lm +from ...constants import arcsec_to_rad, c_Mpc_s, days_to_seconds def forward_raytrace(bx, by, raytrace, epsilon, n_init, fov): @@ -50,7 +51,10 @@ def forward_raytrace(bx, by, raytrace, epsilon, n_init, fov): bxy = torch.stack((bx, by)).repeat(n_init, 1) # has shape (n_init, Dout:2) # Random starting points in image plane - guesses = (torch.as_tensor(fov) * (torch.rand(n_init, 2) - 0.5)).to( + guesses = ( + torch.as_tensor(fov, dtype=bx.dtype) + * (torch.rand(n_init, 2, dtype=bx.dtype) - 0.5) + ).to( device=bxy.device ) # Has shape (n_init, Din:2) @@ -161,3 +165,12 @@ def reduced_from_physical_deflection_angle(ax, ay, d_s, d_ls): """ return (d_ls / d_s) * ax, (d_ls / d_s) * ay + + +def time_delay_arcsec2_to_days(d_l, d_s, d_ls, z_l): + """ + Computes a scaling factor to use in time delay calculations which converts + the time delay (i.e. potential and deflection angle squared terms) from + arcsec^2 to units of days. + """ + return (1 + z_l) / c_Mpc_s * d_s * d_l / d_ls * arcsec_to_rad**2 / days_to_seconds diff --git a/src/caustics/lenses/func/enclosed_mass.py b/src/caustics/lenses/func/enclosed_mass.py new file mode 100644 index 00000000..71eb8e53 --- /dev/null +++ b/src/caustics/lenses/func/enclosed_mass.py @@ -0,0 +1,124 @@ +from ...utils import translate_rotate, derotate +from ...constants import G_over_c2 +import torch + + +def physical_deflection_angle_enclosed_mass(x0, y0, q, phi, enclosed_mass, x, y, s=0.0): + """ + Calculate the reduced deflection angle for a lens with an enclosed mass + profile. See the Meneghetti lecture notes Equation 3.19 for the physical + deflection angle. + + Parameters + ---------- + x0: Tensor + The x-coordinate of the lens center. + + *Unit: arcsec* + + y0: Tensor + The y-coordinate of the lens center. + + *Unit: arcsec* + + q: Tensor + The axis ratio of the lens. ratio of semi-minor to semi-major axis (b/a). + + *Unit: unitless* + + phi: Tensor + The position angle of the lens. The angle relative to the positive x-axis. 
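For context, the `EnclosedMass` profile introduced above is driven entirely by a user-supplied callable `enclosed_mass(r, p) -> Msun`. A usage sketch mirroring the test added at the end of this diff (the exponential mass function is illustrative only):

```python
import torch
import caustics

cosmo = caustics.FlatLambdaCDM(name="cosmo")
# Illustrative enclosed-mass function: M(<r) = 1 - exp(-r / p).
lens = caustics.EnclosedMass(
    cosmology=cosmo,
    enclosed_mass=lambda r, p: 1 - torch.exp(-r / p),
    z_l=0.5,
    **caustics.EnclosedMass._null_params,
)
x, y = caustics.utils.meshgrid(0.2, 10)  # ny now defaults to nx (see utils.py below)
z_s = torch.tensor(1.0)
ax, ay = lens.reduced_deflection_angle(x, y, z_s)
kappa = lens.convergence(x, y, z_s)
```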
+ + *Unit: radians* + + enclosed_mass: Callable + The enclosed mass profile function, solely a function of r. + + x: Tensor + The x-coordinate of the lens. + + *Unit: arcsec* + + y: Tensor + The y-coordinate of the lens. + + *Unit: arcsec* + + z_s: Tensor + The redshift of the source. + + *Unit: unitless* + + Returns + ------- + tuple[Tensor, Tensor] + The physical deflection angle. + """ + x, y = translate_rotate(x, y, x0, y0, phi) + r = (x**2 / q + q * y**2).sqrt() + s + alpha = 4 * G_over_c2 * enclosed_mass(r) / r**2 + ax = alpha * x / (q * r) + ay = alpha * y * q / r + return derotate(ax, ay, phi) + + +def convergence_enclosed_mass( + x0, y0, q, phi, enclosed_mass, x, y, critical_surface_density, s=0.0 +): + """ + Calculate the convergence for a lens with an enclosed mass profile. See the + Meneghetti lecture notes Equation 3.28 for the convergence from an enclosed + mass profile. + + Parameters + ---------- + x0: Tensor + The x-coordinate of the lens center. + + *Unit: arcsec* + + y0: Tensor + The y-coordinate of the lens center. + + *Unit: arcsec* + + q: Tensor + The axis ratio of the lens. ratio of semi-minor to semi-major axis (b/a). + + *Unit: unitless* + + phi: Tensor + The position angle of the lens. The angle relative to the positive x-axis. + + *Unit: radians* + + enclosed_mass: Callable + The enclosed mass profile function, solely a function of r. + + x: Tensor + The x-coordinate of the lens. + + *Unit: arcsec* + + y: Tensor + The y-coordinate of the lens. + + *Unit: arcsec* + + z_s: Tensor + The redshift of the source. + + *Unit: unitless* + + Returns + ------- + Tensor + The convergence. + """ + x, y = translate_rotate(x, y, x0, y0, phi) + r = (x**2 / q + q * y**2).sqrt() + s + return ( + 0.5 + * torch.vmap(torch.func.grad(enclosed_mass))(r.reshape(-1)).reshape(r.shape) + / (r * torch.pi * critical_surface_density) + ) diff --git a/src/caustics/lenses/func/pixelated_convergence.py b/src/caustics/lenses/func/pixelated_convergence.py index f199e8a0..86e3f5fe 100644 --- a/src/caustics/lenses/func/pixelated_convergence.py +++ b/src/caustics/lenses/func/pixelated_convergence.py @@ -2,7 +2,7 @@ import torch.nn.functional as F from scipy.fft import next_fast_len -from ...utils import safe_divide, safe_log, get_meshgrid, interp2d +from ...utils import safe_divide, safe_log, meshgrid, interp2d def build_kernels_pixelated_convergence(pixelscale, n_pix): @@ -34,7 +34,7 @@ def build_kernels_pixelated_convergence(pixelscale, n_pix): *Unit: unitless* """ - x_mg, y_mg = get_meshgrid(pixelscale, 2 * n_pix, 2 * n_pix) + x_mg, y_mg = meshgrid(pixelscale, 2 * n_pix) # Shift to center kernels within pixel at index n_pix x_mg = x_mg - pixelscale / 2 y_mg = y_mg - pixelscale / 2 diff --git a/src/caustics/lenses/multiplane.py b/src/caustics/lenses/multiplane.py index a658c7bb..3e74c1fa 100644 --- a/src/caustics/lenses/multiplane.py +++ b/src/caustics/lenses/multiplane.py @@ -4,7 +4,7 @@ import torch from torch import Tensor -from ..constants import arcsec_to_rad, rad_to_arcsec, c_Mpc_s +from ..constants import arcsec_to_rad, rad_to_arcsec, c_Mpc_s, days_to_seconds from .base import ThickLens, NameType, CosmologyType, LensesType from ..parametrized import unpack from ..packed import Packed @@ -88,7 +88,8 @@ def _raytrace_helper( theta_x, theta_y = x, y # Store the time delays - TD = torch.zeros_like(x) + if shapiro_time_delay or geometric_time_delay: + TD = torch.zeros_like(x) for i in lens_planes: z_next = z_ls[i + 1] if i != lens_planes[-1] else z_s @@ -113,7 +114,7 @@ def _raytrace_helper( 
theta_y = theta_y - alpha_y # Compute time delay - tau_ij = (1 + z_ls[i]) * D_l * D_next / (D * c_Mpc_s) + tau_ij = (1 + z_ls[i]) * D_l * D_next / (D * c_Mpc_s) / days_to_seconds if shapiro_time_delay: beta_ij = D * D_s / (D_next * D_is) potential = self.lenses[i].potential( @@ -344,7 +345,7 @@ def time_delay( Tensor Time delay caused by the lensing. - *Unit: seconds* + *Unit: days* References ---------- diff --git a/src/caustics/lenses/nfw.py b/src/caustics/lenses/nfw.py index 91559bf6..8c03740b 100644 --- a/src/caustics/lenses/nfw.py +++ b/src/caustics/lenses/nfw.py @@ -176,7 +176,7 @@ def __init__( def get_scale_radius( self, *args, - params: Optional["Packed"] = None, + params: Optional[Packed] = None, z_l: Optional[Tensor] = None, x0: Optional[Tensor] = None, y0: Optional[Tensor] = None, diff --git a/src/caustics/lenses/pixelated_convergence.py b/src/caustics/lenses/pixelated_convergence.py index eed1925e..1003f758 100644 --- a/src/caustics/lenses/pixelated_convergence.py +++ b/src/caustics/lenses/pixelated_convergence.py @@ -24,7 +24,6 @@ class PixelatedConvergence(ThinLens): def __init__( self, pixelscale: Annotated[float, "pixelscale"], - n_pix: Annotated[int, "The number of pixels on each side of the grid"], cosmology: CosmologyType, z_l: ZLType = None, x0: Annotated[ @@ -78,10 +77,6 @@ def __init__( *Unit: arcsec* - n_pix: int - The number of pixels on each side of the grid. - - cosmology: Cosmology An instance of the cosmological parameters. @@ -145,7 +140,10 @@ def __init__( self.add_param("y0", y0) self.add_param("convergence_map", convergence_map, shape) - self.n_pix = n_pix + if convergence_map is not None: + self.n_pix = convergence_map.shape[0] + elif shape is not None: + self.n_pix = shape[0] self.pixelscale = pixelscale self.fov = self.n_pix * self.pixelscale self.use_next_fast_len = use_next_fast_len @@ -403,11 +401,6 @@ def convergence( *Unit: unitless* - Raises - ------ - NotImplementedError - This method is not implemented. - """ return interp2d( convergence_map, diff --git a/src/caustics/lenses/pixelated_potential.py b/src/caustics/lenses/pixelated_potential.py new file mode 100644 index 00000000..816176c7 --- /dev/null +++ b/src/caustics/lenses/pixelated_potential.py @@ -0,0 +1,288 @@ +# mypy: disable-error-code="index,dict-item" +from typing import Optional, Annotated, Union + +import torch +from torch import Tensor +import numpy as np + +from ..utils import interp_bicubic +from .base import ThinLens, CosmologyType, NameType, ZLType +from ..parametrized import unpack +from ..packed import Packed + +__all__ = ("PixelatedPotential",) + + +class PixelatedPotential(ThinLens): + _null_params = { + "x0": 0.0, + "y0": 0.0, + "potential_map": np.logspace(0, 1, 100, dtype=np.float32).reshape(10, 10), + } + + def __init__( + self, + pixelscale: Annotated[float, "pixelscale"], + cosmology: CosmologyType, + z_l: ZLType = None, + x0: Annotated[ + Optional[Union[Tensor, float]], + "The x-coordinate of the center of the grid", + True, + ] = torch.tensor(0.0), + y0: Annotated[ + Optional[Union[Tensor, float]], + "The y-coordinate of the center of the grid", + True, + ] = torch.tensor(0.0), + potential_map: Annotated[ + Optional[Tensor], + "A 2D tensor representing the potential map", + True, + ] = None, + shape: Annotated[ + Optional[tuple[int, ...]], "The shape of the potential map" + ] = None, + name: NameType = None, + ): + """Strong lensing with user provided kappa map + + PixelatedConvergence is a class for strong gravitational lensing with a + user-provided kappa map. 
It inherits from the ThinLens class. + This class enables the computation of deflection angles and + lensing potential by applying the user-provided kappa map to a + grid using either Fast Fourier Transform (FFT) or a 2D + convolution. + + Attributes + ---------- + name: string + The name of the PixelatedConvergence object. + + fov: float + The field of view in arcseconds. + + *Unit: arcsec* + + cosmology: Cosmology + An instance of the cosmological parameters. + + z_l: Optional[Tensor] + The redshift of the lens. + + *Unit: unitless* + + x0: Optional[Tensor] + The x-coordinate of the center of the grid. + + *Unit: arcsec* + + y0: Optional[Tensor] + The y-coordinate of the center of the grid. + + *Unit: arcsec* + + potential_map: Optional[Tensor] + A 2D tensor representing the potential map. + + *Unit: unitless* + + shape: Optional[tuple[int, ...]] + The shape of the potential map. + + """ + + super().__init__(cosmology, z_l, name=name) + + if potential_map is not None and potential_map.ndim != 2: + raise ValueError( + f"potential_map must be 2D. Received a {potential_map.ndim}D tensor)" + ) + elif shape is not None and len(shape) != 2: + raise ValueError(f"shape must specify a 2D tensor. Received shape={shape}") + + self.add_param("x0", x0) + self.add_param("y0", y0) + self.add_param("potential_map", potential_map, shape) + + self.pixelscale = pixelscale + if potential_map is not None: + self.n_pix = potential_map.shape[0] + elif shape is not None: + self.n_pix = shape[0] + else: + raise ValueError("Either potential_map or shape must be provided") + self.fov = self.n_pix * self.pixelscale + + @unpack + def reduced_deflection_angle( + self, + x: Tensor, + y: Tensor, + z_s: Tensor, + *args, + params: Optional["Packed"] = None, + z_l: Optional[Tensor] = None, + x0: Optional[Tensor] = None, + y0: Optional[Tensor] = None, + potential_map: Optional[Tensor] = None, + **kwargs, + ) -> tuple[Tensor, Tensor]: + """ + Compute the deflection angles at the specified positions using the given convergence map. + + Parameters + ---------- + x: Tensor + The x-coordinates of the positions to compute the deflection angles for. + + *Unit: arcsec* + + y: Tensor + The y-coordinates of the positions to compute the deflection angles for. + + *Unit: arcsec* + + z_s: Tensor + The source redshift. + + *Unit: unitless* + + params: Packed, optional + A dictionary containing additional parameters. + + Returns + ------- + x_component: Tensor + Deflection Angle in the x-direction. + + *Unit: arcsec* + + y_component: Tensor + Deflection Angle in the y-direction. + + *Unit: arcsec* + + """ + # TODO: rescale from fov units to arcsec + return tuple( + alpha.reshape(x.shape) / self.pixelscale + for alpha in interp_bicubic( + (x - x0).view(-1) / self.fov * 2, + (y - y0).view(-1) / self.fov * 2, + potential_map, + get_Y=False, + get_dY=True, + get_ddY=False, + ) + ) + + @unpack + def potential( + self, + x: Tensor, + y: Tensor, + z_s: Tensor, + *args, + params: Optional["Packed"] = None, + z_l: Optional[Tensor] = None, + x0: Optional[Tensor] = None, + y0: Optional[Tensor] = None, + potential_map: Optional[Tensor] = None, + **kwargs, + ) -> Tensor: + """ + Compute the lensing potential at the specified positions using the given convergence map. + + Parameters + ---------- + x: Tensor + The x-coordinates of the positions to compute the lensing potential for. + + *Unit: arcsec* + + y: Tensor + The y-coordinates of the positions to compute the lensing potential for. + + *Unit: arcsec* + + z_s: Tensor + The source redshift. 
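Like the updated `PixelatedConvergence`, the new `PixelatedPotential` infers `n_pix` from the supplied map, and it derives deflections and convergence from derivatives of the bicubically interpolated potential (roughly α = ∇ψ and κ = ½∇²ψ, rescaled from pixel units by the pixelscale). A hypothetical usage sketch:

```python
import torch
import caustics
from caustics.lenses import PixelatedPotential

cosmo = caustics.FlatLambdaCDM(name="cosmo")
psi = torch.rand(32, 32)  # illustrative potential map; n_pix inferred from its shape
lens = PixelatedPotential(pixelscale=0.1, cosmology=cosmo, z_l=0.5, potential_map=psi)

x, y = caustics.utils.meshgrid(0.1, 32)
z_s = torch.tensor(1.0)
ax, ay = lens.reduced_deflection_angle(x, y, z_s)  # from first derivatives of psi
kappa = lens.convergence(x, y, z_s)                # from second derivatives of psi
```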
+ + *Unit: unitless* + + params: (Packed, optional) + A dictionary containing additional parameters. + + Returns + ------- + Tensor + The lensing potential at the specified positions. + + *Unit: arcsec^2* + + """ + return interp_bicubic( + (x - x0).view(-1) / self.fov * 2, + (y - y0).view(-1) / self.fov * 2, + potential_map, + get_Y=True, + get_dY=False, + get_ddY=False, + )[0].reshape(x.shape) + + @unpack + def convergence( + self, + x: Tensor, + y: Tensor, + z_s: Tensor, + *args, + params: Optional["Packed"] = None, + z_l: Optional[Tensor] = None, + x0: Optional[Tensor] = None, + y0: Optional[Tensor] = None, + potential_map: Optional[Tensor] = None, + **kwargs, + ) -> Tensor: + """ + Compute the convergence at the specified positions. This method is not implemented. + + Parameters + ---------- + x: Tensor + The x-coordinates of the positions to compute the convergence for. + + *Unit: arcsec* + + y: Tensor + The y-coordinates of the positions to compute the convergence for. + + *Unit: arcsec* + + z_s: Tensor + The source redshift. + + *Unit: unitless* + + params: (Packed, optional) + A dictionary containing additional parameters. + + Returns + ------- + Tensor + The convergence at the specified positions. + + *Unit: unitless* + + """ + # TODO: rescale from fov units to arcsec + _, dY11, dY22 = interp_bicubic( + (x - x0).view(-1) / self.fov * 2, + (y - y0).view(-1) / self.fov * 2, + potential_map, + get_Y=False, + get_dY=False, + get_ddY=True, + ) + return (dY11 + dY22).reshape(x.shape) / (2 * self.pixelscale**2) diff --git a/src/caustics/lenses/tnfw.py b/src/caustics/lenses/tnfw.py index 3bd47572..4cc85841 100644 --- a/src/caustics/lenses/tnfw.py +++ b/src/caustics/lenses/tnfw.py @@ -290,7 +290,7 @@ def get_truncation_radius( return tau * scale_radius @unpack - def get_M0( + def M0( self, *args, params: Optional[Packed] = None, @@ -490,7 +490,7 @@ def convergence( d_l = self.cosmology.angular_diameter_distance(z_l, params) critical_density = self.cosmology.critical_surface_density(z_l, z_s, params) - M0 = self.get_M0(params) + M0 = self.M0(params) return func.convergence_tnfw( x0, y0, @@ -567,7 +567,7 @@ def mass_enclosed_2d( """ - M0 = self.get_M0(params) + M0 = self.M0(params) return func.mass_enclosed_2d_tnfw(r, scale_radius, tau, M0, self._F_mode) @unpack @@ -640,7 +640,7 @@ def physical_deflection_angle( """ d_l = self.cosmology.angular_diameter_distance(z_l, params) - M0 = self.get_M0(params) + M0 = self.M0(params) return func.physical_deflection_angle_tnfw( x0, y0, scale_radius, tau, x, y, M0, d_l, self._F_mode, self.s ) @@ -716,7 +716,7 @@ def potential( d_s = self.cosmology.angular_diameter_distance(z_s, params) d_ls = self.cosmology.angular_diameter_distance_z1z2(z_l, z_s, params) - M0 = self.get_M0(params) + M0 = self.M0(params) return func.potential_tnfw( x0, y0, scale_radius, tau, x, y, M0, d_l, d_s, d_ls, self._F_mode, self.s ) diff --git a/src/caustics/lenses/utils.py b/src/caustics/lenses/utils.py index fab3e8aa..87c1ade5 100644 --- a/src/caustics/lenses/utils.py +++ b/src/caustics/lenses/utils.py @@ -3,12 +3,11 @@ import torch from torch import Tensor -from ..utils import vmap_n -__all__ = ("get_pix_jacobian", "get_pix_magnification", "get_magnification") +__all__ = ("pixel_jacobian", "pixel_magnification", "magnification") -def get_pix_jacobian( +def pixel_jacobian( raytrace, x, y, z_s ) -> Tuple[Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]]: """Computes the Jacobian matrix of the partial derivatives of the @@ -47,7 +46,7 @@ def get_pix_jacobian( return jac -def 
get_pix_magnification(raytrace, x, y, z_s) -> Tensor: +def pixel_magnification(raytrace, x, y, z_s) -> Tensor: """ Computes the magnification at a single point on the lensing plane. The magnification is derived from the determinant @@ -81,14 +80,14 @@ def get_pix_magnification(raytrace, x, y, z_s) -> Tensor: *Unit: unitless* """ - jac = get_pix_jacobian(raytrace, x, y, z_s) + jac = pixel_jacobian(raytrace, x, y, z_s) return 1 / (jac[0][0] * jac[1][1] - jac[0][1] * jac[1][0]).abs() # fmt: skip -def get_magnification(raytrace, x, y, z_s) -> Tensor: +def magnification(raytrace, x, y, z_s) -> Tensor: """ Computes the magnification over a grid on the lensing plane. - This is done by calling `get_pix_magnification` + This is done by calling `pixel_magnification` for each point on the grid. Parameters @@ -119,4 +118,9 @@ def get_magnification(raytrace, x, y, z_s) -> Tensor: *Unit: unitless* """ - return vmap_n(get_pix_magnification, 2, (None, 0, 0, None))(raytrace, x, y, z_s) + return torch.reshape( + torch.func.vmap(pixel_magnification, in_dims=(None, 0, 0, None))( + raytrace, x.reshape(-1), y.reshape(-1), z_s + ), + x.shape, + ) diff --git a/src/caustics/light/__init__.py b/src/caustics/light/__init__.py index 0f254fbb..80d1932c 100644 --- a/src/caustics/light/__init__.py +++ b/src/caustics/light/__init__.py @@ -1,7 +1,6 @@ from .base import Source from .pixelated import Pixelated -from .probes import PROBESDataset from .sersic import Sersic from .pixelated_time import PixelatedTime -__all__ = ["Source", "Pixelated", "PixelatedTime", "PROBESDataset", "Sersic"] +__all__ = ["Source", "Pixelated", "PixelatedTime", "Sersic"] diff --git a/src/caustics/light/probes.py b/src/caustics/light/probes.py deleted file mode 100644 index e315a3d9..00000000 --- a/src/caustics/light/probes.py +++ /dev/null @@ -1,29 +0,0 @@ -import h5py -import numpy as np -import torch -from torch.utils.data import Dataset - -__all__ = ("PROBESDataset",) - - -# TODO: move elsewhere? -class PROBESDataset(Dataset): - def __init__(self, filepath, channels): - self.filepath = filepath - self.channels = list(np.sort(np.atleast_1d(channels))) - with h5py.File(self.filepath, "r") as hf: - self.size = hf["galaxies"].shape[0] - - def __len__(self): - return self.size - - def __getitem__(self, index): - with h5py.File(self.filepath, "r") as hf: - # Move channel first - return torch.movedim( - torch.tensor( - hf["galaxies"][index][..., self.channels], dtype=torch.float32 - ), - -1, - -3, - ) diff --git a/src/caustics/models/registry.py b/src/caustics/models/registry.py index 51311483..9ce811d3 100644 --- a/src/caustics/models/registry.py +++ b/src/caustics/models/registry.py @@ -30,7 +30,7 @@ class _KindRegistry(MutableMapping[str, "Parametrized | str"]): "Pixelated": "caustics.light.pixelated.Pixelated", "Sersic": "caustics.light.sersic.Sersic", } - simulators = {"Lens_Source": "caustics.sims.lens_source.Lens_Source"} + simulators = {"LensSource": "caustics.sims.lens_source.LensSource"} known_kinds = { **cosmology, diff --git a/src/caustics/models/utils.py b/src/caustics/models/utils.py index 0ba84f82..27060b75 100644 --- a/src/caustics/models/utils.py +++ b/src/caustics/models/utils.py @@ -272,7 +272,7 @@ def setup_simulator_models() -> type[Annotated]: # there's currently only one simulator # in the system. 
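The renamed `magnification` helper above now flattens the grid, vmaps the per-pixel computation, and reshapes the result back, instead of using `vmap_n`. A small standalone sketch of that pattern (the `per_pixel` function here is a stand-in, not the library's):

```python
import torch


def per_pixel(x, y):
    # Stand-in for pixel_magnification(raytrace, x, y, z_s): any scalar -> scalar map.
    return x**2 + y**2


x, y = torch.meshgrid(
    torch.linspace(-1, 1, 10), torch.linspace(-1, 1, 10), indexing="xy"
)
flat = torch.func.vmap(per_pixel)(x.reshape(-1), y.reshape(-1))
result = flat.reshape(x.shape)  # same shape as the input grid
```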
dependents = { - "Lens_Source": { + "LensSource": { "source": light_sources, "lens_light": light_sources, "lens": lenses, diff --git a/src/caustics/parametrized.py b/src/caustics/parametrized.py index e7ac56eb..205bf954 100644 --- a/src/caustics/parametrized.py +++ b/src/caustics/parametrized.py @@ -457,7 +457,7 @@ def __str__(self) -> str: f")" ) - def get_graph( + def graph( self, show_dynamic_params: bool = False, show_static_params: bool = False ) -> "graphviz.Digraph": # type: ignore """ @@ -476,9 +476,8 @@ def get_graph( The graph representation of the object. """ - def add_component(p: Parametrized, dot): - dot.attr("node", style="solid", color="black", shape="ellipse") - dot.node(p.name, f"{p.__class__.__name__}('{p.name}')") + components = {} + params = [] def add_params(p: Parametrized, dot): static = p.module_params.static.keys() @@ -486,29 +485,37 @@ def add_params(p: Parametrized, dot): dot.attr("node", style="solid", color="black", shape="box") for n in dynamic: + pname = f"{p.name}/{n}" + if pname in params: + continue + params.append(pname) if show_dynamic_params: - dot.node(f"{p.name}/{n}", n) - dot.edge(p.name, f"{p.name}/{n}") + dot.node(pname, n) + dot.edge(p.name, pname) dot.attr("node", style="filled", color="lightgrey", shape="box") for n in static: + pname = f"{p.name}/{n}" + if pname in params: + continue + params.append(pname) if show_static_params: - dot.node(f"{p.name}/{n}", n) - dot.edge(p.name, f"{p.name}/{n}") + dot.node(pname, n) + dot.edge(p.name, pname) + + def add_component(p: Parametrized, dot): + if p.name in components: + return + dot.attr("node", style="solid", color="black", shape="ellipse") + dot.node(p.name, f"{p.__class__.__name__}('{p.name}')") + components[p.name] = p + add_params(p, dot) + for child in p._childs.values(): + add_component(child, dot) + dot.edge(p.name, child.name) dot = graphviz.Digraph(strict=True) add_component(self, dot) - add_params(self, dot) - - for child in self._childs.values(): - add_component(child, dot) - - for parent in child._parents.values(): - if parent.name not in self._childs and parent.name != self.name: - continue - add_component(parent, dot) - dot.edge(parent.name, child.name) - add_params(child, dot) return dot diff --git a/src/caustics/sims/__init__.py b/src/caustics/sims/__init__.py index 812d627e..ca28e5f1 100644 --- a/src/caustics/sims/__init__.py +++ b/src/caustics/sims/__init__.py @@ -1,4 +1,5 @@ -from .lens_source import Lens_Source +from .lens_source import LensSource +from .microlens import Microlens from .simulator import Simulator -__all__ = ["Lens_Source", "Simulator"] +__all__ = ("LensSource", "Microlens", "Simulator") diff --git a/src/caustics/sims/lens_source.py b/src/caustics/sims/lens_source.py index 2844ea53..05a13d7d 100644 --- a/src/caustics/sims/lens_source.py +++ b/src/caustics/sims/lens_source.py @@ -8,7 +8,7 @@ from .simulator import Simulator, NameType from ..utils import ( - get_meshgrid, + meshgrid, gaussian_quadrature_grid, gaussian_quadrature_integrator, ) @@ -16,10 +16,10 @@ from ..light.base import Source -__all__ = ("Lens_Source",) +__all__ = ("LensSource",) -class Lens_Source(Simulator): +class LensSource(Simulator): """Lens image of a source. Straightforward simulator to sample a lensed image of a source @@ -37,7 +37,7 @@ class Lens_Source(Simulator): cosmo = caustics.FlatLambdaCDM() lens = caustics.lenses.SIS(cosmology = cosmo, x0 = 0., y0 = 0., th_ein = 1.) source = caustics.sources.Sersic(x0 = 0., y0 = 0., q = 0.5, phi = 0.4, n = 2., Re = 1., Ie = 1.) 
- sim = caustics.sims.Lens_Source(lens, source, pixelscale = 0.05, gridx = 100, gridy = 100, upsample_factor = 2, z_s = 1.) + sim = caustics.sims.LensSource(lens, source, pixelscale = 0.05, gridx = 100, gridy = 100, upsample_factor = 2, z_s = 1.) img = sim() plt.imshow(img, origin = "lower") @@ -103,6 +103,16 @@ def __init__( z_s: Annotated[ Optional[Union[Tensor, float]], "Redshift of the source", True ] = None, + x0: Annotated[ + Optional[Union[Tensor, float]], + "center of the fov for the lens source image", + True, + ] = 0.0, + y0: Annotated[ + Optional[Union[Tensor, float]], + "center of the fov for the lens source image", + True, + ] = 0.0, name: NameType = "sim", ): super().__init__(name) @@ -117,6 +127,8 @@ def __init__( self.psf = torch.as_tensor(psf) self.psf /= psf.sum() # ensure normalized self.add_param("z_s", z_s) + self.add_param("x0", x0) + self.add_param("y0", y0) self.pixelscale = pixelscale # Image grid @@ -137,7 +149,7 @@ def __init__( self.gridding[0] + self.psf_pad[0] * 2, self.gridding[1] + self.psf_pad[1] * 2, ) - self.grid = get_meshgrid( + self.grid = meshgrid( pixelscale / self.upsample_factor, self.n_pix[0] * self.upsample_factor, self.n_pix[1] * self.upsample_factor, @@ -217,7 +229,7 @@ def forward( psf_convolve: boolean when true the image will be convolved with the psf """ - (z_s,) = self.unpack(params) + z_s, x0, y0 = self.unpack(params) # Automatically turn off light for missing objects if self.source is None: @@ -227,9 +239,11 @@ def forward( if self.psf is None: psf_convolve = False + grid = (self.grid[0] + x0, self.grid[1] + y0) + if quad_level is not None and quad_level > 1: finegrid_x, finegrid_y, weights = gaussian_quadrature_grid( - self.pixelscale / self.upsample_factor, *self.grid, quad_level + self.pixelscale / self.upsample_factor, *grid, quad_level ) # Sample the source light @@ -241,7 +255,7 @@ def forward( mu_fine = self.source.brightness(bx, by, params) mu = gaussian_quadrature_integrator(mu_fine, weights) else: - bx, by = self.lens.raytrace(*self.grid, z_s, params) + bx, by = self.lens.raytrace(*grid, z_s, params) mu = self.source.brightness(bx, by, params) else: # Source is imaged without lensing @@ -249,10 +263,10 @@ def forward( mu_fine = self.source.brightness(finegrid_x, finegrid_y, params) mu = gaussian_quadrature_integrator(mu_fine, weights) else: - mu = self.source.brightness(*self.grid, params) + mu = self.source.brightness(*grid, params) else: # Source is not added to the scene - mu = torch.zeros_like(self.grid[0]) + mu = torch.zeros_like(grid[0]) # Sample the lens light if lens_light and self.lens_light is not None: @@ -260,7 +274,7 @@ def forward( mu_fine = self.lens_light.brightness(finegrid_x, finegrid_y, params) mu += gaussian_quadrature_integrator(mu_fine, weights) else: - mu += self.lens_light.brightness(*self.grid, params) + mu += self.lens_light.brightness(*grid, params) # Convolve the PSF if psf_convolve and self.psf is not None: diff --git a/src/caustics/sims/microlens.py b/src/caustics/sims/microlens.py new file mode 100644 index 00000000..1cfd4190 --- /dev/null +++ b/src/caustics/sims/microlens.py @@ -0,0 +1,117 @@ +from typing import Optional, Annotated, Union, Literal +import torch +from torch import Tensor + +from .simulator import Simulator, NameType +from ..lenses.base import Lens +from ..light.base import Source + + +__all__ = ("Microlens",) + + +class Microlens(Simulator): + """Computes the total flux from a microlens system within an fov. 
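The `LensSource` hunks above (renamed from `Lens_Source`) add an optional field-of-view centre: `x0`/`y0` are unpacked as parameters and simply shift the internal sampling grid before raytracing. A hypothetical construction sketch, reusing the objects from the test fixtures later in this diff:

```python
import caustics

cosmo = caustics.FlatLambdaCDM()
sie = caustics.SIE(cosmology=cosmo, name="lens")
src = caustics.Sersic(name="source")

# Recenter the simulated field of view by half an arcsecond in x (illustrative values).
sim = caustics.LensSource(
    lens=sie, source=src, pixelscale=0.05, pixels_x=100, x0=0.5, y0=0.0
)
dot = sim.graph(True, True)  # renamed from get_graph, see parametrized.py above
```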
+ + Straightforward simulator to compute the total flux a lensed image of a + source object within a given field of view. Constructs a sampling points + internally based on the user settings. + + Example usage:: python + + import matplotlib.pyplot as plt + import torch + import caustics + + cosmo = caustics.FlatLambdaCDM() + lens = caustics.lenses.SIS(cosmology = cosmo, x0 = 0., y0 = 0., th_ein = 1.) + source = caustics.sources.Sersic(x0 = 0., y0 = 0., q = 0.5, phi = 0.4, n = 2., Re = 1., Ie = 1.) + sim = caustics.sims.Microlens(lens, source, z_s = 1.) + + fov = torch.tensor([-1., 1., -1., 1.]) + print("Flux and uncertainty: ", sim(fov=fov)) + + Attributes + ---------- + lens: Lens + caustics lens mass model object + source: Source + caustics light object which defines the background source + z_s: optional + redshift of the source + name: string (default "sim") + a name for this simulator in the parameter DAG. + + """ # noqa: E501 + + def __init__( + self, + lens: Annotated[Lens, "caustics lens mass model object"], + source: Annotated[ + Source, "caustics light object which defines the background source" + ], + z_s: Annotated[ + Optional[Union[Tensor, float]], "Redshift of the source", True + ] = None, + name: NameType = "sim", + ): + super().__init__(name) + + self.lens = lens + self.source = source + + self.add_param("z_s", z_s) + + def forward( + self, + params, + fov: Tensor, + method: Literal["mcmc", "grid"] = "mcmc", + N_mcmc: int = 10000, + N_grid: int = 100, + ): + """Forward pass of the simulator. + + Parameters + ---------- + params: dict + Dictionary of parameters for the simulator + fov: Tensor + Field of view box of the simulation in arcseconds indexed as (x_min, x_max, y_min, y_max) + method: str (default "mcmc") + Method for sampling the image. 
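The `"mcmc"` branch documented here is a plain Monte Carlo estimate: total flux ≈ box area × mean surface brightness at uniformly drawn points, with standard error A·std/√N. A standalone sketch of that estimator (the Gaussian `brightness` function is purely illustrative):

```python
import torch


def mc_flux(brightness, fov, n=10_000):
    # fov = (x_min, x_max, y_min, y_max) in arcsec; brightness(x, y) -> surface brightness
    x = torch.rand(n) * (fov[1] - fov[0]) + fov[0]
    y = torch.rand(n) * (fov[3] - fov[2]) + fov[2]
    mu = brightness(x, y)
    area = (fov[1] - fov[0]) * (fov[3] - fov[2])
    return mu.mean() * area, mu.std() * area / n**0.5


total, err = mc_flux(lambda x, y: torch.exp(-(x**2 + y**2)), (-1.0, 1.0, -1.0, 1.0))
```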
Choose from "mcmc" or "grid" + N_mcmc: int + Number of sample points for the source sampling if method is "mcmc" + N_grid: int + Number of sample points for the sampling grid on each axis if method is "grid" + + Returns + ------- + Tensor + Total flux from the microlens system within the field of view + + Tensor + Error estimate on the total flux + + """ + (z_s,) = self.unpack(params) + + if method == "mcmc": + # Sample the source using MCMC + sample_x = torch.rand(N_mcmc) * (fov[1] - fov[0]) + fov[0] + sample_y = torch.rand(N_mcmc) * (fov[3] - fov[2]) + fov[2] + bx, by = self.lens.raytrace(sample_x, sample_y, z_s, params) + mu = self.source.brightness(bx, by, params) + A = (fov[1] - fov[0]) * (fov[3] - fov[2]) + return mu.mean() * A, mu.std() * A / N_mcmc**0.5 + elif method == "grid": + # Sample the source using a grid + x = torch.linspace(fov[0], fov[1], N_grid) + y = torch.linspace(fov[2], fov[3], N_grid) + sample_x, sample_y = torch.meshgrid(x, y, indexing="ij") + bx, by = self.lens.raytrace(sample_x, sample_y, z_s, params) + mu = self.source.brightness(bx, by, params) + A = (fov[1] - fov[0]) * (fov[3] - fov[2]) + return mu.mean() * A, mu.std() * A / N_grid + else: + raise ValueError(f"Invalid method: {method}, choose from 'mcmc' or 'grid'") diff --git a/src/caustics/tests.py b/src/caustics/tests.py index 7456e61d..15a90457 100644 --- a/src/caustics/tests.py +++ b/src/caustics/tests.py @@ -2,11 +2,11 @@ import torch -from caustics.sims import Lens_Source +from caustics.sims import LensSource from caustics.cosmology import FlatLambdaCDM from caustics.lenses import SIE, Multiplane from caustics.light import Sersic -from caustics.utils import gaussian, get_meshgrid +from caustics.utils import gaussian, meshgrid __all__ = ["test"] @@ -41,7 +41,7 @@ def _test_simulator_runs(device=DEVICE): psf = gaussian(0.05, 11, 11, 0.2, upsample=2, device=device) - sim = Lens_Source( + sim = LensSource( lens=lensmass, source=source, pixelscale=0.05, @@ -105,7 +105,7 @@ def _test_jacobian_autograd_vs_finitediff(device=DEVICE): # Models cosmology = FlatLambdaCDM(name="cosmo") lens = SIE(name="sie", cosmology=cosmology) - thx, thy = get_meshgrid(0.01, 20, 20, device=device) + thx, thy = meshgrid(0.01, 20, device=device) # Parameters z_s = torch.tensor(1.2, device=device) @@ -149,7 +149,7 @@ def _test_multiplane_jacobian(device=DEVICE): # Send to device lens = lens.to(device=device) - thx, thy = get_meshgrid(0.1, 10, 10, device=device) + thx, thy = meshgrid(0.1, 10, device=device) # Parameters z_s = torch.tensor(1.2, device=device) @@ -181,7 +181,7 @@ def _test_multiplane_jacobian_autograd_vs_finitediff(device=DEVICE): # Send to device lens = lens.to(device=device) - thx, thy = get_meshgrid(0.01, 10, 10, device=device) + thx, thy = meshgrid(0.01, 10, device=device) # Parameters z_s = torch.tensor(1.2, device=device) @@ -222,7 +222,7 @@ def _test_multiplane_effective_convergence(device=DEVICE): # Send to device lens = lens.to(device=device) - thx, thy = get_meshgrid(0.1, 10, 10, device=device) + thx, thy = meshgrid(0.1, 10, device=device) # Parameters z_s = torch.tensor(1.2, device=device) diff --git a/src/caustics/utils.py b/src/caustics/utils.py index 4b25e8f2..ccbe24d5 100644 --- a/src/caustics/utils.py +++ b/src/caustics/utils.py @@ -1,6 +1,6 @@ -# mypy: disable-error-code="misc" +# mypy: disable-error-code="misc", disable-error-code="attr-defined" from math import pi -from typing import Callable, Optional, Tuple, Union, Any +from typing import Callable, Optional, Tuple, Union, Any, Literal from importlib 
import import_module from functools import partial, lru_cache @@ -166,8 +166,8 @@ def to_elliptical(x, y, q: Tensor): return x, y / q -def get_meshgrid( - pixelscale, nx, ny, device=None, dtype=torch.float32 +def meshgrid( + pixelscale, nx, ny=None, device=None, dtype=torch.float32 ) -> Tuple[Tensor, Tensor]: """ Generates a 2D meshgrid based on the provided pixelscale and dimensions. @@ -190,6 +190,8 @@ def get_meshgrid( Tuple: [Tensor, Tensor] The generated meshgrid as a tuple of Tensors. """ + if ny is None: + ny = nx xs = torch.linspace(-1, 1, nx, device=device, dtype=dtype) * pixelscale * (nx - 1) / 2 # fmt: skip ys = torch.linspace(-1, 1, ny, device=device, dtype=dtype) * pixelscale * (ny - 1) / 2 # fmt: skip return torch.meshgrid([xs, ys], indexing="xy") @@ -257,7 +259,7 @@ def gaussian_quadrature_grid( ------- Usage would look something like:: python - X, Y = get_meshgrid(pixelscale, nx, ny) + X, Y = meshgrid(pixelscale, nx, ny) Xs, Ys, weight = gaussian_quadrature_grid(pixelscale, X, Y, quad_level) F = your_brightness_function(Xs, Ys, other, parameters) res = gaussian_quadrature_integrator(F, weight) @@ -302,7 +304,7 @@ def gaussian_quadrature_integrator( ------- Usage would look something like:: python - X, Y = get_meshgrid(pixelscale, nx, ny) + X, Y = meshgrid(pixelscale, nx, ny) Xs, Ys, weight = gaussian_quadrature_grid(pixelscale, X, Y, quad_level) F = your_brightness_function(Xs, Ys, other, parameters) res = gaussian_quadrature_integrator(F, weight) @@ -316,7 +318,7 @@ def quad( pixelscale: float, X: Tensor, Y: Tensor, - args: Optional[Tuple] = None, + args: Tuple = (), quad_level: int = 3, ): """ @@ -415,7 +417,12 @@ def _h_poly(t): return A @ tt -def interp1d(x: Tensor, y: Tensor, xs: Tensor, extend: str = "extrapolate") -> Tensor: +def interp1d( + x: Tensor, + y: Tensor, + xs: Tensor, + extend: Literal["extrapolate", "const", "linear"] = "extrapolate", +) -> Tensor: """Compute the 1D cubic spline interpolation for the given data points using PyTorch. @@ -459,7 +466,7 @@ def interp2d( im: Tensor, x: Tensor, y: Tensor, - method: str = "linear", + method: Literal["linear", "nearest"] = "linear", padding_mode: str = "zeros", ) -> Tensor: """ @@ -550,8 +557,8 @@ def interp3d( x: Tensor, y: Tensor, t: Tensor, - method: str = "linear", - padding_mode: str = "zeros", + method: Literal["linear", "nearest"] = "linear", + padding_mode: Literal["zeros", "extrapolate"] = "zeros", ) -> Tensor: """ Interpolates a 3D image at specified coordinates. @@ -595,10 +602,7 @@ def interp3d( """ if cu.ndim != 3: raise ValueError(f"im must be 3D (received {cu.ndim}D tensor)") - if x.ndim > 1: - raise ValueError(f"x must be 0 or 1D (received {x.ndim}D tensor)") - if y.ndim > 1: - raise ValueError(f"y must be 0 or 1D (received {y.ndim}D tensor)") + if t.ndim > 1: raise ValueError(f"t must be 0 or 1D (received {t.ndim}D tensor)") if padding_mode not in ["extrapolate", "zeros"]: @@ -662,6 +666,253 @@ def interp3d( return result +# Bicubic interpolation coefficients +# These are the coefficients for the bicubic interpolation kernel. 
+# To quote numerical recipes: +# The formulas that obtain the c’s from the function and derivative values +# are just a complicated linear transformation, with coefficients which, +# having been determined once in the mists of numerical history, can be +# tabulated and forgotten +BC = ( + (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), + (0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0), + (-3, 0, 0, 3, 0, 0, 0, 0, -2, 0, 0, -1, 0, 0, 0, 0), + (2, 0, 0, -2, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0), + (0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), + (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0), + (0, 0, 0, 0, -3, 0, 0, 3, 0, 0, 0, 0, -2, 0, 0, -1), + (0, 0, 0, 0, 2, 0, 0, -2, 0, 0, 0, 0, 1, 0, 0, 1), + (-3, 3, 0, 0, -2, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), + (0, 0, 0, 0, 0, 0, 0, 0, -3, 3, 0, 0, -2, -1, 0, 0), + (9, -9, 9, -9, 6, 3, -3, -6, 6, -6, -3, 3, 4, 2, 1, 2), + (-6, 6, -6, 6, -4, -2, 2, 4, -3, 3, 3, -3, -2, -1, -1, -2), + (2, -2, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), + (0, 0, 0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 1, 1, 0, 0), + (-6, 6, -6, 6, -3, -3, 3, 3, -4, 4, 2, -2, -2, -2, -1, -1), + (4, -4, 4, -4, 2, 2, -2, -2, 2, -2, -2, 2, 1, 1, 1, 1), +) + + +def bicubic_kernels(Z, d1, d2): + """ + This is just a quick script to compute the necessary derivatives using + finite differences. This is not the most accurate way to compute the + derivatives, but it is good enough for most purposes. + """ + dZ1 = torch.zeros_like(Z) + dZ2 = torch.zeros_like(Z) + dZ12 = torch.zeros_like(Z) + + # First derivatives on first axis + # df/dx = (f(x+h, y) - f(x-h, y)) / 2h + dZ1[1:-1] = (Z[:-2] - Z[2:]) / (2 * d1) + dZ1[0] = (Z[0] - Z[1]) / d1 + dZ1[-1] = (Z[-2] - Z[-1]) / d1 + # First derivatives on second axis + # df/dy = (f(x,y+h) - f(x,y-h)) / h + dZ2[:, 1:-1] = (Z[:, :-2] - Z[:, 2:]) / (2 * d2) + dZ2[:, 0] = (Z[:, 0] - Z[:, 1]) / d2 + dZ2[:, -1] = (Z[:, -2] - Z[:, -1]) / d2 + + # Second derivatives across both axes + # d2f/dxdy = (f(x-h, y-k) - f(x-h, y+k) - f(x+h, y-k) + f(x+h, y+k)) / (4hk) + dZ12[1:-1, 1:-1] = (Z[:-2, :-2] - Z[:-2, 2:] - Z[2:, :-2] + Z[2:, 2:]) / ( + 4 * d1 * d2 + ) + return dZ1, dZ2, dZ12 + + +def interp_bicubic( + x, + y, + Z, + dZ1=None, + dZ2=None, + dZ12=None, + get_Y: bool = True, + get_dY: bool = False, + get_ddY: bool = False, +): + """ + Compute bicubic interpolation of a 2D grid at arbitrary locations. This will + smoothly interpolate a grid of points, including smooth first derivatives + and smooth cross derivative (d^2Y/dxdy). For the derivatives, continuity is + enforced, though the transition may be sharp as higher order derivatives are + not considered. + + The interpolation requires knowing the values of the first derivative in + each axis and the cross derivative. If these are not provided, they will be + estimated using central differences. For this function, the derivatives + should be provided in pixel units. The interpolation will be more accurate + if an analytic value is available for the derivatives. + + See Numerical Recipes in C, Chapter 3 (specifically: "Higher Order for + Smoothness: Bicubic Interpolation") for more details. + + Parameters + ---------- + x : torch.Tensor + x-coordinates of the points to interpolate. Must be a 0D or 1D tensor. + It should be in (-1,1) fov units, meaning that -1 is the left edge of + the left pixel, and 1 is the right edge of the right pixel. + y : torch.Tensor + y-coordinates of the points to interpolate. Must be a 0D or 1D tensor. 
+ It should be in (-1,1) fov units, meaning that -1 is the bottom edge of + the bottom pixel, and 1 is the top edge of the top pixel. + Z : torch.Tensor + 2D grid of values to interpolate. The first axis corresponds to the + y-axis and the second axis to the x-axis. The values in Z correspond to + pixel center values, so Z[0,0] is the value at the center of the bottom + left corner pixel of the grid. The grid should be at least 2x2 so the + bicubic interpolation can go between the values. + dZ1 : torch.Tensor or None + First derivative of Z along the x-axis. If None, it will be estimated + using central differences. Note that the derivative should be computed + in pixel units, meaning that the distance from one pixel to the next is + considered "1" in these units. + dZ2 : torch.Tensor or None + First derivative of Z along the y-axis. If None, it will be estimated + using central differences. Note that the derivative should be computed + in pixel units, meaning that the distance from one pixel to the next is + considered "1" in these units. + dZ12 : torch.Tensor or None + Second derivative of Z along both axes. If None, it will be estimated + using central differences. Note that the derivative should be computed + in pixel units, meaning that the distance from one pixel to the next is + considered "1" in these units. + get_Y : bool + Whether to return the interpolated values. This will add the estimated Y + values to the return tuple + get_dY : bool + Whether to return the interpolated first derivatives. This will add dY1 + and dY2 to the return tuple + get_ddY : bool + Whether to return the interpolated second derivatives. This will add + dY12, dY11, and dY22 to the return tuple + + Returns + ------- + Y : torch.Tensor or None + Interpolated values at the given locations. Only returned if get_Y is + True + dY1 : torch.Tensor or None + Interpolated first derivative along the x-axis. Only returned if get_dY + is True + dY2 : torch.Tensor or None + Interpolated first derivative along the y-axis. Only returned if get_dY + is True + dY12 : torch.Tensor or None + Interpolated second derivative along both axes. Only returned if get_ddY + is True + dY11 : torch.Tensor or None + Interpolated second derivative along the x-axis. Only returned if + get_ddY is True + dY22 : torch.Tensor or None + Interpolated second derivative along the y-axis. 
Only returned if + get_ddY is True + """ + + if Z.ndim != 2: + raise ValueError(f"Z must be 2D (received {Z.ndim}D tensor)") + + if x.ndim > 1: + raise ValueError(f"x must be 0 or 1D (received {x.ndim}D tensor)") + if y.ndim > 1: + raise ValueError(f"y must be 0 or 1D (received {y.ndim}D tensor)") + + # Convert coordinates to pixel indices + h, w = Z.shape + x = 0.5 * ((x + 1) * w - 1) + x = x.clamp(-0.5, w - 0.5) + y = 0.5 * ((y + 1) * h - 1) + y = y.clamp(-0.5, h - 0.5) + + # Compute bicubic kernels if not provided + if dZ1 is None or dZ2 is None or dZ12 is None: + _dZ1, _dZ2, _dZ12 = bicubic_kernels(Z, 1.0, 1.0) + if dZ1 is None: + dZ1 = _dZ1 + if dZ2 is None: + dZ2 = _dZ2 + if dZ12 is None: + dZ12 = _dZ12 + + # Extract pixel values + x0 = x.floor().long() + y0 = y.floor().long() + x1 = x0 + 1 + y1 = y0 + 1 + x0 = x0.clamp(0, w - 2) + x1 = x1.clamp(1, w - 1) + y0 = y0.clamp(0, h - 2) + y1 = y1.clamp(1, h - 1) + + # Build interpolation vector + v = [] + v.append(Z[y0, x0]) + v.append(Z[y0, x1]) + v.append(Z[y1, x1]) + v.append(Z[y1, x0]) + v.append(dZ1[y0, x0]) + v.append(dZ1[y0, x1]) + v.append(dZ1[y1, x1]) + v.append(dZ1[y1, x0]) + v.append(dZ2[y0, x0]) + v.append(dZ2[y0, x1]) + v.append(dZ2[y1, x1]) + v.append(dZ2[y1, x0]) + v.append(dZ12[y0, x0]) + v.append(dZ12[y0, x1]) + v.append(dZ12[y1, x1]) + v.append(dZ12[y1, x0]) + v = torch.stack(v, dim=-1) + + # Compute interpolation coefficients + c = (torch.tensor(BC, dtype=v.dtype, device=v.device) @ v.unsqueeze(-1)).reshape( + -1, 4, 4 + ) + + # Compute interpolated values + return_interp = [] + t = torch.where( + (x < 0), (x % 1) - 1, torch.where(x >= w - 1, x % 1 + 1, x % 1) + ) # TODO: change to x - x0 + u = torch.where((y < 0), (y % 1) - 1, torch.where(y >= h - 1, y % 1 + 1, y % 1)) + if get_Y: + Y = torch.zeros_like(x) + for i in range(4): + for j in range(4): + Y = Y + c[:, i, j] * t**i * u**j + return_interp.append(Y) + if get_dY: + dY1 = torch.zeros_like(x) + dY2 = torch.zeros_like(x) + for i in range(4): + for j in range(4): + if i > 0: + dY1 = dY1 + i * c[:, i, j] * t ** (i - 1) * u**j + if j > 0: + dY2 = dY2 + j * c[:, i, j] * t**i * u ** (j - 1) + return_interp.append(dY1) + return_interp.append(dY2) + if get_ddY: + dY12 = torch.zeros_like(x) + dY11 = torch.zeros_like(x) + dY22 = torch.zeros_like(x) + for i in range(4): + for j in range(4): + if i > 0 and j > 0: + dY12 = dY12 + i * j * c[:, i, j] * t ** (i - 1) * u ** (j - 1) + if i > 1: + dY11 = dY11 + i * (i - 1) * c[:, i, j] * t ** (i - 2) * u**j + if j > 1: + dY22 = dY22 + j * (j - 1) * c[:, i, j] * t**i * u ** (j - 2) + return_interp.append(dY12) + return_interp.append(dY11) + return_interp.append(dY22) + return tuple(return_interp) + + def vmap_n( func: Callable, depth: int = 1, @@ -709,7 +960,7 @@ def vmap_n( return vmapd_func -def get_cluster_means(xs: Tensor, k: int): +def cluster_means(xs: Tensor, k: int): """ Computes cluster means using the k-means++ initialization algorithm. 
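The `interp_bicubic` helper added above returns a tuple whose contents follow the `get_Y` / `get_dY` / `get_ddY` flags, with query coordinates in (-1, 1) field-of-view units and derivatives in pixel units. A hypothetical usage sketch:

```python
import torch
from caustics.utils import interp_bicubic

Z = torch.rand(16, 16)             # illustrative 2D grid of pixel-center values
x = torch.linspace(-0.9, 0.9, 50)  # query points in (-1, 1) fov units
y = torch.zeros_like(x)

(vals,) = interp_bicubic(x, y, Z)                             # values only (default flags)
dY1, dY2 = interp_bicubic(x, y, Z, get_Y=False, get_dY=True)  # first derivatives, pixel units
```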
@@ -836,7 +1087,7 @@ def batch_lm( L_max=L_max, ) ) - L = L * torch.ones(B, device=X.device) + L = L * torch.ones(B, device=X.device, dtype=X.dtype) for _ in range(max_iter): Xnew, L, C = v_lm_step(X, Y, Cinv, L) if ( diff --git a/strip_output_keep_html.py b/strip_output_keep_html.py new file mode 100644 index 00000000..4ee84034 --- /dev/null +++ b/strip_output_keep_html.py @@ -0,0 +1,28 @@ +import sys +import nbformat +import re + + +def strip_output(nb): + for cell in nb.cells: + if cell.cell_type == "code": + # Preserve HTML comments in code cells + preserved_html = re.findall(r"", cell.source, re.DOTALL) + cell.outputs = [] + cell.execution_count = None + # Reinsert preserved HTML comments + for html in preserved_html: + cell.source += f"\n{html}\n" + elif cell.cell_type == "markdown": + pass # Preserving HTML in markdown cells + + +if __name__ == "__main__": + for filename in sys.argv[1:]: + with open(filename, "r", encoding="utf-8") as f: + nb = nbformat.read(f, as_version=nbformat.NO_CONVERT) + + strip_output(nb) + + with open(filename, "w", encoding="utf-8") as f: + nbformat.write(nb, f) diff --git a/tests/models/test_mod_api.py b/tests/models/test_mod_api.py index 88519d27..8c912a03 100644 --- a/tests/models/test_mod_api.py +++ b/tests/models/test_mod_api.py @@ -63,7 +63,7 @@ def sim_yaml(): simulator: name: minisim - kind: Lens_Source + kind: LensSource init_kwargs: # Single lense lens: *lens @@ -106,7 +106,7 @@ def sim_obj(): sie = caustics.SIE(cosmology=cosmology, name="lens") src = caustics.Sersic(name="source") lnslt = caustics.Sersic(name="lenslight") - return caustics.Lens_Source( + return caustics.LensSource( lens=sie, source=src, lens_light=lnslt, pixelscale=0.05, pixels_x=100 ) @@ -116,7 +116,7 @@ def test_build_simulator(sim_yaml_file, sim_obj, x_input): result = sim(x_input, quad_level=3) expected_result = sim_obj(x_input, quad_level=3) - assert sim.get_graph(True, True) + assert sim.graph(True, True) assert isinstance(result, torch.Tensor) assert torch.allclose(result, expected_result) @@ -199,7 +199,7 @@ def test_build_simulator_w_state(sim_yaml_file, sim_obj, x_input): newsim = caustics.build_simulator(sim_yaml_file) result = newsim(quad_level=3) expected_result = sim_obj(x_input, quad_level=3) - assert newsim.get_graph(True, True) + assert newsim.graph(True, True) assert isinstance(result, torch.Tensor) assert torch.allclose(result, expected_result) diff --git a/tests/test_base.py b/tests/test_base.py index 02e8c776..94ad97d8 100644 --- a/tests/test_base.py +++ b/tests/test_base.py @@ -39,6 +39,45 @@ def test(device): assert torch.all((sp_y - by).abs() < 1e-3) +def test_magnification(device): + z_l = torch.tensor(0.5, dtype=torch.float32, device=device) + z_s = torch.tensor(1.5, dtype=torch.float32, device=device) + + # Model + cosmology = FlatLambdaCDM(name="cosmo") + lens = SIE( + name="sie", + cosmology=cosmology, + z_l=z_l, + x0=torch.tensor(0.0), + y0=torch.tensor(0.0), + q=torch.tensor(0.4), + phi=torch.tensor(np.pi / 5), + b=torch.tensor(1.0), + ) + # Send to device + lens = lens.to(device) + + # Point in image plane + x = torch.tensor(0.1, device=device) + y = torch.tensor(0.1, device=device) + + mag = lens.magnification(x, y, z_s) + + assert np.isfinite(mag.item()) + assert mag.item() > 0 + + # grid in image plane + x = torch.linspace(-0.1, 0.1, 10, device=device) + y = torch.linspace(-0.1, 0.1, 10, device=device) + x, y = torch.meshgrid(x, y, indexing="ij") + + mag = lens.magnification(x, y, z_s) + + assert np.all(np.isfinite(mag.detach().cpu().numpy())) 
+    assert np.all(mag.detach().cpu().numpy() > 0)
+
+
 def test_quicktest(device):
     """
     Quick test to check that the built-in `test` module is working
diff --git a/tests/test_enclosed_mass.py b/tests/test_enclosed_mass.py
new file mode 100644
index 00000000..5c8887c8
--- /dev/null
+++ b/tests/test_enclosed_mass.py
@@ -0,0 +1,32 @@
+import caustics
+import torch
+
+
+def test_enclosed_mass_runs(device):
+    """
+    Check that the enclosed mass profile runs without error.
+    """
+    # Define a grid of points to test.
+    x, y = caustics.utils.meshgrid(0.2, 10, 10, device=device)
+
+    cosmo = caustics.FlatLambdaCDM(name="cosmo")
+
+    # Define the enclosed mass profile.
+    enclosed_mass = caustics.EnclosedMass(
+        cosmology=cosmo,
+        enclosed_mass=lambda r, p: 1 - torch.exp(-r / p),
+        z_l=0.5,
+        **caustics.EnclosedMass._null_params,
+    )
+    enclosed_mass.to(device)
+    # Calculate the enclosed mass profile.
+    z_s = torch.tensor(1.0, device=device)
+    ax, ay = enclosed_mass.reduced_deflection_angle(x, y, z_s)
+    assert torch.all(torch.isfinite(ax))
+    assert torch.all(torch.isfinite(ay))
+    k = enclosed_mass.convergence(x, y, z_s)
+    assert torch.all(torch.isfinite(k))
+
+
+if __name__ == "__main__":
+    test_enclosed_mass_runs(torch.device("cpu"))
diff --git a/tests/test_interpolate_image.py b/tests/test_interpolate_image.py
index d6b56489..00562180 100644
--- a/tests/test_interpolate_image.py
+++ b/tests/test_interpolate_image.py
@@ -2,7 +2,7 @@
 import torch
 from scipy.interpolate import RegularGridInterpolator
 
-from caustics.utils import get_meshgrid, interp2d, interp3d
+from caustics.utils import meshgrid, interp2d, interp3d
 from caustics.light import Pixelated
 
 
@@ -41,7 +41,7 @@ def test_consistency(device):
     nx = 50
     ny = 79
     res = 1.0
-    thx, thy = get_meshgrid(res, nx, ny, device=device)
+    thx, thy = meshgrid(res, nx, ny, device=device)
     thx = thx.double()
     thy = thy.double()
     scale_x = res * nx / 2
@@ -90,7 +90,7 @@ def test_pixelated_source(device):
     # Make sure pixelscale works as expected
     res = 0.05
     n = 32
-    x, y = get_meshgrid(res, n, n, device=device)
+    x, y = meshgrid(res, n, device=device)
     image = torch.ones(n, n, device=device)
     source = Pixelated(image=image, x0=0.0, y0=0.0, pixelscale=res)
     source.to(device=device)
diff --git a/tests/test_jacobian_lens_equation.py b/tests/test_jacobian_lens_equation.py
index 43e62c73..95d3fdf8 100644
--- a/tests/test_jacobian_lens_equation.py
+++ b/tests/test_jacobian_lens_equation.py
@@ -4,7 +4,7 @@
 
 from caustics.cosmology import FlatLambdaCDM
 from caustics.lenses import SIE, Multiplane
-from caustics.utils import get_meshgrid
+from caustics.utils import meshgrid
 
 
 def test_jacobian_autograd_vs_finitediff(device):
@@ -12,7 +12,7 @@
     cosmology = FlatLambdaCDM(name="cosmo")
     lens = SIE(name="sie", cosmology=cosmology)
     lens.to(device=device)
-    thx, thy = get_meshgrid(0.01, 20, 20, device=device)
+    thx, thy = meshgrid(0.01, 20, device=device)
 
     # Parameters
     z_s = torch.tensor(1.2, device=device)
@@ -55,7 +55,7 @@ def test_multiplane_jacobian(device):
         lenses=[SIE(name=f"sie_{i}", cosmology=cosmology) for i in range(len(xs))],
     )
     lens.to(device=device)
-    thx, thy = get_meshgrid(0.1, 10, 10, device=device)
+    thx, thy = meshgrid(0.1, 10, device=device)
 
     # Parameters
     z_s = torch.tensor(1.2, device=device)
@@ -84,7 +84,7 @@ def test_multiplane_jacobian_autograd_vs_finitediff(device):
         lenses=[SIE(name=f"sie_{i}", cosmology=cosmology) for i in range(len(xs))],
     )
     lens.to(device=device)
-    thx, thy = get_meshgrid(0.01, 10, 10, device=device)
+    thx, thy = meshgrid(0.01, 10, device=device)
 
     # Parameters
     z_s = torch.tensor(1.2, device=device)
@@ -122,7 +122,7 @@ def test_multiplane_effective_convergence(device):
         lenses=[SIE(name=f"sie_{i}", cosmology=cosmology) for i in range(len(xs))],
     )
     lens.to(device=device)
-    thx, thy = get_meshgrid(0.1, 10, 10, device=device)
+    thx, thy = meshgrid(0.1, 10, device=device)
 
     # Parameters
     z_s = torch.tensor(1.2, device=device)
diff --git a/tests/test_kappa_grid.py b/tests/test_kappa_grid.py
index 64c81244..64014ccd 100644
--- a/tests/test_kappa_grid.py
+++ b/tests/test_kappa_grid.py
@@ -2,13 +2,13 @@
 
 from caustics.cosmology import FlatLambdaCDM
 from caustics.lenses import PixelatedConvergence, PseudoJaffe
-from caustics.utils import get_meshgrid
+from caustics.utils import meshgrid
 
 
 def _setup(n_pix, mode, use_next_fast_len, padding="zero", device=None):
     # TODO understand why this test fails for resolutions != 0.025
     res = 0.025
-    thx, thy = get_meshgrid(res, n_pix, n_pix, device=device)
+    thx, thy = meshgrid(res, n_pix, device=device)
 
     z_l = torch.tensor(0.5, device=device)
     z_s = torch.tensor(2.1, device=device)
@@ -47,7 +47,6 @@ def _setup(n_pix, mode, use_next_fast_len, padding="zero", device=None):
     # Approximate calculations
     lens_kap = PixelatedConvergence(
         res,
-        n_pix,
         cosmology,
         z_l=z_l,
         shape=(n_pix, n_pix),
diff --git a/tests/test_lens_potential.py b/tests/test_lens_potential.py
index e9005e8f..31c7e6e3 100644
--- a/tests/test_lens_potential.py
+++ b/tests/test_lens_potential.py
@@ -12,9 +12,7 @@ def test_lens_potential_vs_deflection(device):
    Check for internal consistency of the lensing potential for all ThinLens objects against the deflection angles. The gradient of the potential should equal the deflection angle.
    """
    # Define a grid of points to test.
-    x = torch.linspace(-1, 1, 10, device=device)
-    y = torch.linspace(-1, 1, 10, device=device)
-    x, y = torch.meshgrid(x, y, indexing="ij")
+    x, y = caustics.utils.meshgrid(0.2, 10, 10, device=device)
 
     # Define a source redshift.
     z_s = 1.0
@@ -46,7 +44,12 @@
             z_l=z_l,
             **caustics.lenses.PixelatedConvergence._null_params,
             pixelscale=0.1,
-            n_pix=10,
+        ),
+        caustics.lenses.PixelatedPotential(
+            cosmology=cosmo,
+            z_l=z_l,
+            **caustics.lenses.PixelatedPotential._null_params,
+            pixelscale=0.2,
         ),
         caustics.lenses.Point(
             cosmology=cosmo, z_l=z_l, **caustics.lenses.Point._null_params
@@ -83,7 +86,6 @@
 
         # Compute the lensing potential.
         phi = lens.potential(x, y, z_s)
-
         # Compute the gradient of the lensing potential.
         phi_ax, phi_ay = torch.autograd.grad(
             phi, (x, y), grad_outputs=torch.ones_like(phi)
@@ -99,8 +101,8 @@
             assert torch.allclose(phi_ax, ax, rtol=1e0)
             assert torch.allclose(phi_ay, ay, rtol=1e0)
         else:
-            assert torch.allclose(phi_ax, ax)
-            assert torch.allclose(phi_ay, ay)
+            assert torch.allclose(phi_ax, ax, atol=1e-5)
+            assert torch.allclose(phi_ay, ay, atol=1e-5)
 
 
 def test_lens_potential_vs_convergence(device):
@@ -108,9 +110,7 @@
    Check for internal consistency of the lensing potential for all ThinLens objects against the convergence. The laplacian of the potential should equal the convergence.
    """
    # Define a grid of points to test.
-    x = torch.linspace(-1, 1, 10, device=device)
-    y = torch.linspace(-1, 1, 10, device=device)
-    x, y = torch.meshgrid(x, y, indexing="ij")
+    x, y = caustics.utils.meshgrid(0.2, 10, 10, device=device)
     x, y = x.clone().detach(), y.clone().detach()
 
     # Define a source redshift.
@@ -145,6 +145,12 @@
         #     pixelscale=0.2,
         #     n_pix=10,
         # ),  # cannot compute Hessian of PixelatedConvergence potential, always returns zeros due to bilinear interpolation
+        caustics.lenses.PixelatedPotential(
+            cosmology=cosmo,
+            z_l=z_l,
+            **caustics.lenses.PixelatedPotential._null_params,
+            pixelscale=0.2,
+        ),
         # caustics.lenses.Point(cosmology=cosmo, z_l=z_l, **caustics.lenses.Point._null_params),  # Point mass convergence is delta function
         caustics.lenses.PseudoJaffe(
             cosmology=cosmo, z_l=z_l, **caustics.lenses.PseudoJaffe._null_params
@@ -184,5 +190,7 @@
         # Check that the laplacian of the lensing potential equals the convergence.
         if name in ["NFW", "TNFW"]:
             assert torch.allclose(phi_kappa, kappa, atol=1e-4)
+        elif name in ["PixelatedConvergence", "PixelatedPotential"]:
+            assert torch.allclose(phi_kappa, kappa, atol=1e-4)
         else:
             assert torch.allclose(phi_kappa, kappa)
diff --git a/tests/test_masssheet.py b/tests/test_masssheet.py
index 051ddc52..a1f79ef2 100644
--- a/tests/test_masssheet.py
+++ b/tests/test_masssheet.py
@@ -3,7 +3,7 @@
 
 from caustics.cosmology import FlatLambdaCDM
 from caustics.lenses import MassSheet
-from caustics.utils import get_meshgrid
+from caustics.utils import meshgrid
 
 
 def test(sim_source, device, lens_models):
@@ -32,7 +32,7 @@ def test(sim_source, device, lens_models):
     z_s = torch.tensor(1.2)
     x = torch.tensor([0.5, 0.0, 0.0, 0.7])
 
-    thx, thy = get_meshgrid(0.01, 10, 10, device=device)
+    thx, thy = meshgrid(0.01, 10, device=device)
 
     lens.reduced_deflection_angle(thx, thy, z_s, x)
 
diff --git a/tests/test_microlens_sim.py b/tests/test_microlens_sim.py
new file mode 100644
index 00000000..067c5b3a
--- /dev/null
+++ b/tests/test_microlens_sim.py
@@ -0,0 +1,25 @@
+import caustics
+import torch
+
+
+def test_microlens_simulator_selfconsistent():
+    cosmology = caustics.FlatLambdaCDM()
+    sie = caustics.SIE(cosmology=cosmology, name="lens")
+    src = caustics.Sersic(name="source")
+
+    x = torch.tensor([
+        #  z_s  z_l  x0   y0   q    phi    b    x0   y0   q    phi     n    Re   Ie
+        1.5, 0.5, -0.2, 0.0, 0.4, 1.5708, 1.7, 0.0, 0.0, 0.5, -0.985, 1.3, 1.0, 5.0
+    ])  # fmt: skip
+    fov = torch.tensor((-1, -0.5, -0.25, 0.25))
+    sim = caustics.Microlens(lens=sie, source=src)
+    res_mcmc = sim(x, fov=fov, method="mcmc", N_mcmc=10000)
+    res_grid = sim(x, fov=fov, method="grid", N_grid=100)
+
+    # Ensure the flux estimates agree within 5sigma
+    assert (res_mcmc[0] - res_grid[0]).abs().detach().cpu().numpy() < 5 * res_grid[
+        1
+    ].detach().cpu().numpy()
+    # Ensure the uncertainties are small
+    assert res_mcmc[1].abs().detach().cpu().numpy() < 1e-2
+    assert res_grid[1].abs().detach().cpu().numpy() < 1e-2
diff --git a/tests/test_multiplane.py b/tests/test_multiplane.py
index 5aa13898..308823fe 100644
--- a/tests/test_multiplane.py
+++ b/tests/test_multiplane.py
@@ -10,7 +10,7 @@
 
 from caustics.cosmology import FlatLambdaCDM
 from caustics.lenses import SIE, Multiplane, PixelatedConvergence
-from caustics.utils import get_meshgrid
+from caustics.utils import meshgrid
 
 
 def test(sim_source, device, lens_models):
@@ -122,10 +122,9 @@ def test_multiplane_time_delay(device):
     n_pix = 10
     res = 0.05
     upsample_factor = 2
-    thx, thy = get_meshgrid(
+    thx, thy = meshgrid(
         res / upsample_factor,
         upsample_factor * n_pix,
-        upsample_factor * n_pix,
         dtype=torch.float32,
         device=device,
     )
@@ -184,7 +183,6 @@ def test_params(device):
         lens = PixelatedConvergence(
             name=f"plane_{p}",
             pixelscale=pixel_size,
-            n_pix=pixels,
             cosmology=cosmology,
             z_l=z[p],
             x0=0.0,
@@ -197,7 +195,7 @@ def test_params(device):
     multiplane_lens = Multiplane(cosmology=cosmology, lenses=planes)
     multiplane_lens.to(device=device)
     z_s = torch.tensor(z_s)
-    x, y = get_meshgrid(pixel_size, 32, 32, device=device)
+    x, y = meshgrid(pixel_size, 32, device=device)
     params = [torch.randn(pixels, pixels, device=device) for i in range(10)]
 
     # Test out the computation of a few quantities to make sure params are passed correctly
diff --git a/tests/test_parameter.py b/tests/test_parameter.py
index 8f1c49d8..e969443e 100644
--- a/tests/test_parameter.py
+++ b/tests/test_parameter.py
@@ -19,16 +19,15 @@ def test_shape_error_messages():
         # module = Pixelated(shape=(8,))
     fov = 7.8
-    n_pix = 20
     cosmo = FlatLambdaCDM()
 
     with pytest.raises(TypeError):
         # User cannot enter a list, only a tuple
         # (because of type checking and consistency with torch)
-        PixelatedConvergence(fov, n_pix, cosmo, shape=[8, 8])
+        PixelatedConvergence(fov, cosmo, shape=[8, 8])
 
     with pytest.raises(ValueError):
         # wrong number of dimensions
-        PixelatedConvergence(fov, n_pix, cosmo, shape=(8,))
+        PixelatedConvergence(fov, cosmo, shape=(8,))
 
 
 def test_repr():
diff --git a/tests/test_parametrized.py b/tests/test_parametrized.py
index b008d685..bc4eafa4 100644
--- a/tests/test_parametrized.py
+++ b/tests/test_parametrized.py
@@ -54,8 +54,8 @@ def test_params(Sim):
 
 def test_graph():
     sim, _ = setup_simulator()
-    sim.get_graph(True, True)
-    sim.get_graph()
+    sim.graph(True, True)
+    sim.graph()
 
 
 def test_unpack_all_modules_dynamic():
diff --git a/tests/test_pixel_grid.py b/tests/test_pixel_grid.py
index 0de8aa5b..6faa3aad 100644
--- a/tests/test_pixel_grid.py
+++ b/tests/test_pixel_grid.py
@@ -1,14 +1,14 @@
 import numpy as np
 from lenstronomy.Data.pixel_grid import PixelGrid
 
-from caustics.utils import get_meshgrid
+from caustics.utils import meshgrid
 
 
-def test_get_meshgrid(device):
+def test_meshgrid(device):
     res = 0.05
     nx = 200
     ny = 200
-    thx, thy = get_meshgrid(res, nx, ny, device=device)
+    thx, thy = meshgrid(res, nx, ny, device=device)
 
     res = 0.05  # size of pixel in angular coordinates
     # ra_at_xy_0, dec_at_xy_0 = (
diff --git a/tests/test_sersic.py b/tests/test_sersic.py
index b024df9c..c215bbb3 100644
--- a/tests/test_sersic.py
+++ b/tests/test_sersic.py
@@ -6,7 +6,7 @@
 from lenstronomy.LightModel.light_model import LightModel
 
 from caustics.light import Sersic
-from caustics.utils import get_meshgrid
+from caustics.utils import meshgrid
 
 
 def test(sim_source, device, light_models):
@@ -14,7 +14,7 @@
     res = 0.05
     nx = 200
     ny = 200
-    thx, thy = get_meshgrid(res, nx, ny, device=device)
+    thx, thy = meshgrid(res, nx, ny, device=device)
 
     if sim_source == "yaml":
         yaml_str = """\
diff --git a/tests/test_sie.py b/tests/test_sie.py
index 91748496..f4d06e3a 100644
--- a/tests/test_sie.py
+++ b/tests/test_sie.py
@@ -8,7 +8,7 @@
 
 from caustics.cosmology import FlatLambdaCDM
 from caustics.lenses import SIE
-from caustics.utils import get_meshgrid
+from caustics.utils import meshgrid
 
 
 def test(sim_source, device, lens_models):
@@ -65,7 +65,7 @@ def test_sie_time_delay():
     n_pix = 10
     res = 0.05
     upsample_factor = 2
-    thx, thy = get_meshgrid(
+    thx, thy = meshgrid(
         res / upsample_factor,
         upsample_factor * n_pix,
         upsample_factor * n_pix,
diff --git a/tests/test_simulator_runs.py b/tests/test_simulator_runs.py
index 91569067..0efa49ff 100644
--- a/tests/test_simulator_runs.py
+++ b/tests/test_simulator_runs.py
@@ -2,7 +2,7 @@
 
 import torch
 
-from caustics.sims import Lens_Source
+from caustics.sims import LensSource, Microlens
 from caustics.cosmology import FlatLambdaCDM
 from caustics.lenses import SIE
 from caustics.light import Sersic
@@ -67,7 +67,7 @@ def test_simulator_runs(sim_source, device, mocker):
 
     simulator:
         name: simulator
-        kind: Lens_Source
+        kind: LensSource
         params:
             z_s: 2.0
         init_kwargs:
@@ -104,7 +104,7 @@ def test_simulator_runs(sim_source, device, mocker):
 
     psf = gaussian(0.05, 11, 11, 0.2, upsample=2)
 
-    sim = Lens_Source(
+    sim = LensSource(
         name="simulator",
         lens=lensmass,
         source=source,
@@ -166,3 +166,18 @@ def test_simulator_runs(sim_source, device, mocker):
     # Check quadrature integration is accurate
     assert torch.allclose(sim(), sim(quad_level=3), rtol=1e-1)
     assert torch.allclose(sim(quad_level=3), sim(quad_level=5), rtol=1e-2)
+
+
+def test_microlens_simulator_runs():
+    cosmology = FlatLambdaCDM()
+    sie = SIE(cosmology=cosmology, name="lens")
+    src = Sersic(name="source")
+
+    x = torch.tensor([
+        #  z_s  z_l  x0   y0   q    phi    b    x0   y0   q    phi     n    Re   Ie
+        1.5, 0.5, -0.2, 0.0, 0.4, 1.5708, 1.7, 0.0, 0.0, 0.5, -0.985, 1.3, 1.0, 5.0
+    ])  # fmt: skip
+    fov = torch.tensor((-1, -0.5, -0.25, 0.25))
+    sim = Microlens(lens=sie, source=src)
+    sim(x, fov=fov)
+    sim(x, fov=fov, method="grid")
diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py
index b8cc9102..d1121b0d 100644
--- a/tests/utils/__init__.py
+++ b/tests/utils/__init__.py
@@ -11,7 +11,7 @@
 
 from caustics.lenses import ThinLens, EPL, NFW, ThickLens, PixelatedConvergence
 from caustics.light import Sersic, Pixelated
-from caustics.utils import get_meshgrid
+from caustics.utils import meshgrid
 from caustics.sims import Simulator
 from caustics.cosmology import FlatLambdaCDM
 from .models import mock_from_file
@@ -39,7 +39,7 @@ def __init__(self, name="simulator"):
         else:
             self.lens = EPL(self.cosmo, z_l=z_l, name="lens")
         self.sersic = Sersic(name="source")
-        self.thx, self.thy = get_meshgrid(0.04, n_pix, n_pix, device=device)
+        self.thx, self.thy = meshgrid(0.04, n_pix, device=device)
         self.n_pix = n_pix
 
         self.to(device=device)
@@ -114,7 +114,6 @@ def __init__(self, name="test"):
         self.epl = EPL(self.cosmo, z_l=z_l, name="lens")
         self.kappa = PixelatedConvergence(
             pixel_scale,
-            n_pix,
             self.cosmo,
             z_l=z_l,
             shape=(n_pix, n_pix),
@@ -127,7 +126,7 @@ def __init__(self, name="test"):
             shape=(n_pix, n_pix),
             name="source",
         )
-        self.thx, self.thy = get_meshgrid(pixel_scale, n_pix, n_pix, device=device)
+        self.thx, self.thy = meshgrid(pixel_scale, n_pix, device=device)
         self.n_pix = n_pix
 
         self.to(device=device)
@@ -186,7 +185,7 @@ def get_default_cosmologies(device=None):
 
 def setup_grids(res=0.05, n_pix=100, device=None):
     # Caustics setup
-    thx, thy = get_meshgrid(res, n_pix, n_pix, device=device)
+    thx, thy = meshgrid(res, n_pix, device=device)
     if device is not None:
         thx = thx.to(device=device)
         thy = thy.to(device=device)
diff --git a/tests/utils/models.py b/tests/utils/models.py
index 51ab882b..14f90590 100644
--- a/tests/utils/models.py
+++ b/tests/utils/models.py
@@ -33,10 +33,9 @@ def setup_complex_multiplane_yaml():
     res = 0.05
     upsample_factor = 2
     fov = res * n_pix
-    thx, thy = caustics.utils.get_meshgrid(
+    thx, thy = caustics.utils.meshgrid(
         res / upsample_factor,
-        upsample_factor * n_pix,
         upsample_factor * n_pix,
         dtype=torch.float32,
     )
     z_s = torch.tensor(1.5, dtype=torch.float32)
@@ -163,7 +162,7 @@ def setup_complex_multiplane_yaml():
     sim_yaml = obj_to_yaml({
         "simulator": {
             "name": "sim",
-            "kind": "Lens_Source",
+            "kind": "LensSource",
             "init_kwargs": {
                 "lens": f"*{lens.name}",
                 "source": "*source",