From c688ba49a999a97bd610fa42fd0385f69f6c555b Mon Sep 17 00:00:00 2001
From: Ian Bolliger
Date: Mon, 8 Jul 2024 20:02:55 -0400
Subject: [PATCH] Feature/update upstream (#38)

* change nodata_value to -99999 in topotools. The previous value -9999 did not have enough digits since the new etopo 2022 data has this as an actual topography value in the Mariana Trench (when written with `Z_format='%.0f'`).
* Fix bug related to tracking pressure at gauges
* simplify make_fgout_animation.py use of update_artists. These do not need to be passed into update, unpacked, and repacked, since the objects created in the script will be used in update. If blit==False then they are not needed at all.
* use image backend Agg in make_fgout_animation.py so animation size agrees with specified figure size
* set blit=False and auto-detect number of fgout frames
* clean up make_fgout_animation.py
* added new chile2010_fgmax-fgout/make_fgout_animation_with_transect.py
* Add dZ_format parameter to the DTopography.write function. The default is now '%.3f' (millimeter resolution), making smaller dtopo files than previously.
* handle 0 radius
* fix type on set_pressure
* fix duplicate r
* make comparison fp-safe
* handle underflow
* avoid underflow in wind setting too
* point to riemann/src for Riemann solvers
* Remove Riemann solvers from src/2d/bouss; Makefile.common points to riemann/src. Note that rpn2_geoclaw.f and geoclaw_riemann_utils.f were updated in riemann/src to handle 5 waves, and rpt2_geoclaw_sym.f was discarded in favor of the standard rpt2.f, which works fine with 5 waves.
* Initial CI GitHub action script
* Update testing.yml
* Add checking out of clawpack and geoclaw
* Add lint and testing to CI
* Disable linting for the time being
* Re-enable linting but only for geoclaw
* Exclude old_topotools.py. Do not want to touch the old topotools file, which is kept for reference.
* Fix up gauge plotting for storm surge. Includes fixes for a couple of minor bugs related to gauge plotting. The major change involves how we now plot the gauge data.
* Add dry gauge plotting
* create gauge filenames that allow more than 5 digits in the gauge number. If fewer than 5 digits, still add zero padding, e.g. gauge00012.txt, but no longer throw an error for large gauge numbers, e.g. gauge1234567.txt.
* cleaner way to zero pad only if fewer than 5 digits, using I0.5 format
* Cleanup and reimplement reading of ATCF and writing of GeoClaw storms
* Minor string comparison bug fix
* Extract name of storm from file name
* Minor tweaks and fixes
* Fix missing comma
* AutoPEP8 surge code
* add fgout output_style parameter and support for array of output_times
* remove trailing whitespaces in fgout_module.f90
* Refactor fgout_module.f90 so it works for either GeoClaw or D-Claw, and add support in fgout_tools.py for a new attribute dclaw to set in setrun.py to indicate D-Claw, in which case 7 components of q are output instead of 4. Support for eventually indicating fewer components to output.
* Use geoclaw module rho
* Remove module-level parameters that were not needed or were specific. These mostly pertained to the CLE code.
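The fgout bullets above (output_style with an array of output_times, and the new dclaw flag) correspond to a grid specification in setrun.py. The fragment below is a minimal sketch of that usage; the attribute names (nx/ny, x1/x2, output_format) and the rundata.fgout_data.fgout_grids location are assumptions based on fgout_tools conventions and should be checked against clawpack.geoclaw.fgout_tools.FGoutGrid.

```python
# Hypothetical setrun.py helper; attribute names are assumptions, not the
# definitive API of fgout_tools.FGoutGrid.
from clawpack.geoclaw import fgout_tools


def add_fgout_grid(rundata):
    """Append one fgout grid specification to an existing rundata object."""
    fgout = fgout_tools.FGoutGrid()
    fgout.fgno = 1
    fgout.point_style = 2                  # uniform rectangular grid
    fgout.output_format = 'ascii'
    fgout.nx, fgout.ny = 200, 150          # grid resolution (assumed names)
    fgout.x1, fgout.x2 = -92.0, -83.0
    fgout.y1, fgout.y2 = 27.5, 30.5

    # New in this PR: output_style == 2 takes an explicit array of output
    # times instead of the (tstart, tend, nout) triple of output_style == 1.
    fgout.output_style = 2
    fgout.output_times = [0.0, 1800.0, 3600.0, 7200.0]

    # Also new: flag D-Claw runs so 7 components of q are written instead of 4.
    fgout.dclaw = False

    rundata.fgout_data.fgout_grids.append(fgout)
    return rundata
```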
* Initial implementation of rotation control * Minor bugfixes and rearranging * Fix bugs in the non-spherical coordinates for storms" * fix fgout_tools.FGoutGrid.read_fgout_grids_data for time array Now that `output_style==2` is supported for fgout grids (an array of times, see https://github.com/clawpack/geoclaw/pull/617), the function fgout_tools.FGoutGrid.read_fgout_grids_data needs to be fixed to properly read in the new format of `fgout_grids.data`, also note that `nout` now comes before `tstart` and `tend` for `output_style==1`. --------- Co-authored-by: Randy LeVeque Co-authored-by: Kyle Mandli --- .github/workflows/testing.yml | 7 +- examples/storm-surge/ike/setplot.py | 43 +- examples/storm-surge/isaac/setplot.py | 116 ++- examples/storm-surge/isaac/setrun.py | 17 +- src/2d/bouss/Makefile.bouss | 6 +- src/2d/bouss/geoclaw_riemann_utils.f | 658 ----------------- src/2d/bouss/rpn2_geoclaw.f | 305 -------- src/2d/bouss/rpt2_geoclaw_sym.f | 260 ------- src/2d/shallow/fgout_module.f90 | 512 ++++++++----- src/2d/shallow/gauges_module.f90 | 21 +- src/2d/shallow/src2.f90 | 12 +- src/2d/shallow/surge/model_storm_module.f90 | 98 ++- src/2d/shallow/surge/storm_module.f90 | 37 +- src/python/geoclaw/data.py | 33 +- src/python/geoclaw/fgout_tools.py | 780 ++++++++++---------- src/python/geoclaw/surge/plot.py | 71 +- src/python/geoclaw/surge/quality.py | 147 ++-- src/python/geoclaw/surge/storm.py | 400 +++++----- src/python/geoclaw/util.py | 3 +- 19 files changed, 1248 insertions(+), 2278 deletions(-) delete mode 100644 src/2d/bouss/geoclaw_riemann_utils.f delete mode 100644 src/2d/bouss/rpn2_geoclaw.f delete mode 100644 src/2d/bouss/rpt2_geoclaw_sym.f diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index b672f0211..4d1d669d4 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -40,12 +40,7 @@ jobs: - name: Setup geoclaw run: | cd geoclaw - if [[ "${{ github.event_name }}" == "pull_request" ]]; then - git fetch origin pull/${{ github.event.pull_request.number }}/merge:PR - git checkout PR - else - git checkout ${{ github.ref_name }} - fi + git checkout ${{ github.ref }} - name: Lint with flake8 run: | cd geoclaw diff --git a/examples/storm-surge/ike/setplot.py b/examples/storm-surge/ike/setplot.py index 5784f9ce5..865093be4 100644 --- a/examples/storm-surge/ike/setplot.py +++ b/examples/storm-surge/ike/setplot.py @@ -155,38 +155,23 @@ def friction_after_axes(cd): plotfigure.show = True plotfigure.clf_each_gauge = True - # Set up for axes in this figure: plotaxes = plotfigure.new_plotaxes() - plotaxes.xlimits = [-2, 1] - # plotaxes.xlabel = "Days from landfall" - # plotaxes.ylabel = "Surface (m)" - plotaxes.ylimits = [-1, 5] - plotaxes.title = 'Surface' - - def gauge_afteraxes(cd): - - axes = plt.gca() - surgeplot.plot_landfall_gauge(cd.gaugesoln, axes) - - # Fix up plot - in particular fix time labels - axes.set_title('Station %s' % cd.gaugeno) - axes.set_xlabel('Days relative to landfall') - axes.set_ylabel('Surface (m)') - axes.set_xlim([-2, 1]) - axes.set_ylim([-1, 5]) - axes.set_xticks([-2, -1, 0, 1]) - axes.set_xticklabels([r"$-2$", r"$-1$", r"$0$", r"$1$"]) - axes.grid(True) - plotaxes.afteraxes = gauge_afteraxes - - # Plot surface as blue curve: + plotaxes.time_scale = 1 / (24 * 60**2) + plotaxes.grid = True + plotaxes.xlimits = 'auto' + plotaxes.ylimits = 'auto' + plotaxes.title = "Surface" + plotaxes.ylabel = "Surface (m)" + plotaxes.time_label = "Days relative to landfall" + plotitem = plotaxes.new_plotitem(plot_type='1d_plot') - 
# plotitem.plot_var = 3 - # plotitem.plotstyle = 'b-' - - # + plotitem.plot_var = surgeplot.gauge_surface + # Plot red area if gauge is dry + plotitem = plotaxes.new_plotitem(plot_type='1d_plot') + plotitem.plot_var = surgeplot.gauge_dry_regions + plotitem.kwargs = {"color":'lightcoral', "linewidth":5} + # Gauge Location Plot - # def gauge_location_afteraxes(cd): plt.subplots_adjust(left=0.12, bottom=0.06, right=0.97, top=0.97) surge_afteraxes(cd) diff --git a/examples/storm-surge/isaac/setplot.py b/examples/storm-surge/isaac/setplot.py index 5fb7aebc3..8fe715b15 100644 --- a/examples/storm-surge/isaac/setplot.py +++ b/examples/storm-surge/isaac/setplot.py @@ -1,20 +1,18 @@ - -from __future__ import absolute_import -from __future__ import print_function +#!/usr/bin/env python import os +import warnings +import datetime -import numpy +import numpy as np import matplotlib.pyplot as plt -import datetime import clawpack.visclaw.colormaps as colormap import clawpack.visclaw.gaugetools as gaugetools import clawpack.clawutil.data as clawutil import clawpack.amrclaw.data as amrclaw import clawpack.geoclaw.data as geodata - - +import clawpack.geoclaw.util as geoutil import clawpack.geoclaw.surge.plot as surgeplot try: @@ -88,9 +86,9 @@ def friction_after_axes(cd): regions = {"Gulf": {"xlimits": (clawdata.lower[0], clawdata.upper[0]), "ylimits": (clawdata.lower[1], clawdata.upper[1]), "figsize": (6.4, 4.8)}, - "Louisiana": {"xlimits": (-92, -83), - "ylimits": (27.5, 30.5), - "figsize": (8, 2.7)}} + "Louisiana": {"xlimits": (-92, -83), + "ylimits": (27.5, 30.5), + "figsize": (8, 2.7)}} for (name, region_dict) in regions.items(): @@ -175,40 +173,82 @@ def friction_after_axes(cd): # ======================================================================== # Figures for gauges # ======================================================================== + def plot_observed(current_data): + """Fetch and plot gauge data for gauges used.""" + + # Map GeoClaw gauge number to NOAA gauge number and location/name + gauge_mapping = {1: [8761724, "Grand Isle, LA"], + 2: [8760922, 'Pilots Station East, SW Pass, LA']} + + station_id, station_name = gauge_mapping[current_data.gaugesoln.id] + landfall_time = np.datetime64(datetime.datetime(2012, 8, 29, 0)) + begin_date = datetime.datetime(2012, 8, 27) + end_date = datetime.datetime(2012, 8, 31) + + # Fetch data if needed + date_time, water_level, tide = geoutil.fetch_noaa_tide_data(station_id, + begin_date, + end_date) + if water_level is None: + print("*** Could not fetch gauge {}.".format(station_id)) + else: + # Convert to seconds relative to landfall + t = (date_time - landfall_time) / np.timedelta64(1, 's') + t /= (24 * 60**2) + + # Detide + water_level -= tide + + # Plot data + ax = plt.gca() + ax.plot(t, water_level, color='lightgray', marker='x') + ax.set_title(station_name) + ax.legend(['Computed', "Observed"]) + + plotfigure = plotdata.new_plotfigure(name='Gauge Surfaces', figno=300, type='each_gauge') plotfigure.show = True plotfigure.clf_each_gauge = True - # Set up for axes in this figure: plotaxes = plotfigure.new_plotaxes() - plotaxes.xlimits = [-2, 1] - # plotaxes.xlabel = "Days from landfall" - # plotaxes.ylabel = "Surface (m)" - plotaxes.ylimits = [-1, 5] - plotaxes.title = 'Surface' - - def gauge_afteraxes(cd): - - axes = plt.gca() - landfall = 0. 
- surgeplot.plot_landfall_gauge(cd.gaugesoln, axes, landfall=landfall) - - # Fix up plot - in particular fix time labels - axes.set_title('Station %s' % cd.gaugeno) - axes.set_xlabel('Days relative to landfall') - axes.set_ylabel('Surface (m)') - axes.set_xlim([-2, 1]) - axes.set_ylim([-1, 5]) - axes.set_xticks([-2, -1, 0, 1]) - axes.set_xticklabels([r"$-2$", r"$-1$", r"$0$", r"$1$"]) - axes.grid(True) - plotaxes.afteraxes = gauge_afteraxes - - # Plot surface as blue curve: + plotaxes.time_scale = 1 / (24 * 60**2) + plotaxes.grid = True + plotaxes.xlimits = [-2, 1.5] + plotaxes.ylimits = [-0.25, 1] + plotaxes.title = "Surface" + plotaxes.ylabel = "Surface (m)" + plotaxes.time_label = "Days relative to landfall" + plotaxes.afteraxes = plot_observed + + plotitem = plotaxes.new_plotitem(plot_type='1d_plot') + plotitem.plot_var = surgeplot.gauge_surface + # Plot red area if gauge is dry plotitem = plotaxes.new_plotitem(plot_type='1d_plot') - plotitem.plot_var = 3 - plotitem.plotstyle = 'b-' + plotitem.plot_var = surgeplot.gauge_dry_regions + plotitem.kwargs = {"color":'lightcoral', "linewidth":5} + + # Gauge Location Plot + def gauge_location_afteraxes(cd): + plt.subplots_adjust(left=0.12, bottom=0.06, right=0.97, top=0.97) + surge_afteraxes(cd) + gaugetools.plot_gauge_locations(cd.plotdata, gaugenos='all', + format_string='ko', add_labels=True) + + plotfigure = plotdata.new_plotfigure(name="Gauge Locations") + plotfigure.show = True + + # Set up for axes in this figure: + plotaxes = plotfigure.new_plotaxes() + plotaxes.title = 'Gauge Locations' + plotaxes.scaled = True + plotaxes.xlimits = regions['Louisiana']['xlimits'] + plotaxes.ylimits = regions['Louisiana']['ylimits'] + plotaxes.afteraxes = gauge_location_afteraxes + surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits) + surgeplot.add_land(plotaxes, bounds=[0.0, 20.0]) + plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10 + plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10 # ----------------------------------------- # Parameters used only when creating html and/or latex hardcopy @@ -217,7 +257,7 @@ def gauge_afteraxes(cd): plotdata.printfigs = True # print figures plotdata.print_format = 'png' # file format plotdata.print_framenos = 'all' # list of frames to print - plotdata.print_gaugenos = [1, 2, 3, 4] # list of gauges to print + plotdata.print_gaugenos = 'all' # list of gauges to print plotdata.print_fignos = 'all' # list of figures to print plotdata.html = True # create html files of plots? plotdata.latex = True # create latex file of plots? diff --git a/examples/storm-surge/isaac/setrun.py b/examples/storm-surge/isaac/setrun.py index d6d63eb93..4e8299286 100644 --- a/examples/storm-surge/isaac/setrun.py +++ b/examples/storm-surge/isaac/setrun.py @@ -313,17 +313,16 @@ def setrun(claw_pkg='geoclaw'): regions = rundata.regiondata.regions # to specify regions of refinement append lines of the form # [minlevel,maxlevel,t1,t2,x1,x2,y1,y2] - # Gauges from Ike AWR paper (2011 Dawson et al) - rundata.gaugedata.gauges.append([1, -95.04, 29.07, - rundata.clawdata.t0, - rundata.clawdata.tfinal]) - rundata.gaugedata.gauges.append([2, -94.71, 29.28, - rundata.clawdata.t0, - rundata.clawdata.tfinal]) - rundata.gaugedata.gauges.append([3, -94.39, 29.49, + + # Pilots Station East, S.W. 
Pass, LA - 28°55.9429'N, 89°24.4445'W + # https://tidesandcurrents.noaa.gov/stationhome.html?id=8760922 + rundata.gaugedata.gauges.append([1, -89.40740833, 28.93238167, rundata.clawdata.t0, rundata.clawdata.tfinal]) - rundata.gaugedata.gauges.append([4, -94.13, 29.58, + + # Grand Isle, LA - 29°15.8761'N 89°57.4960'W + # https://tidesandcurrents.noaa.gov/stationhome.html?id=8761724 + rundata.gaugedata.gauges.append([2, -89.95826667, 29.26460167, rundata.clawdata.t0, rundata.clawdata.tfinal]) diff --git a/src/2d/bouss/Makefile.bouss b/src/2d/bouss/Makefile.bouss index 225e7a539..07a4e6c01 100644 --- a/src/2d/bouss/Makefile.bouss +++ b/src/2d/bouss/Makefile.bouss @@ -167,6 +167,6 @@ COMMON_SOURCES += \ $(BOUSSLIB)/resetBoussStuff.f \ $(BOUSSLIB)/simpleBound.f90 \ $(BOUSSLIB)/umfpack_support.f \ - $(BOUSSLIB)/rpn2_geoclaw.f \ - $(BOUSSLIB)/rpt2_geoclaw_sym.f \ - $(BOUSSLIB)/geoclaw_riemann_utils.f \ + $(CLAW)/riemann/src/rpn2_geoclaw.f \ + $(CLAW)/riemann/src/rpt2_geoclaw.f \ + $(CLAW)/riemann/src/geoclaw_riemann_utils.f \ diff --git a/src/2d/bouss/geoclaw_riemann_utils.f b/src/2d/bouss/geoclaw_riemann_utils.f deleted file mode 100644 index 4745cf43d..000000000 --- a/src/2d/bouss/geoclaw_riemann_utils.f +++ /dev/null @@ -1,658 +0,0 @@ -c----------------------------------------------------------------------- - subroutine riemann_aug_JCP(maxiter,meqn,mwaves,hL,hR,huL,huR, - & hvL,hvR,bL,bR,uL,uR,vL,vR,phiL,phiR,pL,pR,sE1,sE2,drytol,g,rho, - & sw,fw) - - ! solve shallow water equations given single left and right states - ! This solver is described in J. Comput. Phys. (6): 3089-3113, March 2008 - ! Augmented Riemann Solvers for the Shallow Equations with Steady States and Inundation - - ! To use the original solver call with maxiter=1. - - ! This solver allows iteration when maxiter > 1. The iteration seems to help with - ! instabilities that arise (with any solver) as flow becomes transcritical over variable topo - ! due to loss of hyperbolicity. - - implicit none - - !input - integer meqn,mwaves,maxiter - double precision fw(meqn,mwaves) - double precision sw(mwaves) - double precision hL,hR,huL,huR,bL,bR,uL,uR,phiL,phiR,sE1,sE2 - double precision hvL,hvR,vL,vR,pL,pR - double precision drytol,g,rho - - - !local - integer m,mw,k,iter - double precision A(3,3) - double precision r(3,3) - double precision lambda(3) - double precision del(3) - double precision beta(3) - - double precision delh,delhu,delphi,delb,delnorm - double precision rare1st,rare2st,sdelta,raremin,raremax - double precision criticaltol,convergencetol,raretol - double precision criticaltol_2, hustar_interface - double precision s1s2bar,s1s2tilde,hbar,hLstar,hRstar,hustar - double precision huRstar,huLstar,uRstar,uLstar,hstarHLL - double precision deldelh,deldelphi,delP - double precision s1m,s2m,hm - double precision det1,det2,det3,determinant - - logical rare1,rare2,rarecorrector,rarecorrectortest,sonic - - !determine del vectors - delh = hR-hL - delhu = huR-huL - delphi = phiR-phiL - delb = bR-bL - delP = pR - pL - delnorm = delh**2 + delphi**2 - - call riemanntype(hL,hR,uL,uR,hm,s1m,s2m,rare1,rare2, - & 1,drytol,g) - - - lambda(1)= min(sE1,s2m) !Modified Einfeldt speed - lambda(3)= max(sE2,s1m) !Modified Eindfeldt speed - sE1=lambda(1) - sE2=lambda(3) - lambda(2) = 0.d0 ! ### Fix to avoid uninitialized value in loop on mw -- Correct?? ### - - - hstarHLL = max((huL-huR+sE2*hR-sE1*hL)/(sE2-sE1),0.d0) ! 
middle state in an HLL solve - -c !determine the middle entropy corrector wave------------------------ - rarecorrectortest=.false. - rarecorrector=.false. - if (rarecorrectortest) then - sdelta=lambda(3)-lambda(1) - raremin = 0.5d0 - raremax = 0.9d0 - if (rare1.and.sE1*s1m.lt.0.d0) raremin=0.2d0 - if (rare2.and.sE2*s2m.lt.0.d0) raremin=0.2d0 - if (rare1.or.rare2) then - !see which rarefaction is larger - rare1st=3.d0*(sqrt(g*hL)-sqrt(g*hm)) - rare2st=3.d0*(sqrt(g*hR)-sqrt(g*hm)) - if (max(rare1st,rare2st).gt.raremin*sdelta.and. - & max(rare1st,rare2st).lt.raremax*sdelta) then - rarecorrector=.true. - if (rare1st.gt.rare2st) then - lambda(2)=s1m - elseif (rare2st.gt.rare1st) then - lambda(2)=s2m - else - lambda(2)=0.5d0*(s1m+s2m) - endif - endif - endif - if (hstarHLL.lt.min(hL,hR)/5.d0) rarecorrector=.false. - endif - -c ## Is this correct 2-wave when rarecorrector == .true. ?? - do mw=1,mwaves - r(1,mw)=1.d0 - r(2,mw)=lambda(mw) - r(3,mw)=(lambda(mw))**2 - enddo - if (.not.rarecorrector) then - lambda(2) = 0.5d0*(lambda(1)+lambda(3)) -c lambda(2) = max(min(0.5d0*(s1m+s2m),sE2),sE1) - r(1,2)=0.d0 - r(2,2)=0.d0 - r(3,2)=1.d0 - endif -c !--------------------------------------------------- - - -c !determine the steady state wave ------------------- - !criticaltol = 1.d-6 - ! MODIFIED: - criticaltol = max(drytol*g, 1d-6) - criticaltol_2 = sqrt(criticaltol) - deldelh = -delb - deldelphi = -0.5d0 * (hR + hL) * (g * delb + delp / rho) - -c !determine a few quanitites needed for steady state wave if iterated - hLstar=hL - hRstar=hR - uLstar=uL - uRstar=uR - huLstar=uLstar*hLstar - huRstar=uRstar*hRstar - - !iterate to better determine the steady state wave - convergencetol=1.d-6 - do iter=1,maxiter - !determine steady state wave (this will be subtracted from the delta vectors) - if (min(hLstar,hRstar).lt.drytol.and.rarecorrector) then - rarecorrector=.false. - hLstar=hL - hRstar=hR - uLstar=uL - uRstar=uR - huLstar=uLstar*hLstar - huRstar=uRstar*hRstar - lambda(2) = 0.5d0*(lambda(1)+lambda(3)) -c lambda(2) = max(min(0.5d0*(s1m+s2m),sE2),sE1) - r(1,2)=0.d0 - r(2,2)=0.d0 - r(3,2)=1.d0 - endif - - hbar = max(0.5d0*(hLstar+hRstar),0.d0) - s1s2bar = 0.25d0*(uLstar+uRstar)**2 - g*hbar - s1s2tilde= max(0.d0,uLstar*uRstar) - g*hbar - -c !find if sonic problem - ! MODIFIED from 5.3.1 version - sonic = .false. - if (abs(s1s2bar) <= criticaltol) then - sonic = .true. - else if (s1s2bar*s1s2tilde <= criticaltol**2) then - sonic = .true. - else if (s1s2bar*sE1*sE2 <= criticaltol**2) then - sonic = .true. - else if (min(abs(sE1),abs(sE2)) < criticaltol_2) then - sonic = .true. - else if (sE1 < criticaltol_2 .and. s1m > -criticaltol_2) then - sonic = .true. - else if (sE2 > -criticaltol_2 .and. s2m < criticaltol_2) then - sonic = .true. - else if ((uL+dsqrt(g*hL))*(uR+dsqrt(g*hR)) < 0.d0) then - sonic = .true. - else if ((uL- dsqrt(g*hL))*(uR- dsqrt(g*hR)) < 0.d0) then - sonic = .true. - end if - -c !find jump in h, deldelh - if (sonic) then - deldelh = -delb - else - deldelh = delb*g*hbar/s1s2bar - endif -c !find bounds in case of critical state resonance, or negative states - if (sE1.lt.-criticaltol.and.sE2.gt.criticaltol) then - deldelh = min(deldelh,hstarHLL*(sE2-sE1)/sE2) - deldelh = max(deldelh,hstarHLL*(sE2-sE1)/sE1) - elseif (sE1.ge.criticaltol) then - deldelh = min(deldelh,hstarHLL*(sE2-sE1)/sE1) - deldelh = max(deldelh,-hL) - elseif (sE2.le.-criticaltol) then - deldelh = min(deldelh,hR) - deldelh = max(deldelh,hstarHLL*(sE2-sE1)/sE2) - endif - -c ! 
adjust deldelh for well-balancing of atmospheric pressure difference - deldelh = deldelh - delP/(rho*g) - -c !find jump in phi, deldelphi - if (sonic) then - deldelphi = -g*hbar*delb - else - deldelphi = -delb*g*hbar*s1s2tilde/s1s2bar - endif -c !find bounds in case of critical state resonance, or negative states - deldelphi=min(deldelphi,g*max(-hLstar*delb,-hRstar*delb)) - deldelphi=max(deldelphi,g*min(-hLstar*delb,-hRstar*delb)) - deldelphi = deldelphi - hbar * delp / rho - - del(1)=delh-deldelh - del(2)=delhu - del(3)=delphi-deldelphi - -c !Determine determinant of eigenvector matrix======== - det1=r(1,1)*(r(2,2)*r(3,3)-r(2,3)*r(3,2)) - det2=r(1,2)*(r(2,1)*r(3,3)-r(2,3)*r(3,1)) - det3=r(1,3)*(r(2,1)*r(3,2)-r(2,2)*r(3,1)) - determinant=det1-det2+det3 - -c !solve for beta(k) using Cramers Rule================= - do k=1,3 - do mw=1,3 - A(1,mw)=r(1,mw) - A(2,mw)=r(2,mw) - A(3,mw)=r(3,mw) - enddo - A(1,k)=del(1) - A(2,k)=del(2) - A(3,k)=del(3) - det1=A(1,1)*(A(2,2)*A(3,3)-A(2,3)*A(3,2)) - det2=A(1,2)*(A(2,1)*A(3,3)-A(2,3)*A(3,1)) - det3=A(1,3)*(A(2,1)*A(3,2)-A(2,2)*A(3,1)) - beta(k)=(det1-det2+det3)/determinant - enddo - - !exit if things aren't changing - if (abs(del(1)**2+del(3)**2-delnorm).lt.convergencetol) exit - delnorm = del(1)**2+del(3)**2 - !find new states qLstar and qRstar on either side of interface - hLstar=hL - hRstar=hR - uLstar=uL - uRstar=uR - huLstar=uLstar*hLstar - huRstar=uRstar*hRstar - do mw=1,mwaves - if (lambda(mw).lt.0.d0) then - hLstar= hLstar + beta(mw)*r(1,mw) - huLstar= huLstar + beta(mw)*r(2,mw) - endif - enddo - do mw=mwaves,1,-1 - if (lambda(mw).gt.0.d0) then - hRstar= hRstar - beta(mw)*r(1,mw) - huRstar= huRstar - beta(mw)*r(2,mw) - endif - enddo - - if (hLstar.gt.drytol) then - uLstar=huLstar/hLstar - else - hLstar=max(hLstar,0.d0) - uLstar=0.d0 - endif - if (hRstar.gt.drytol) then - uRstar=huRstar/hRstar - else - hRstar=max(hRstar,0.d0) - uRstar=0.d0 - endif - - enddo ! end iteration on Riemann problem - - fw(:,:) = 0.d0 ! initialize before setting some waves - - do mw=1,mwaves - sw(mw)=lambda(mw) - fw(1,mw)=beta(mw)*r(2,mw) - fw(2,mw)=beta(mw)*r(3,mw) - fw(3,mw)=beta(mw)*r(2,mw) - enddo - !find transverse components (ie huv jumps). - ! MODIFIED from 5.3.1 version - fw(3,1)=fw(3,1)*vL - fw(3,3)=fw(3,3)*vR - fw(3,2)= 0.d0 - - hustar_interface = huL + fw(1,1) ! = huR - fw(1,3) - if (hustar_interface <= 0.0d0) then - fw(3,1) = fw(3,1) + (hR*uR*vR - hL*uL*vL - fw(3,1)- fw(3,3)) - else - fw(3,3) = fw(3,3) + (hR*uR*vR - hL*uL*vL - fw(3,1)- fw(3,3)) - end if - - - return - - end !subroutine riemann_aug_JCP------------------------------------------------- - - -c----------------------------------------------------------------------- - subroutine riemann_ssqfwave(maxiter,meqn,mwaves,hL,hR,huL,huR, - & hvL,hvR,bL,bR,uL,uR,vL,vR,phiL,phiR,pL,pR,sE1,sE2,drytol,g, - & rho,sw,fw) - - ! solve shallow water equations given single left and right states - ! 
steady state wave is subtracted from delta [q,f]^T before decomposition - - implicit none - - !input - integer meqn,mwaves,maxiter - - double precision hL,hR,huL,huR,bL,bR,uL,uR,phiL,phiR,sE1,sE2 - double precision vL,vR,hvL,hvR,pL,pR - double precision drytol,g,rho - - !local - integer iter - - logical sonic - - double precision delh,delhu,delphi,delb,delhdecomp,delphidecomp - double precision s1s2bar,s1s2tilde,hbar,hLstar,hRstar,hustar - double precision uRstar,uLstar,hstarHLL - double precision deldelh,deldelphi,delP - double precision alpha1,alpha2,beta1,beta2,delalpha1,delalpha2 - double precision criticaltol,convergencetol - double precision sL,sR - double precision uhat,chat,sRoe1,sRoe2 - - double precision sw(mwaves) - double precision fw(meqn,mwaves) - - !determine del vectors - delh = hR-hL - delhu = huR-huL - delphi = phiR-phiL - delb = bR-bL - delP = pR - pL - - convergencetol= 1.d-16 - criticaltol = 1.d-99 - - deldelh = -delb - deldelphi = -0.5d0 * (hR + hL) * (g * delb + delP / rho) - -! !if no source term, skip determining steady state wave - if (abs(delb).gt.0.d0) then -! - !determine a few quanitites needed for steady state wave if iterated - hLstar=hL - hRstar=hR - uLstar=uL - uRstar=uR - hstarHLL = max((huL-huR+sE2*hR-sE1*hL)/(sE2-sE1),0.d0) ! middle state in an HLL solve - - alpha1=0.d0 - alpha2=0.d0 - -! !iterate to better determine Riemann problem - do iter=1,maxiter - - !determine steady state wave (this will be subtracted from the delta vectors) - hbar = max(0.5d0*(hLstar+hRstar),0.d0) - s1s2bar = 0.25d0*(uLstar+uRstar)**2 - g*hbar - s1s2tilde= max(0.d0,uLstar*uRstar) - g*hbar - - -c !find if sonic problem - sonic=.false. - if (abs(s1s2bar).le.criticaltol) sonic=.true. - if (s1s2bar*s1s2tilde.le.criticaltol) sonic=.true. - if (s1s2bar*sE1*sE2.le.criticaltol) sonic = .true. - if (min(abs(sE1),abs(sE2)).lt.criticaltol) sonic=.true. - -c !find jump in h, deldelh - if (sonic) then - deldelh = -delb - else - deldelh = delb*g*hbar/s1s2bar - endif -! !bounds in case of critical state resonance, or negative states - if (sE1.lt.-criticaltol.and.sE2.gt.criticaltol) then - deldelh = min(deldelh,hstarHLL*(sE2-sE1)/sE2) - deldelh = max(deldelh,hstarHLL*(sE2-sE1)/sE1) - elseif (sE1.ge.criticaltol) then - deldelh = min(deldelh,hstarHLL*(sE2-sE1)/sE1) - deldelh = max(deldelh,-hL) - elseif (sE2.le.-criticaltol) then - deldelh = min(deldelh,hR) - deldelh = max(deldelh,hstarHLL*(sE2-sE1)/sE2) - endif - -c !find jump in phi, deldelphi - if (sonic) then - deldelphi = -g*hbar*delb - else - deldelphi = -delb*g*hbar*s1s2tilde/s1s2bar - endif -! !bounds in case of critical state resonance, or negative states - deldelphi=min(deldelphi,g*max(-hLstar*delb,-hRstar*delb)) - deldelphi=max(deldelphi,g*min(-hLstar*delb,-hRstar*delb)) - -!---------determine fwaves ------------------------------------------ - -! !first decomposition - delhdecomp = delh-deldelh - delalpha1 = (sE2*delhdecomp - delhu)/(sE2-sE1)-alpha1 - alpha1 = alpha1 + delalpha1 - delalpha2 = (delhu - sE1*delhdecomp)/(sE2-sE1)-alpha2 - alpha2 = alpha2 + delalpha2 - - !second decomposition - delphidecomp = delphi - deldelphi - beta1 = (sE2*delhu - delphidecomp)/(sE2-sE1) - beta2 = (delphidecomp - sE1*delhu)/(sE2-sE1) - - if ((delalpha2**2+delalpha1**2).lt.convergencetol**2) then - exit - endif -! 
- if (sE2.gt.0.d0.and.sE1.lt.0.d0) then - hLstar=hL+alpha1 - hRstar=hR-alpha2 -c hustar=huL+alpha1*sE1 - hustar = huL + beta1 - elseif (sE1.ge.0.d0) then - hLstar=hL - hustar=huL - hRstar=hR - alpha1 - alpha2 - elseif (sE2.le.0.d0) then - hRstar=hR - hustar=huR - hLstar=hL + alpha1 + alpha2 - endif -! - if (hLstar.gt.drytol) then - uLstar=hustar/hLstar - else - hLstar=max(hLstar,0.d0) - uLstar=0.d0 - endif -! - if (hRstar.gt.drytol) then - uRstar=hustar/hRstar - else - hRstar=max(hRstar,0.d0) - uRstar=0.d0 - endif - - enddo - endif - - delhdecomp = delh - deldelh - delphidecomp = delphi - deldelphi - - !first decomposition - alpha1 = (sE2*delhdecomp - delhu)/(sE2-sE1) - alpha2 = (delhu - sE1*delhdecomp)/(sE2-sE1) - - !second decomposition - beta1 = (sE2*delhu - delphidecomp)/(sE2-sE1) - beta2 = (delphidecomp - sE1*delhu)/(sE2-sE1) - - ! 1st nonlinear wave - fw(1,1) = alpha1*sE1 - fw(2,1) = beta1*sE1 - fw(3,1) = fw(1,1)*vL - ! 2nd nonlinear wave - fw(1,3) = alpha2*sE2 - fw(2,3) = beta2*sE2 - fw(3,3) = fw(1,3)*vR - ! advection of transverse wave - fw(1,2) = 0.d0 - fw(2,2) = 0.d0 - fw(3,2) = hR*uR*vR - hL*uL*vL -fw(3,1)-fw(3,3) - !speeds - sw(1)=sE1 - sw(2)=0.5d0*(sE1+sE2) - sw(3)=sE2 - - return - - end subroutine !------------------------------------------------- - - -c----------------------------------------------------------------------- - subroutine riemann_fwave(meqn,mwaves,hL,hR,huL,huR,hvL,hvR, - & bL,bR,uL,uR,vL,vR,phiL,phiR,pL,pR,s1,s2,drytol,g,rho, - & sw,fw) - - ! solve shallow water equations given single left and right states - ! solution has two waves. - ! flux - source is decomposed. - - implicit none - - !input - integer meqn,mwaves - - double precision hL,hR,huL,huR,bL,bR,uL,uR,phiL,phiR,s1,s2 - double precision hvL,hvR,vL,vR,pL,pR - double precision drytol,g,rho - - double precision sw(mwaves) - double precision fw(meqn,mwaves) - - !local - double precision delh,delhu,delphi,delb,delhdecomp,delphidecomp - double precision deldelh,deldelphi,delP - double precision beta1,beta2 - - - !determine del vectors - delh = hR-hL - delhu = huR-huL - delphi = phiR-phiL - delb = bR-bL - delP = pR - pL - - deldelphi = -0.5d0 * (hR + hL) * (g * delb + delP / rho) - delphidecomp = delphi - deldelphi - - !flux decomposition - beta1 = (s2*delhu - delphidecomp)/(s2-s1) - beta2 = (delphidecomp - s1*delhu)/(s2-s1) - - sw(1)=s1 - sw(2)=0.5d0*(s1+s2) - sw(3)=s2 - ! 1st nonlinear wave - fw(1,1) = beta1 - fw(2,1) = beta1*s1 - fw(3,1) = beta1*vL - ! 2nd nonlinear wave - fw(1,3) = beta2 - fw(2,3) = beta2*s2 - fw(3,3) = beta2*vR - ! advection of transverse wave - fw(1,2) = 0.d0 - fw(2,2) = 0.d0 - fw(3,2) = hR*uR*vR - hL*uL*vL -fw(3,1)-fw(3,3) - return - - end !subroutine ------------------------------------------------- - - - - - -c============================================================================= - subroutine riemanntype(hL,hR,uL,uR,hm,s1m,s2m,rare1,rare2, - & maxiter,drytol,g) - - !determine the Riemann structure (wave-type in each family) - - - implicit none - - !input - double precision hL,hR,uL,uR,drytol,g - integer maxiter - - !output - double precision s1m,s2m - logical rare1,rare2 - - !local - double precision hm,u1m,u2m,um,delu - double precision h_max,h_min,h0,F_max,F_min,dfdh,F0,slope,gL,gR - integer iter - - - -c !Test for Riemann structure - - h_min=min(hR,hL) - h_max=max(hR,hL) - delu=uR-uL - - if (h_min.le.drytol) then - hm=0.d0 - um=0.d0 - s1m=uR+uL-2.d0*sqrt(g*hR)+2.d0*sqrt(g*hL) - s2m=uR+uL-2.d0*sqrt(g*hR)+2.d0*sqrt(g*hL) - if (hL.le.0.d0) then - rare2=.true. - rare1=.false. 
- else - rare1=.true. - rare2=.false. - endif - - else - F_min= delu+2.d0*(sqrt(g*h_min)-sqrt(g*h_max)) - F_max= delu + - & (h_max-h_min)*(sqrt(.5d0*g*(h_max+h_min)/(h_max*h_min))) - - if (F_min.gt.0.d0) then !2-rarefactions - - hm=(1.d0/(16.d0*g))* - & max(0.d0,-delu+2.d0*(sqrt(g*hL)+sqrt(g*hR)))**2 - um=sign(1.d0,hm)*(uL+2.d0*(sqrt(g*hL)-sqrt(g*hm))) - - s1m=uL+2.d0*sqrt(g*hL)-3.d0*sqrt(g*hm) - s2m=uR-2.d0*sqrt(g*hR)+3.d0*sqrt(g*hm) - - rare1=.true. - rare2=.true. - - elseif (F_max.le.0.d0) then !2 shocks - -c !root finding using a Newton iteration on sqrt(h)=== - h0=h_max - do iter=1,maxiter - gL=sqrt(.5d0*g*(1/h0 + 1/hL)) - gR=sqrt(.5d0*g*(1/h0 + 1/hR)) - F0=delu+(h0-hL)*gL + (h0-hR)*gR - dfdh=gL-g*(h0-hL)/(4.d0*(h0**2)*gL)+ - & gR-g*(h0-hR)/(4.d0*(h0**2)*gR) - slope=2.d0*sqrt(h0)*dfdh - h0=(sqrt(h0)-F0/slope)**2 - enddo - hm=h0 - u1m=uL-(hm-hL)*sqrt((.5d0*g)*(1/hm + 1/hL)) - u2m=uR+(hm-hR)*sqrt((.5d0*g)*(1/hm + 1/hR)) - um=.5d0*(u1m+u2m) - - s1m=u1m-sqrt(g*hm) - s2m=u2m+sqrt(g*hm) - rare1=.false. - rare2=.false. - - else !one shock one rarefaction - h0=h_min - - do iter=1,maxiter - F0=delu + 2.d0*(sqrt(g*h0)-sqrt(g*h_max)) - & + (h0-h_min)*sqrt(.5d0*g*(1/h0+1/h_min)) - slope=(F_max-F0)/(h_max-h_min) - h0=h0-F0/slope - enddo - - hm=h0 - if (hL.gt.hR) then - um=uL+2.d0*sqrt(g*hL)-2.d0*sqrt(g*hm) - s1m=uL+2.d0*sqrt(g*hL)-3.d0*sqrt(g*hm) - s2m=uL+2.d0*sqrt(g*hL)-sqrt(g*hm) - rare1=.true. - rare2=.false. - else - s2m=uR-2.d0*sqrt(g*hR)+3.d0*sqrt(g*hm) - s1m=uR-2.d0*sqrt(g*hR)+sqrt(g*hm) - um=uR-2.d0*sqrt(g*hR)+2.d0*sqrt(g*hm) - rare2=.true. - rare1=.false. - endif - endif - endif - - return - - end ! subroutine riemanntype---------------------------------------------------------------- diff --git a/src/2d/bouss/rpn2_geoclaw.f b/src/2d/bouss/rpn2_geoclaw.f deleted file mode 100644 index 076fcd79f..000000000 --- a/src/2d/bouss/rpn2_geoclaw.f +++ /dev/null @@ -1,305 +0,0 @@ -c====================================================================== - subroutine rpn2(ixy,maxm,meqn,mwaves,maux,mbc,mx, - & ql,qr,auxl,auxr,fwave,s,amdq,apdq) -c====================================================================== -c -c Solves normal Riemann problems for the 2D SHALLOW WATER equations -c with topography: -c # h_t + (hu)_x + (hv)_y = 0 # -c # (hu)_t + (hu^2 + 0.5gh^2)_x + (huv)_y = -ghb_x # -c # (hv)_t + (huv)_x + (hv^2 + 0.5gh^2)_y = -ghb_y # - -c On input, ql contains the state vector at the left edge of each cell -c qr contains the state vector at the right edge of each cell -c -c This data is along a slice in the x-direction if ixy=1 -c or the y-direction if ixy=2. - -c Note that the i'th Riemann problem has left state qr(i-1,:) -c and right state ql(i,:) -c From the basic clawpack routines, this routine is called with -c ql = qr -c -c -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! ! -! # This Riemann solver is for the shallow water equations. ! -! ! -! It allows the user to easily select a Riemann solver in ! -! riemannsolvers_geo.f. this routine initializes all the variables ! -! for the shallow water equations, accounting for wet dry boundary ! -! dry cells, wave speeds etc. ! -! ! -! David George, Vancouver WA, Feb. 2009 ! -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
- - use geoclaw_module, only: g => grav, drytol => dry_tolerance, rho - use geoclaw_module, only: earth_radius, deg2rad - use amr_module, only: mcapa - - use storm_module, only: pressure_forcing, pressure_index - - implicit none - - !input - integer maxm,meqn,maux,mwaves,mbc,mx,ixy - - double precision fwave(meqn, mwaves, 1-mbc:maxm+mbc) - double precision s(mwaves, 1-mbc:maxm+mbc) - double precision ql(meqn, 1-mbc:maxm+mbc) - double precision qr(meqn, 1-mbc:maxm+mbc) - double precision apdq(meqn,1-mbc:maxm+mbc) - double precision amdq(meqn,1-mbc:maxm+mbc) - double precision auxl(maux,1-mbc:maxm+mbc) - double precision auxr(maux,1-mbc:maxm+mbc) - - !local only - integer m,i,mw,maxiter,mu,nv - double precision wall(3) - double precision fw(meqn,3) - double precision sw(3) - - double precision hR,hL,huR,huL,uR,uL,hvR,hvL,vR,vL,phiR,phiL,pL,pR - double precision bR,bL,sL,sR,sRoe1,sRoe2,sE1,sE2,uhat,chat - double precision s1m,s2m - double precision hstar,hstartest,hstarHLL,sLtest,sRtest - double precision tw,dxdc - - logical :: debug - - logical rare1,rare2 - - ! In case there is no pressure forcing - pL = 0.d0 - pR = 0.d0 - debug = .false. - - ! initialize all components to 0 - fw(:,:) = 0.d0 - fwave(:,:,:) = 0.d0 - s(:,:) = 0.d0 - amdq(:,:) = 0.d0 - apdq(:,:) = 0.d0 - - !loop through Riemann problems at each grid cell - do i=2-mbc,mx+mbc - -!-----------------------Initializing----------------------------------- - !inform of a bad riemann problem from the start - if((qr(1,i-1).lt.0.d0).or.(ql(1,i) .lt. 0.d0)) then - write(*,*) 'Negative input: hl,hr,i=',qr(1,i-1),ql(1,i),i - endif - - -c !set normal direction - if (ixy.eq.1) then - mu=2 - nv=3 - else - mu=3 - nv=2 - endif - - !zero (small) negative values if they exist - if (qr(1,i-1).lt.0.d0) then - qr(1,i-1)=0.d0 - qr(2,i-1)=0.d0 - qr(3,i-1)=0.d0 - endif - - if (ql(1,i).lt.0.d0) then - ql(1,i)=0.d0 - ql(2,i)=0.d0 - ql(3,i)=0.d0 - endif - - !skip problem if in a completely dry area - if (qr(1,i-1) <= drytol .and. ql(1,i) <= drytol) then - go to 30 - endif - - !Riemann problem variables - hL = qr(1,i-1) - hR = ql(1,i) - huL = qr(mu,i-1) - huR = ql(mu,i) - bL = auxr(1,i-1) - bR = auxl(1,i) - if (pressure_forcing) then - pL = auxr(pressure_index, i-1) - pR = auxl(pressure_index, i) - end if - - hvL=qr(nv,i-1) - hvR=ql(nv,i) - - !check for wet/dry boundary - if (hR.gt.drytol) then - uR=huR/hR - vR=hvR/hR - phiR = 0.5d0*g*hR**2 + huR**2/hR - else - hR = 0.d0 - huR = 0.d0 - hvR = 0.d0 - uR = 0.d0 - vR = 0.d0 - phiR = 0.d0 - endif - - if (hL.gt.drytol) then - uL=huL/hL - vL=hvL/hL - phiL = 0.5d0*g*hL**2 + huL**2/hL - else - hL=0.d0 - huL=0.d0 - hvL=0.d0 - uL=0.d0 - vL=0.d0 - phiL = 0.d0 - endif - - wall(1) = 1.d0 - wall(2) = 1.d0 - wall(3) = 1.d0 - if (hR.le.drytol) then - call riemanntype(hL,hL,uL,-uL,hstar,s1m,s2m, - & rare1,rare2,1,drytol,g) - hstartest=max(hL,hstar) - if (hstartest+bL.lt.bR) then !right state should become ghost values that mirror left for wall problem -c bR=hstartest+bL - wall(2)=0.d0 - wall(3)=0.d0 - hR=hL - huR=-huL - bR=bL - phiR=phiL - uR=-uL - vR=vL - elseif (hL+bL.lt.bR) then - bR=hL+bL - endif - elseif (hL.le.drytol) then ! 
right surface is lower than left topo - call riemanntype(hR,hR,-uR,uR,hstar,s1m,s2m, - & rare1,rare2,1,drytol,g) - hstartest=max(hR,hstar) - if (hstartest+bR.lt.bL) then !left state should become ghost values that mirror right -c bL=hstartest+bR - wall(1)=0.d0 - wall(2)=0.d0 - hL=hR - huL=-huR - bL=bR - phiL=phiR - uL=-uR - vL=vR - elseif (hR+bR.lt.bL) then - bL=hR+bR - endif - endif - - !determine wave speeds - sL=uL-sqrt(g*hL) ! 1 wave speed of left state - sR=uR+sqrt(g*hR) ! 2 wave speed of right state - - uhat=(sqrt(g*hL)*uL + sqrt(g*hR)*uR)/(sqrt(g*hR)+sqrt(g*hL)) ! Roe average - chat=sqrt(g*0.5d0*(hR+hL)) ! Roe average - sRoe1=uhat-chat ! Roe wave speed 1 wave - sRoe2=uhat+chat ! Roe wave speed 2 wave - - sE1 = min(sL,sRoe1) ! Eindfeldt speed 1 wave - sE2 = max(sR,sRoe2) ! Eindfeldt speed 2 wave - - !--------------------end initializing...finally---------- - !solve Riemann problem. - - maxiter = 1 - - call riemann_aug_JCP(maxiter,meqn,mwaves,hL,hR,huL, - & huR,hvL,hvR,bL,bR,uL,uR,vL,vR,phiL,phiR,pL,pR,sE1,sE2, - & drytol,g,rho,sw,fw) - -C call riemann_ssqfwave(maxiter,meqn,mwaves,hL,hR,huL,huR, -C & hvL,hvR,bL,bR,uL,uR,vL,vR,phiL,phiR,pL,pR,sE1,sE2,drytol,g, -C & rho,sw,fw) - -C call riemann_fwave(meqn,mwaves,hL,hR,huL,huR,hvL,hvR, -C & bL,bR,uL,uR,vL,vR,phiL,phiR,pL,pR,sE1,sE2,drytol,g,rho,sw, -C & fw) - -c !eliminate ghost fluxes for wall - do mw=1,3 - sw(mw)=sw(mw)*wall(mw) - - fw(1,mw)=fw(1,mw)*wall(mw) - fw(2,mw)=fw(2,mw)*wall(mw) - fw(3,mw)=fw(3,mw)*wall(mw) - enddo - - do mw=1,mwaves - s(mw,i)=sw(mw) - fwave(1,mw,i)=fw(1,mw) - fwave(mu,mw,i)=fw(2,mw) - fwave(nv,mw,i)=fw(3,mw) -! write(51,515) sw(mw),fw(1,mw),fw(2,mw),fw(3,mw) -!515 format("++sw",4e25.16) - enddo - - 30 continue - enddo - - -c==========Capacity for mapping from latitude longitude to physical space==== - if (mcapa.gt.0) then - do i=2-mbc,mx+mbc - if (ixy.eq.1) then - dxdc=(earth_radius*deg2rad) - else - dxdc=earth_radius*cos(auxl(3,i))*deg2rad - endif - - do mw=1,mwaves -c if (s(mw,i) .gt. 316.d0) then -c # shouldn't happen unless h > 10 km! -c write(6,*) 'speed > 316: i,mw,s(mw,i): ',i,mw,s(mw,i) -c endif - s(mw,i)=dxdc*s(mw,i) - fwave(1,mw,i)=dxdc*fwave(1,mw,i) - fwave(2,mw,i)=dxdc*fwave(2,mw,i) - fwave(3,mw,i)=dxdc*fwave(3,mw,i) - enddo - enddo - endif - -c=============================================================================== - - -c============= compute fluctuations============================================= - - do i=2-mbc,mx+mbc - do mw=1,mwaves - if (s(mw,i) < 0.d0) then - amdq(1:3,i) = amdq(1:3,i) + fwave(1:3,mw,i) - else if (s(mw,i) > 0.d0) then - apdq(1:3,i) = apdq(1:3,i) + fwave(1:3,mw,i) - else - amdq(1:3,i) = amdq(1:3,i) + 0.5d0 * fwave(1:3,mw,i) - apdq(1:3,i) = apdq(1:3,i) + 0.5d0 * fwave(1:3,mw,i) - endif - enddo - enddo - - if (debug) then - do i=2-mbc,mx+mbc - do m=1,meqn - write(51,151) m,i,amdq(m,i),apdq(m,i) - write(51,152) fwave(m,1,i),fwave(m,2,i),fwave(m,3,i) - 151 format("+++ ampdq ",2i4,2e25.15) - 152 format("+++ fwave ",8x,3e25.15) - enddo - enddo - endif - - return - end subroutine diff --git a/src/2d/bouss/rpt2_geoclaw_sym.f b/src/2d/bouss/rpt2_geoclaw_sym.f deleted file mode 100644 index ce51c0d3c..000000000 --- a/src/2d/bouss/rpt2_geoclaw_sym.f +++ /dev/null @@ -1,260 +0,0 @@ -! ===================================================== - subroutine rpt2(ixy,imp,maxm,meqn,mwaves,maux,mbc,mx, - & ql,qr,aux1,aux2,aux3,asdq,bmasdq,bpasdq) -! 
===================================================== - use geoclaw_module, only: g => grav, tol => dry_tolerance - use geoclaw_module, only: coordinate_system,earth_radius,deg2rad - - implicit none -! -! # Riemann solver in the transverse direction using an einfeldt -! Jacobian. - -!-----------------------last modified 1/10/05---------------------- - - integer ixy,maxm,meqn,maux,mwaves,mbc,mx,imp - - double precision ql(meqn,1-mbc:maxm+mbc) - double precision qr(meqn,1-mbc:maxm+mbc) - double precision asdq(meqn,1-mbc:maxm+mbc) - double precision bmasdq(meqn,1-mbc:maxm+mbc) - double precision bpasdq(meqn,1-mbc:maxm+mbc) - double precision aux1(maux,1-mbc:maxm+mbc) - double precision aux2(maux,1-mbc:maxm+mbc) - double precision aux3(maux,1-mbc:maxm+mbc) - - double precision s(mwaves) - double precision r(meqn,mwaves) - double precision beta(mwaves) - double precision abs_tol - double precision hl,hr,hul,hur,hvl,hvr,vl,vr,ul,ur,bl,br - double precision uhat,vhat,hhat,roe1,roe3,s1,s2,s3,s1down,s3up - double precision delf1,delf2,delf3,dxdcd,dxdcu - double precision dxdcm,dxdcp,topo1,topo3,eta - - integer i,m,mw,mu,mv - - !write(83,*) 'rpt2, imp = ',imp - - ! initialize all components to 0: - bmasdq(:,:) = 0.d0 - bpasdq(:,:) = 0.d0 - - abs_tol=tol - - if (ixy.eq.1) then - mu = 2 - mv = 3 - else - mu = 3 - mv = 2 - endif - - do i=2-mbc,mx+mbc - - hl=qr(1,i-1) - hr=ql(1,i) - hul=qr(mu,i-1) - hur=ql(mu,i) - hvl=qr(mv,i-1) - hvr=ql(mv,i) - -!===========determine velocity from momentum=========================== - if (hl.lt.abs_tol) then - hl=0.d0 - ul=0.d0 - vl=0.d0 - else - ul=hul/hl - vl=hvl/hl - endif - - if (hr.lt.abs_tol) then - hr=0.d0 - ur=0.d0 - vr=0.d0 - else - ur=hur/hr - vr=hvr/hr - endif - - do mw=1,mwaves - s(mw)=0.d0 - beta(mw)=0.d0 - do m=1,meqn - r(m,mw)=0.d0 - enddo - enddo - dxdcp = 1.d0 - dxdcm = 1.d0 - - if (hl <= tol .and. hr <= tol) go to 90 - -* !check and see if cell that transverse waves are going in is high and dry - if (imp.eq.1) then - eta = qr(1,i-1) + aux2(1,i-1) - topo1 = aux1(1,i-1) - topo3 = aux3(1,i-1) -c s1 = vl-sqrt(g*hl) -c s3 = vl+sqrt(g*hl) -c s2 = 0.5d0*(s1+s3) - else - eta = ql(1,i) + aux2(1,i) - topo1 = aux1(1,i) - topo3 = aux3(1,i) -c s1 = vr-sqrt(g*hr) -c s3 = vr+sqrt(g*hr) -c s2 = 0.5d0*(s1+s3) - endif - if (eta.lt.max(topo1,topo3)) go to 90 - - if (coordinate_system.eq.2) then - if (ixy.eq.2) then - dxdcp=(earth_radius*deg2rad) - dxdcm = dxdcp - else - if (imp.eq.1) then - dxdcp = earth_radius*cos(aux3(3,i-1))*deg2rad - dxdcm = earth_radius*cos(aux1(3,i-1))*deg2rad - else - dxdcp = earth_radius*cos(aux3(3,i))*deg2rad - dxdcm = earth_radius*cos(aux1(3,i))*deg2rad - endif - endif - endif - -c=====Determine some speeds necessary for the Jacobian================= -c vhat=(vr*dsqrt(hr))/(dsqrt(hr)+dsqrt(hl)) + -c & (vl*dsqrt(hl))/(dsqrt(hr)+dsqrt(hl)) -c -c uhat=(ur*dsqrt(hr))/(dsqrt(hr)+dsqrt(hl)) + -c & (ul*dsqrt(hl))/(dsqrt(hr)+dsqrt(hl)) -c hhat=(hr+hl)/2.d0 - - ! Note that we used left right states to define Roe averages, - ! which is consistent with those used in rpn2. - ! But now we are computing upgoing, downgoing waves either in - ! cell on left (if imp==1) or on right (if imp==2) so we - ! should perhaps use Roe averages based on values above or below, - ! but these aren't available. - - !roe1=vhat-dsqrt(g*hhat) - !roe3=vhat+dsqrt(g*hhat) - - ! modified to at least use hl,vl or hr,vr properly based on imp: - ! (since s1 and s3 are now vertical velocities, - ! it made no sense to use h,v in cell i-1 for downgoing - ! 
and cell i for upgoing) - - if (imp == 1) then - ! asdq is leftgoing, use q from cell i-1: - if (hl <= tol) go to 90 - s1 = vl-dsqrt(g*hl) - s3 = vl+dsqrt(g*hl) - s2 = vl - uhat = ul - hhat = hl - else - ! asdq is rightgoing, use q from cell i: - if (hr <= tol) go to 90 - s1 = vr-dsqrt(g*hr) - s3 = vr+dsqrt(g*hr) - s2 = vr - uhat = ur - hhat = hr - endif - - ! don't use Roe averages: - !s1=dmin1(roe1,s1down) - !s3=dmax1(roe3,s3up) - - !s2=0.5d0*(s1+s3) - - s(1)=s1 - s(2)=s2 - s(3)=s3 -c=======================Determine asdq decomposition (beta)============ - delf1=asdq(1,i) - delf2=asdq(mu,i) - delf3=asdq(mv, i) - - ! fixed bug in beta(2): uhat in place of s(2) - beta(1) = (s3*delf1/(s3-s1))-(delf3/(s3-s1)) - beta(2) = -uhat*delf1 + delf2 - beta(3) = (delf3/(s3-s1))-(s1*delf1/(s3-s1)) -c======================End ================================================= - -c=====================Set-up eigenvectors=================================== - r(1,1) = 1.d0 - r(2,1) = uhat ! fixed bug, uhat not s2 - r(3,1) = s1 - - r(1,2) = 0.d0 - r(2,2) = 1.d0 - r(3,2) = 0.d0 - - r(1,3) = 1.d0 - r(2,3) = uhat ! fixed bug, uhat not s2 - r(3,3) = s3 -c============================================================================ -90 continue -c============= compute fluctuations========================================== - - bmasdq(1,i)=0.0d0 - bpasdq(1,i)=0.0d0 - bmasdq(2,i)=0.0d0 - bpasdq(2,i)=0.0d0 - bmasdq(3,i)=0.0d0 - bpasdq(3,i)=0.0d0 - - do mw=1,3 - if ((abs(s(mw)) > 0.d0) .and. - & (abs(s(mw)) < 0.001d0*dsqrt(g*hhat))) then - ! split correction symmetrically if nearly zero - ! Note wave drops out if s(mw)==0 exactly, so don't split - bmasdq(1,i) =bmasdq(1,i) + - & 0.5d0*dxdcm*s(mw)*beta(mw)*r(1,mw) - bmasdq(mu,i)=bmasdq(mu,i)+ - & 0.5d0*dxdcm*s(mw)*beta(mw)*r(2,mw) - bmasdq(mv,i)=bmasdq(mv,i)+ - & 0.5d0*dxdcm*s(mw)*beta(mw)*r(3,mw) - bpasdq(1,i) =bpasdq(1,i) + - & 0.5d0*dxdcp*s(mw)*beta(mw)*r(1,mw) - bpasdq(mu,i)=bpasdq(mu,i)+ - & 0.5d0*dxdcp*s(mw)*beta(mw)*r(2,mw) - bpasdq(mv,i)=bpasdq(mv,i)+ - & 0.5d0*dxdcp*s(mw)*beta(mw)*r(3,mw) - elseif (s(mw).lt.0.d0) then - bmasdq(1,i) =bmasdq(1,i) + dxdcm*s(mw)*beta(mw)*r(1,mw) - bmasdq(mu,i)=bmasdq(mu,i)+ dxdcm*s(mw)*beta(mw)*r(2,mw) - bmasdq(mv,i)=bmasdq(mv,i)+ dxdcm*s(mw)*beta(mw)*r(3,mw) - elseif (s(mw).gt.0.d0) then - bpasdq(1,i) =bpasdq(1,i) + dxdcp*s(mw)*beta(mw)*r(1,mw) - bpasdq(mu,i)=bpasdq(mu,i)+ dxdcp*s(mw)*beta(mw)*r(2,mw) - bpasdq(mv,i)=bpasdq(mv,i)+ dxdcp*s(mw)*beta(mw)*r(3,mw) - endif - enddo - - !if ((i>3) .and. (i<6)) then - if (.false.) then - ! DEBUG - write(83,*) 'i = ',i - write(83,831) s(1),s(2),s(3) - write(83,831) beta(1),beta(2),beta(3) - do m=1,3 - write(83,831) r(m,1),r(m,2),r(m,3) - enddo - do m=1,3 - write(83,831) asdq(m,i), bmasdq(m,i), bpasdq(m,i) - 831 format(3d20.12) - enddo - endif -c======================================================================== - enddo ! do i loop -c - - -c - - return - end diff --git a/src/2d/shallow/fgout_module.f90 b/src/2d/shallow/fgout_module.f90 index bd9effda3..981d11ac3 100644 --- a/src/2d/shallow/fgout_module.f90 +++ b/src/2d/shallow/fgout_module.f90 @@ -8,18 +8,24 @@ module fgout_module ! Grid data real(kind=8), pointer :: early(:,:,:) real(kind=8), pointer :: late(:,:,:) - + ! Geometry - integer :: num_vars(2),mx,my,point_style,fgno,output_format + integer :: num_vars,mx,my,point_style,fgno,output_format,output_style real(kind=8) :: dx,dy,x_low,x_hi,y_low,y_hi - + ! 
Time Tracking and output types integer :: num_output,next_output_index real(kind=8) :: start_time,end_time,dt integer, allocatable :: output_frames(:) real(kind=8), allocatable :: output_times(:) - end type fgout_grid + + integer :: nqout ! number of q components to output (+1 for eta) + logical, allocatable :: iqout(:) ! which components to output + integer :: bathy_index,eta_index + logical :: dclaw ! False for GeoClaw + + end type fgout_grid logical, private :: module_setup = .false. @@ -32,27 +38,27 @@ module fgout_module contains - - + + ! Setup routine that reads in the fixed grids data file and sets up the ! appropriate data structures - + subroutine set_fgout(rest,fname) use amr_module, only: parmunit, tstart_thisrun implicit none - + ! Subroutine arguments logical :: rest ! restart? character(len=*), optional, intent(in) :: fname - + ! Local storage integer, parameter :: unit = 7 integer :: i,k type(fgout_grid), pointer :: fg real(kind=8) :: ts - + if (.not.module_setup) then @@ -75,7 +81,7 @@ subroutine set_fgout(rest,fname) write(parmunit,*) ' No fixed grids specified for output' return endif - + ! Allocate fixed grids (not the data yet though) allocate(FGOUT_fgrids(FGOUT_num_grids)) @@ -84,69 +90,85 @@ subroutine set_fgout(rest,fname) fg => FGOUT_fgrids(i) ! Read in this grid's data read(unit,*) fg%fgno - read(unit,*) fg%start_time - read(unit,*) fg%end_time + read(unit,*) fg%output_style read(unit,*) fg%num_output + allocate(fg%output_times(fg%num_output)) + allocate(fg%output_frames(fg%num_output)) + + if (fg%output_style == 1) then + read(unit,*) fg%start_time + read(unit,*) fg%end_time + else if (fg%output_style == 2) then + read(unit,*) (fg%output_times(k), k=1,fg%num_output) + fg%start_time = fg%output_times(1) + fg%end_time = fg%output_times(fg%num_output) + endif + read(unit,*) fg%point_style read(unit,*) fg%output_format read(unit,*) fg%mx, fg%my read(unit,*) fg%x_low, fg%y_low read(unit,*) fg%x_hi, fg%y_hi - - allocate(fg%output_times(fg%num_output)) - allocate(fg%output_frames(fg%num_output)) - + read(unit,*) fg%dclaw + + ! Initialize next_output_index ! (might be reset below in case of a restart) fg%next_output_index = 1 - + if (fg%point_style .ne. 2) then print *, 'set_fgout: ERROR, unrecognized point_style = ',\ fg%point_style endif - - ! Setup data for this grid - ! Set dtfg (the timestep length between outputs) for each grid - if (fg%end_time <= fg%start_time) then - if (fg%num_output > 1) then - print *,'set_fgout: ERROR for fixed grid', i - print *,'start_time <= end_time yet num_output > 1' - print *,'set end_time > start_time or set num_output = 1' - stop + + if (fg%output_style == 1) then + ! Setup data for this grid + ! Set fg%dt (the timestep length between outputs) + if (fg%end_time <= fg%start_time) then + if (fg%num_output > 1) then + print *,'set_fgout: ERROR for fixed grid', i + print *,'start_time <= end_time yet num_output > 1' + print *,'set end_time > start_time or set num_output = 1' + stop + else + ! only a single fgout time: + fg%dt = 0.d0 + endif else - fg%dt = 0.d0 + if (fg%num_output < 2) then + print *,'set_fgout: ERROR for fixed grid', i + print *,'end_time > start_time, yet num_output = 1' + print *,'set num_output > 2' + stop + else + fg%dt = (fg%end_time - fg%start_time) & + / (fg%num_output - 1) + do k=1,fg%num_output + fg%output_times(k) = fg%start_time + (k-1)*fg%dt + enddo + endif + endif + endif + + do k=1,fg%num_output + if (rest) then + ! don't write initial time or earlier + ts = tstart_thisrun+FGOUT_ttol + else + ! 
do write initial time + ts = tstart_thisrun-FGOUT_ttol endif - else - if (fg%num_output < 2) then - print *,'set_fgout: ERROR for fixed grid', i - print *,'end_time > start_time, yet num_output = 1' - print *,'set num_output > 2' - stop + + if (fg%output_times(k) < ts) then + ! will not output this time in this run + ! (might have already be done when restarting) + fg%output_frames(k) = -2 + fg%next_output_index = k+1 else - fg%dt = (fg%end_time - fg%start_time) & - / (fg%num_output - 1) - do k=1,fg%num_output - fg%output_times(k) = fg%start_time + (k-1)*fg%dt - if (rest) then - ! don't write initial time or earlier - ts = tstart_thisrun+FGOUT_ttol - else - ! do write initial time - ts = tstart_thisrun-FGOUT_ttol - endif - - if (fg%output_times(k) < ts) then - ! will not output this time in this run - ! (might have already be done when restarting) - fg%output_frames(k) = -2 - fg%next_output_index = k+1 - else - ! will be reset to frameno when this is written - fg%output_frames(k) = -1 - endif - enddo + ! will be reset to frameno when this is written + fg%output_frames(k) = -1 endif - endif + enddo @@ -169,30 +191,87 @@ subroutine set_fgout(rest,fname) else print *,'set_fgout: ERROR for fixed grid', i print *,'y grid points my <= 0, set my >= 1' - endif - - ! set the number of variables stored for each grid - ! this should be (the number of variables you want to write out + 1) - fg%num_vars(1) = 6 + endif + - ! Allocate new fixed grid data array - allocate(fg%early(fg%num_vars(1), fg%mx,fg%my)) - fg%early = nan() + ! For now, hard-wire with defaults for either GeoClaw or D-Claw + ! need to save q plus topo, eta, t for interp in space-time - allocate(fg%late(fg%num_vars(1), fg%mx,fg%my)) - fg%late = nan() + if (fg%dclaw) then + ! For D-Claw: + fg%num_vars = 10 + ! for h, hu, hv, hm, pb, hchi, b_eroded, bathy, eta, time + else + ! GeoClaw: + fg%num_vars = 6 + ! for h, hu, hv, bathy, eta, time + endif + + ! specify which components of q (plus eta?) to output: + ! (eventually this should be set from user input) + + if (fg%num_vars == 6) then + ! GeoClaw + ! indexes used in early and late arrays: + ! 1:3 are q variables, 6 is time + fg%bathy_index = 4 + fg%eta_index = 5 + + allocate(fg%iqout(4)) + fg%iqout(1) = .true. ! output h? + fg%iqout(2) = .true. ! output hu? + fg%iqout(3) = .true. ! output hv? + fg%iqout(4) = .true. ! output eta? + fg%nqout = 0 + do k=1,4 + if (fg%iqout(k)) fg%nqout = fg%nqout + 1 + enddo + endif + + if (fg%num_vars == 10) then + ! D-Claw: + ! indexes used in early and late arrays: + ! 1:7 are q variables, 10 is time + fg%bathy_index = 8 + fg%eta_index = 9 + + allocate(fg%iqout(8)) + fg%iqout(1) = .true. ! output h? + fg%iqout(2) = .true. ! output hu? + fg%iqout(3) = .true. ! output hv? + fg%iqout(4) = .true. ! output hm? + fg%iqout(5) = .true. ! output pb? + fg%iqout(6) = .true. ! output hchi? + fg%iqout(7) = .true. ! output beroded? + fg%iqout(8) = .true. ! output eta? + fg%nqout = 0 + do k=1,8 + if (fg%iqout(k)) fg%nqout = fg%nqout + 1 + enddo + endif + + write(6,*) '+++ nqout = ',fg%nqout + + ! Allocate new fixed grid data arrays at early, late time: + ! dimension (10,:,:) to work for either GeoClaw or D-Claw + allocate(fg%early(10, fg%mx,fg%my)) + fg%early = nan() + + allocate(fg%late(10, fg%mx,fg%my)) + fg%late = nan() + enddo close(unit) - + FGOUT_tcfmax=-1.d16 module_setup = .true. end if end subroutine set_fgout - - + + !=====================FGOUT_INTERP======================================= ! This routine interpolates q and aux on a computational grid ! 
to an fgout grid not necessarily aligned with the computational grid @@ -201,10 +280,10 @@ end subroutine set_fgout subroutine fgout_interp(fgrid_type,fgrid, & t,q,meqn,mxc,myc,mbc,dxc,dyc,xlowc,ylowc, & maux,aux) - - use geoclaw_module, only: dry_tolerance + + use geoclaw_module, only: dry_tolerance implicit none - + ! Subroutine arguments integer, intent(in) :: fgrid_type type(fgout_grid), intent(inout) :: fgrid @@ -212,12 +291,11 @@ subroutine fgout_interp(fgrid_type,fgrid, & real(kind=8), intent(in) :: t,dxc,dyc,xlowc,ylowc real(kind=8), intent(in) :: q(meqn,1-mbc:mxc+mbc,1-mbc:myc+mbc) real(kind=8), intent(in) :: aux(maux,1-mbc:mxc+mbc,1-mbc:myc+mbc) - + integer, parameter :: method = 0 ! interpolate in space? - + ! Indices integer :: ifg,jfg,m,ic1,ic2,jc1,jc2 - integer :: bathy_index,eta_index ! Tolerances real(kind=8) :: total_depth,depth_indicator,nan_check @@ -225,127 +303,124 @@ subroutine fgout_interp(fgrid_type,fgrid, & ! Geometry real(kind=8) :: xfg,yfg,xc1,xc2,yc1,yc2,xhic,yhic real(kind=8) :: geometry(4) - + real(kind=8) :: points(2,2), eta_tmp - + ! Work arrays for eta interpolation real(kind=8) :: eta(2,2),h(2,2) - - + + ! Alias to data in fixed grid integer :: num_vars real(kind=8), pointer :: fg_data(:,:,:) - - + + ! Setup aliases for specific fixed grid if (fgrid_type == 1) then - num_vars = fgrid%num_vars(1) + num_vars = fgrid%num_vars fg_data => fgrid%early else if (fgrid_type == 2) then - num_vars = fgrid%num_vars(1) + num_vars = fgrid%num_vars fg_data => fgrid%late else write(6,*) '*** Unexpected fgrid_type = ', fgrid_type stop ! fgrid_type==3 is deprecated, use fgmax grids instead endif - - xhic = xlowc + dxc*mxc - yhic = ylowc + dyc*myc - - ! Find indices of various quantities in the fgrid arrays - bathy_index = 4 ! works for both shallow and bouss - eta_index = 5 ! works for both shallow and bouss - + + xhic = xlowc + dxc*mxc + yhic = ylowc + dyc*myc + + !write(59,*) '+++ ifg,jfg,eta,geometry at t = ',t - - ! Primary interpolation loops + + ! Primary interpolation loops do ifg=1,fgrid%mx xfg=fgrid%x_low + (ifg-0.5d0)*fgrid%dx ! cell centers do jfg=1,fgrid%my yfg=fgrid%y_low + (jfg-0.5d0)*fgrid%dy ! cell centers - + ! Check to see if this coordinate is inside of this grid if (.not.((xfg < xlowc.or.xfg > xhic) & .or.(yfg < ylowc.or.yfg > yhic))) then - - ! find where xfg,yfg is in the computational grid and + + ! find where xfg,yfg is in the computational grid and ! compute the indices ! (Note: may be subject to rounding error if fgout point ! is right on a cell edge!) ic1 = int((xfg - xlowc + dxc)/dxc) jc1 = int((yfg - ylowc + dyc)/dyc) - + if (method == 0) then - + ! piecewise constant: take values from cell (ic1,jc1): - + forall (m=1:meqn) fg_data(m,ifg,jfg) = q(m,ic1,jc1) end forall - - fg_data(bathy_index,ifg,jfg) = aux(1,ic1,jc1) - + + fg_data(fgrid%bathy_index,ifg,jfg) = aux(1,ic1,jc1) + ! for pw constant we take B, h, eta from same cell, ! so setting eta = h+B should be fine even near shore: - fg_data(eta_index,ifg,jfg) = fg_data(1,ifg,jfg) & - + fg_data(bathy_index,ifg,jfg) - - + fg_data(fgrid%eta_index,ifg,jfg) = fg_data(1,ifg,jfg) & + + fg_data(fgrid%bathy_index,ifg,jfg) + + else if (method == 1) then - + ! bilinear used to interpolate to xfg,yfg ! (not recommended) - + ! 
define constant parts of bilinear if (ic1.eq.mxc) ic1=mxc-1 - if (jc1.eq.myc) jc1=myc-1 + if (jc1.eq.myc) jc1=myc-1 ic2 = ic1 + 1 jc2 = jc1 + 1 - + xc1 = xlowc + dxc * (ic1 - 0.5d0) yc1 = ylowc + dyc * (jc1 - 0.5d0) xc2 = xlowc + dxc * (ic2 - 0.5d0) yc2 = ylowc + dyc * (jc2 - 0.5d0) - + geometry = [(xfg - xc1) / dxc, & (yfg - yc1) / dyc, & (xfg - xc1) * (yfg - yc1) / (dxc*dyc), & 1.d0] - - + + ! Interpolate all conserved quantities and bathymetry forall (m=1:meqn) fg_data(m,ifg,jfg) = & - interpolate(q(m,ic1:ic2,jc1:jc2), geometry) + interpolate(q(m,ic1:ic2,jc1:jc2), geometry) end forall - fg_data(bathy_index,ifg,jfg) = & + fg_data(fgrid%bathy_index,ifg,jfg) = & interpolate(aux(1,ic1:ic2,jc1:jc2),geometry) - + ! surface eta = h + B: - + ! Note that for pw bilinear interp there may ! be a problem interpolating each separately since ! interpolated h + interpolated B may be much larger ! than eta should be offshore. eta = q(1,ic1:ic2,jc1:jc2) + aux(1,ic1:ic2,jc1:jc2) - fg_data(eta_index,ifg,jfg) = interpolate(eta,geometry) - ! NEED TO FIX + fg_data(fgrid%eta_index,ifg,jfg) = interpolate(eta,geometry) + ! NEED TO FIX endif - - + + ! save the time this fgout point was computed: fg_data(num_vars,ifg,jfg) = t - - + + endif ! if fgout point is on this grid enddo ! fgout y-coordinate loop enddo ! fgout x-coordinte loop - + end subroutine fgout_interp - + !================ fgout_write ========================================== ! This routine interpolates in time and then outputs a grid at @@ -357,12 +432,12 @@ subroutine fgout_write(fgrid,out_time,out_index) use geoclaw_module, only: dry_tolerance implicit none - + ! Subroutine arguments type(fgout_grid), intent(inout) :: fgrid real(kind=8), intent(in) :: out_time integer, intent(in) :: out_index - + ! I/O integer, parameter :: unit = 87 character(len=15) :: fg_filename @@ -370,14 +445,14 @@ subroutine fgout_write(fgrid,out_time,out_index) character(len=8) :: file_format integer :: grid_number,ipos,idigit,out_number,columns integer :: ifg,ifg1, iframe,iframe1 - + integer, parameter :: method = 0 ! interpolate in time? - - ! Output format strings + + ! Output format strings ! These are now the same as in outval for frame data, for compatibility ! For fgout grids there is only a single grid (ngrids=1) ! and we set AMR_level=0, naux=0, nghost=0 (so no extra cells in binary) - + character(len=*), parameter :: header_format = & "(i6,' grid_number',/," // & "i6,' AMR_level',/," // & @@ -387,7 +462,7 @@ subroutine fgout_write(fgrid,out_time,out_index) "e26.16,' ylow', /," // & "e26.16,' dx', /," // & "e26.16,' dy',/)" - + character(len=*), parameter :: t_file_format = "(e18.8,' time', /," // & "i6,' meqn'/," // & "i6,' ngrids'/," // & @@ -395,64 +470,64 @@ subroutine fgout_write(fgrid,out_time,out_index) "i6,' ndim'/," // & "i6,' nghost'/," // & "a10,' format'/,/)" - + ! Other locals - integer :: i,j,m - real(kind=8) :: t0,tf,tau, qaug(6) + integer :: i,j,m,iq,k + real(kind=8) :: t0,tf,tau, qaug(10) real(kind=8), allocatable :: qeta(:,:,:) real(kind=4), allocatable :: qeta4(:,:,:) real(kind=8) :: h_early,h_late,topo_early,topo_late - - allocate(qeta(4, fgrid%mx, fgrid%my)) ! to store h,hu,hv,eta - - - ! Interpolate the grid in time, to the output time, using - ! the solution in fgrid1 and fgrid2, which represent the + + allocate(qeta(fgrid%nqout, fgrid%mx, fgrid%my)) ! to store h,hu,hv,eta + ! or subset + + ! Interpolate the grid in time, to the output time, using + ! the solution in fgrid1 and fgrid2, which represent the ! 
solution on the fixed grid at the two nearest computational times do j=1,fgrid%my do i=1,fgrid%mx - + ! Check for small numbers - forall(m=1:fgrid%num_vars(1)-1,abs(fgrid%early(m,i,j)) < 1d-90) + forall(m=1:fgrid%num_vars-1,abs(fgrid%early(m,i,j)) < 1d-90) fgrid%early(m,i,j) = 0.d0 end forall if (method == 0) then - + ! no interpolation in time, use solution from full step: qaug = fgrid%early(:,i,j) - + ! note that CFL condition ==> waves can't move more than 1 ! cell per time step on each level, so solution from nearest ! full step should be correct to within a cell width ! Better to use early than late since for refinement tracking ! wave moving out into still water. - + else if (method == 1) then - + ! interpolate in time. May have problems near shore? - - ! Fetch times for interpolation, this is done per grid point + + ! Fetch times for interpolation, this is done per grid point ! since each grid point may come from a different source - t0 = fgrid%early(fgrid%num_vars(1),i,j) - tf = fgrid%late(fgrid%num_vars(1),i,j) + t0 = fgrid%early(fgrid%num_vars,i,j) + tf = fgrid%late(fgrid%num_vars,i,j) tau = (out_time - t0) / (tf - t0) - + ! check for small values: - forall(m=1:fgrid%num_vars(1)-1,abs(fgrid%late(m,i,j)) < 1d-90) + forall(m=1:fgrid%num_vars-1,abs(fgrid%late(m,i,j)) < 1d-90) fgrid%late(m,i,j) = 0.d0 end forall - + ! linear interpolation: qaug = (1.d0-tau)*fgrid%early(:,i,j) + tau*fgrid%late(:,i,j) - + ! If resolution changed between early and late time, may be ! problems near shore when interpolating B, h, eta ! separately (at least in case when B changed and point ! was dry at one time and wet the other). ! Switch back to fgrid%early values, only in this case. ! This is implemented below but not extensively tested. - + if (qaug(1) > 0.d0) then topo_early = fgrid%early(4,i,j) topo_late = fgrid%late(4,i,j) @@ -470,13 +545,53 @@ subroutine fgout_write(fgrid,out_time,out_index) endif endif endif - + ! Output the conserved quantities and eta value - qeta(1,i,j) = qaug(1) ! h - qeta(2,i,j) = qaug(2) ! hu - qeta(3,i,j) = qaug(3) ! hv - qeta(4,i,j) = qaug(5) ! eta + iq = 1 + ! qaug(1:3) are h,hu,hv for both GeoClaw and D-Claw: + if (fgrid%iqout(1)) then + qeta(iq,i,j) = qaug(1) ! h + iq = iq+1 + endif + if (fgrid%iqout(2)) then + qeta(iq,i,j) = qaug(2) ! hu + iq = iq+1 + endif + if (fgrid%iqout(3)) then + qeta(iq,i,j) = qaug(3) ! hv + iq = iq+1 + endif + if (fgrid%num_vars == 6) then + ! GeoClaw: + if (fgrid%iqout(4)) then + qeta(iq,i,j) = qaug(5) ! eta since qaug(4)=topo + iq = iq+1 + endif + + else if (fgrid%num_vars == 10) then + ! D-Claw: + if (fgrid%iqout(4)) then + qeta(iq,i,j) = qaug(4) ! hm + iq = iq+1 + endif + if (fgrid%iqout(5)) then + qeta(iq,i,j) = qaug(5) ! pb + iq = iq+1 + endif + if (fgrid%iqout(6)) then + qeta(iq,i,j) = qaug(6) ! hchi + iq = iq+1 + endif + if (fgrid%iqout(7)) then + qeta(iq,i,j) = qaug(7) ! b_eroded + iq = iq+1 + endif + if (fgrid%iqout(8)) then + qeta(iq,i,j) = qaug(9) ! eta since qaug(8)=topo + iq = iq+1 + endif + endif enddo enddo @@ -499,96 +614,83 @@ subroutine fgout_write(fgrid,out_time,out_index) cframeno(ipos:ipos) = char(ichar('0') + idigit) iframe1 = iframe1/10 enddo - - fg_filename = 'fgout' // cfgno // '.q' // cframeno + + fg_filename = 'fgout' // cfgno // '.q' // cframeno open(unit,file=fg_filename,status='unknown',form='formatted') - ! 
Determine number of columns that will be written out - columns = fgrid%num_vars(1) - 1 - if (fgrid%num_vars(2) > 1) then - columns = columns + 2 - endif - - !write(6,*) '+++ fgout out_time = ',out_time - !write(6,*) '+++ fgrid%num_vars: ',fgrid%num_vars(1),fgrid%num_vars(2) - - ! Write out header in .q file: - !write(unit,header_format) out_time,fgrid%mx,fgrid%my, & - ! fgrid%x_low,fgrid%y_low, fgrid%x_hi,fgrid%y_hi, columns write(unit,header_format) fgrid%fgno, 0, fgrid%mx,fgrid%my, & fgrid%x_low,fgrid%y_low, fgrid%dx, fgrid%dy - + if (fgrid%output_format == 1) then ! ascii output added to .q file: do j=1,fgrid%my do i=1,fgrid%mx - write(unit, "(50e26.16)") qeta(1,i,j),qeta(2,i,j), & - qeta(3,i,j),qeta(4,i,j) + write(unit, "(50e26.16)") (qeta(k,i,j), k=1,fgrid%nqout) enddo write(unit,*) ' ' ! blank line required between rows enddo - endif - + endif + close(unit) - + if (fgrid%output_format == 3) then ! binary output goes in .b file as full 8-byte (float64): - fg_filename = 'fgout' // cfgno // '.b' // cframeno + fg_filename = 'fgout' // cfgno // '.b' // cframeno open(unit=unit, file=fg_filename, status="unknown", & access='stream') write(unit) qeta close(unit) else if (fgrid%output_format == 2) then ! binary output goes in .b file as 4-byte (float32): - fg_filename = 'fgout' // cfgno // '.b' // cframeno + fg_filename = 'fgout' // cfgno // '.b' // cframeno open(unit=unit, file=fg_filename, status="unknown", & access='stream') - allocate(qeta4(4, fgrid%mx, fgrid%my)) ! for 4-byte binary output + allocate(qeta4(fgrid%nqout, fgrid%mx, fgrid%my)) qeta4 = real(qeta, kind=4) write(unit) qeta4 deallocate(qeta4) close(unit) endif - + deallocate(qeta) ! time info .t file: - + if (fgrid%output_format == 1) then file_format = 'ascii' else if (fgrid%output_format == 2) then file_format = 'binary32' else if (fgrid%output_format == 3) then file_format = 'binary64' - else + else write(6,*) '*** unrecognized fgrid%output_format = ', & fgrid%output_format write(6,*) '*** should be ascii, binary32, or binary64' endif - - fg_filename = 'fgout' // cfgno // '.t' // cframeno + + fg_filename = 'fgout' // cfgno // '.t' // cframeno open(unit=unit, file=fg_filename, status='unknown', form='formatted') ! time, num_eqn+1, num_grids, num_aux, num_dim, num_ghost: - write(unit, t_file_format) out_time, 4, 1, 0, 2, 0, file_format + write(unit, t_file_format) out_time, fgrid%nqout, 1, 0, 2, 0,file_format close(unit) - + print "(a,i4,a,i4,a,e18.8)",'Writing fgout grid #',fgrid%fgno, & ' frame ',out_index,' at time =',out_time - + ! Index into qeta for binary output ! Note that this implicitly assumes that we are outputting only h, hu, hv ! and will not output more (change num_eqn parameter above) - + end subroutine fgout_write - - + + ! ========================================================================= ! Utility functions for this module ! Returns back a NaN - + real(kind=8) function nan() real(kind=8) dnan integer inan(2) @@ -597,21 +699,21 @@ real(kind=8) function nan() inan(2)=2147483647 nan=dnan end function nan - + ! Interpolation function (in space) ! Given 4 points (points) and geometry from x,y,and cross terms - + real(kind=8) pure function interpolate(points,geometry) result(interpolant) - + implicit none - + ! Function signature real(kind=8), intent(in) :: points(2,2) real(kind=8), intent(in) :: geometry(4) integer :: icell, jcell - + ! pw bilinear - ! This is set up as a dot product between the approrpriate terms in + ! This is set up as a dot product between the approrpriate terms in ! the input data. 
This routine could be vectorized or a BLAS routine ! used instead of the intrinsics to ensure that the fastest routine ! possible is being used @@ -619,8 +721,8 @@ real(kind=8) pure function interpolate(points,geometry) result(interpolant) points(1,2)-points(1,1), & points(1,1) + points(2,2) - (points(2,1) + points(1,2)), & points(1,1)] * geometry) - + end function interpolate - + end module fgout_module diff --git a/src/2d/shallow/gauges_module.f90 b/src/2d/shallow/gauges_module.f90 index 82b20e2a7..5c60833d2 100644 --- a/src/2d/shallow/gauges_module.f90 +++ b/src/2d/shallow/gauges_module.f90 @@ -58,8 +58,8 @@ module gauges_module integer :: gauge_num integer :: gdata_bytes - character(len=14) :: file_name ! for header (and data if 'ascii') - character(len=14) :: file_name_bin ! used if file_format='binary' + character(len=24) :: file_name ! for header (and data if 'ascii') + character(len=24) :: file_name_bin ! used if file_format='binary' ! Location in time and space real(kind=8) :: x, y, t_start, t_end @@ -116,6 +116,7 @@ subroutine set_gauges(restart, num_eqn, num_aux, fname) integer, parameter :: UNIT = 7 character(len=128) :: header_1 character(len=40) :: q_column, aux_column + character(len=15) :: numstr if (.not.module_setup) then @@ -207,17 +208,15 @@ subroutine set_gauges(restart, num_eqn, num_aux, fname) ! Create gauge output files do i = 1, num_gauges - gauges(i)%file_name = 'gaugexxxxx.txt' ! ascii num = gauges(i)%gauge_num - do pos = 10, 6, -1 - digit = mod(num,10) - gauges(i)%file_name(pos:pos) = char(ichar('0') + digit) - num = num / 10 - end do + + ! convert num to string numstr with zero padding if <5 digits + ! since we want format gauge00012.txt or gauge1234567.txt: + write (numstr,'(I0.5)') num + gauges(i)%file_name = 'gauge'//trim(numstr)//'.txt' if (gauges(i)%file_format >= 2) then - gauges(i)%file_name_bin = gauges(i)%file_name - gauges(i)%file_name_bin(12:14) = 'bin' + gauges(i)%file_name_bin = 'gauge'//trim(numstr)//'.bin' endif @@ -243,7 +242,7 @@ subroutine set_gauges(restart, num_eqn, num_aux, fname) if (.not. restart) then ! Write header to .txt file: - header_1 = "('# gauge_id= ',i5,' " // & + header_1 = "('# gauge_id= ',i0,' " // & "location=( ',1e17.10,' ',1e17.10,' ) " // & "num_var= ',i2)" write(OUTGAUGEUNIT, header_1) gauges(i)%gauge_num, & diff --git a/src/2d/shallow/src2.f90 b/src/2d/shallow/src2.f90 index dc603c36e..376daf7e0 100644 --- a/src/2d/shallow/src2.f90 +++ b/src/2d/shallow/src2.f90 @@ -6,7 +6,7 @@ subroutine src2(meqn,mbc,mx,my,xlower,ylower,dx,dy,q,maux,aux,t,dt) use geoclaw_module, only: manning_break, num_manning use geoclaw_module, only: spherical_distance, coordinate_system use geoclaw_module, only: RAD2DEG, pi, dry_tolerance, DEG2RAD - use geoclaw_module, only: rho_air + use geoclaw_module, only: rho_air, rho use geoclaw_module, only: earth_radius, sphere_source use storm_module, only: wind_forcing, pressure_forcing, wind_drag @@ -40,10 +40,6 @@ subroutine src2(meqn,mbc,mx,my,xlower,ylower,dx,dy,q,maux,aux,t,dt) ! friction source term real(kind=8), parameter :: depth_tolerance = 1.0d-30 - ! Physics - ! Nominal density of water - real(kind=8), parameter :: rho = 1025.d0 - ! ---------------------------------------------------------------- ! Spherical geometry source term(s) ! 
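
For reference (not part of the patch): in the hunk that follows, the wind stress is divided by the water density rho(1) taken from geoclaw_module rather than the hard-coded 1025 kg/m^3 parameter removed above. A rough Python sketch of that momentum update; the constant drag coefficient stands in for the wind_drag(wind_speed, phi) function and is an assumption for illustration only.

# Sketch only: explicit wind forcing of the momentum components, mirroring
# tau = wind_drag(wind_speed, phi) * rho_air * wind_speed / rho(1) below.
rho_air = 1.15       # kg/m^3, nominal air density (assumed value)
rho_water = 1025.0   # kg/m^3, plays the role of rho(1) from geoclaw_module

def apply_wind_forcing(hu, hv, wx, wy, dt, drag_coefficient=2.0e-3):
    """Return (hu, hv) after one explicit step of wind stress; the wind-speed
    and dry-cell tolerance checks in src2.f90 are omitted for brevity."""
    wind_speed = (wx**2 + wy**2)**0.5
    tau = drag_coefficient * rho_air * wind_speed / rho_water
    return hu + dt * tau * wx, hv + dt * tau * wy

print(apply_wind_forcing(0.0, 0.0, wx=20.0, wy=0.0, dt=1.0))
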
@@ -154,7 +150,7 @@ subroutine src2(meqn,mbc,mx,my,xlower,ylower,dx,dy,q,maux,aux,t,dt) endif wind_speed = sqrt(aux(wind_index,i,j)**2 & + aux(wind_index+1,i,j)**2) - tau = wind_drag(wind_speed, phi) * rho_air * wind_speed / rho + tau = wind_drag(wind_speed, phi) * rho_air * wind_speed / rho(1) q(2,i,j) = q(2,i,j) + dt * tau * aux(wind_index,i,j) q(3,i,j) = q(3,i,j) + dt * tau * aux(wind_index+1,i,j) endif @@ -195,8 +191,8 @@ subroutine src2(meqn,mbc,mx,my,xlower,ylower,dx,dy,q,maux,aux,t,dt) ! ! Modify momentum in each layer ! if (h > dry_tolerance) then - ! q(2, i, j) = q(2, i, j) - dt * h * P_gradient(1) / rho - ! q(3, i, j) = q(3, i, j) - dt * h * P_gradient(2) / rho + ! q(2, i, j) = q(2, i, j) - dt * h * P_gradient(1) / rho(1) + ! q(3, i, j) = q(3, i, j) - dt * h * P_gradient(2) / rho(1) ! end if ! enddo ! enddo diff --git a/src/2d/shallow/surge/model_storm_module.f90 b/src/2d/shallow/surge/model_storm_module.f90 index cce7445a9..b49e98120 100644 --- a/src/2d/shallow/surge/model_storm_module.f90 +++ b/src/2d/shallow/surge/model_storm_module.f90 @@ -51,6 +51,19 @@ module model_storm_module end type model_storm_type + ! How to deterimine which way a storm should be made to spin + ! The default defers simply to assuming y is a latitude + ! Here either an integer or bool can be returned but as implemented 1 + ! refers to the Northern Hemisphere and therefore causes the storm to spin + ! in a counter-clockwise direction. + abstract interface + logical pure function rotation_def(x, y) + implicit none + real(kind=8), intent(in) :: x, y + end function rotation_def + end interface + procedure(rotation_def), pointer :: rotation + ! Interal tracking variables for storm integer, private :: last_storm_index @@ -68,12 +81,6 @@ module model_storm_module ! track to be close to but not equal the start time of the simulation real(kind=8), parameter :: TRACKING_TOLERANCE = 1d-10 - ! Global constants #TODO: Some of these are in geoclaw already - real(kind=8) :: pi=3.1415927 - real(kind=8) :: omega=7.2921e-5 - real(kind=8) :: chi=1.0 - real(kind=8) :: alpha=1.0 - contains @@ -153,8 +160,8 @@ subroutine set_storm(storm_data_path, storm, storm_spec_type, log_unit) 0.5d0 * (x(1) + y(1)), y(2)) storm%velocity(2, i) = sign(ds / dt, y(2) - x(2)) else - storm%velocity(1, i) = abs(x(2) - x(1)) / dt - storm%velocity(2, i) = abs(y(2) - y(1)) / dt + storm%velocity(1, i) = (y(1) - x(1)) / dt + storm%velocity(2, i) = (y(2) - x(2)) / dt end if end do @@ -338,7 +345,7 @@ subroutine get_storm_data(t, storm, location, velocity, max_wind_radius, & max_wind_speed, central_pressure, & radius, central_pressure_change) - use geoclaw_module, only: deg2rad, latlon2xy, xy2latlon + use geoclaw_module, only: deg2rad, latlon2xy, xy2latlon, coordinate_system implicit none @@ -382,13 +389,20 @@ subroutine get_storm_data(t, storm, location, velocity, max_wind_radius, & ! Convert coordinates temporarily to meters so that we can use ! 
the pre-calculated m/s velocities from before - x = latlon2xy(storm%track(2:3,i),storm%track(2:3,i)) - x = x + (t - storm%track(1,i)) * storm%velocity(:,i) - - fn = [xy2latlon(x,storm%track(2:3,i)), & - storm%velocity(:,i), storm%max_wind_radius(i), & - storm%max_wind_speed(i), storm%central_pressure(i), & - storm%radius(i), storm%central_pressure_change(i)] + if (coordinate_system == 2) then + x = latlon2xy(storm%track(2:3,i),storm%track(2:3,i)) + x = x + (t - storm%track(1,i)) * storm%velocity(:,i) + fn = [xy2latlon(x,storm%track(2:3,i)), & + storm%velocity(:,i), storm%max_wind_radius(i), & + storm%max_wind_speed(i), storm%central_pressure(i), & + storm%radius(i), storm%central_pressure_change(i)] + else + x = x + (t - storm%track(1,i)) * storm%velocity(:,i) + fn = [x, & + storm%velocity(:,i), storm%max_wind_radius(i), & + storm%max_wind_speed(i), storm%central_pressure(i), & + storm%radius(i), storm%central_pressure_change(i)] + end if else ! Inbetween two forecast time points (the function storm_index ! ensures that we are not before the first data point, i.e. i > 1) @@ -430,11 +444,8 @@ end subroutine get_storm_data pure subroutine adjust_max_wind(tv, mws, mod_mws, convert_height) real (kind=8), intent(inout) :: tv(2) - real (kind=8), intent(in) :: mws - logical, intent(in) :: convert_height - real (kind=8), intent(out) :: mod_mws real (kind=8) :: trans_speed, trans_mod @@ -521,21 +532,21 @@ end subroutine post_process_wind_estimate ! ========================================================================== pure subroutine calculate_polar_coordinate(x, y, sloc, r, theta) - use geoclaw_module, only: deg2rad, coordinate_system - use geoclaw_module, only: spherical_distance + use geoclaw_module, only: deg2rad, coordinate_system + use geoclaw_module, only: spherical_distance - real(kind=8), intent(in) :: x, y, sloc(2) - real(kind=8), intent(out) :: r, theta + real(kind=8), intent(in) :: x, y, sloc(2) + real(kind=8), intent(out) :: r, theta - if (coordinate_system == 2) then - ! lat-long coordinates, uses Haversine formula - r = spherical_distance(x, y, sloc(1), sloc(2)) - theta = atan2((y - sloc(2)) * DEG2RAD,(x - sloc(1)) * DEG2RAD) - else - ! Strictly cartesian - r = sqrt( (x - sloc(1))**2 + (y - sloc(2))**2) - theta = atan2(y - sloc(2), x - sloc(1)) - end if + if (coordinate_system == 2) then + ! lat-long coordinates, uses Haversine formula + r = spherical_distance(x, y, sloc(1), sloc(2)) + theta = atan2((y - sloc(2)),(x - sloc(1))) + else + ! 
Strictly cartesian + r = sqrt( (x - sloc(1))**2 + (y - sloc(2))**2) + theta = atan2((y - sloc(2)), (x - sloc(1))) + end if end subroutine calculate_polar_coordinate @@ -653,7 +664,7 @@ subroutine set_holland_1980_fields(maux, mbc, mx, my, xlower, ylower, & call post_process_wind_estimate(maux, mbc, mx, my, i, j, wind, aux, & wind_index, pressure_index, r, radius, tv, mod_mws, theta, & - convert_height, y >= 0) + convert_height, rotation(x, y)) enddo enddo @@ -725,7 +736,7 @@ subroutine set_holland_2008_fields(maux, mbc, mx, my, xlower, ylower, & call post_process_wind_estimate(maux, mbc, mx, my, i, j, wind, aux, & wind_index, pressure_index, r, radius, tv, mod_mws, theta, & - convert_height, y >= 0) + convert_height, rotation(x, y)) enddo enddo @@ -817,7 +828,7 @@ subroutine set_holland_2010_fields(maux, mbc, mx, my, xlower, ylower, & call post_process_wind_estimate(maux, mbc, mx, my, i, j, wind, aux, & wind_index, pressure_index, r, radius, tv, mod_mws, theta, & - convert_height, y >= 0) + convert_height, rotation(x, y)) enddo enddo @@ -974,6 +985,7 @@ real(kind=8) function evaluate_inner_derivative(f,r_m,v_m,r_a,v_a) result(dMa) ! Variables real(kind=8) :: denominator, M_a + real(kind=8), parameter :: alpha=1.0 ! Calculate necessary components of the derivative to make ! calculations easier @@ -992,6 +1004,7 @@ real(kind=8) function evaluate_v_a(f,r_m,v_m,r_a) result(v_a) ! Input real(kind=8), intent(in) :: f, r_m, v_m, r_a + real(kind=8), parameter :: alpha=1.0 v_a = ((2.0*(r_a/r_m)**2)/(2.0-alpha+alpha*(r_a/r_m)**2))**(1.0/(2.0-alpha)) v_a = v_a*(0.5*f*r_m**2 + r_m*v_m) @@ -1061,6 +1074,7 @@ subroutine integrate_m_out(f,r_0,r_a,res,m_out) ! Parameters and Other variables real(kind=8) :: V_m, dr, r_guess, r_p integer :: i + real(kind=8), parameter :: chi=1.0 ! Intialize m_out(res) = 0.5*f*r_0**2 @@ -1104,6 +1118,8 @@ subroutine solve_hurricane_wind_parameters(f, r_m, v_m, res, r_0, r_a) real(kind=8) :: dr, r real(kind=8) :: inner_res, outer_res real(kind=8), dimension(1:res) :: v_out, v_in + real(kind=8), parameter :: chi=1.0 + real(kind=8), parameter :: alpha=1.0 ! 
Initialize guesses for merge and r_0 r_a = 2.0*r_m @@ -1255,8 +1271,8 @@ subroutine set_SLOSH_fields(maux, mbc, mx, my, xlower, ylower, & trans_speed_x = tv(1) * mwr * r / (mwr**2.d0 + r**2.d0) trans_speed_y = tv(2) * mwr * r / (mwr**2.d0 + r**2.d0) - aux(wind_index,i,j) = wind * merge(-1, 1, y >= 0) * sin(theta) + trans_speed_x - aux(wind_index+1,i,j) = wind * merge(1, -1, y >= 0) * cos(theta) + trans_speed_y + aux(wind_index,i,j) = wind * merge(-1, 1, rotation(x, y)) * sin(theta) + trans_speed_x + aux(wind_index+1,i,j) = wind * merge(1, -1, rotation(x, y)) * cos(theta) + trans_speed_y enddo enddo @@ -1331,7 +1347,7 @@ subroutine set_rankine_fields(maux, mbc, mx, my, xlower, ylower, & call post_process_wind_estimate(maux, mbc, mx, my, i, j, wind, aux, & wind_index, pressure_index, r, radius, tv, mod_mws, theta, & - convert_height, y >= 0) + convert_height, rotation(x, y)) enddo enddo @@ -1419,7 +1435,7 @@ subroutine set_modified_rankine_fields(maux, mbc, mx, my, xlower, ylower, & call post_process_wind_estimate(maux, mbc, mx, my, i, j, wind, aux, & wind_index, pressure_index, r, radius, tv, mod_mws, theta, & - convert_height, y >= 0) + convert_height, rotation(x, y)) enddo enddo @@ -1492,7 +1508,7 @@ subroutine set_deMaria_fields(maux, mbc, mx, my, xlower, ylower, & call post_process_wind_estimate(maux, mbc, mx, my, i, j, wind, aux, & wind_index, pressure_index, r, radius, tv, mod_mws, theta, & - convert_height, y >= 0) + convert_height, rotation(x, y)) enddo enddo @@ -1584,7 +1600,7 @@ subroutine set_willoughby_fields(maux, mbc, mx, my, xlower, ylower, & call post_process_wind_estimate(maux, mbc, mx, my, i, j, wind, aux, & wind_index, pressure_index, r, radius, tv, mod_mws, theta, & - convert_height, y >= 0) + convert_height, rotation(x, y)) enddo enddo diff --git a/src/2d/shallow/surge/storm_module.f90 b/src/2d/shallow/surge/storm_module.f90 index 7af8b46ae..632cbff90 100644 --- a/src/2d/shallow/surge/storm_module.f90 +++ b/src/2d/shallow/surge/storm_module.f90 @@ -11,7 +11,7 @@ module storm_module - use model_storm_module, only: model_storm_type + use model_storm_module, only: model_storm_type, rotation use data_storm_module, only: data_storm_type implicit none @@ -126,7 +126,7 @@ subroutine set_storm(data_file) ! Locals integer, parameter :: unit = 13 - integer :: i, drag_law + integer :: i, drag_law, rotation_override character(len=200) :: storm_file_path, line if (.not.module_setup) then @@ -153,6 +153,17 @@ subroutine set_storm(data_file) stop "*** ERROR *** Invalid wind drag law." end select read(unit,*) pressure_forcing + read(unit,*) rotation_override + select case(rotation_override) + case(0) + rotation => hemisphere_rotation + case(1) + rotation => N_rotation + case(2) + rotation => S_rotation + case default + stop " *** ERROR *** Roation override invalid." + end select read(unit,*) ! Set some parameters @@ -480,4 +491,26 @@ subroutine output_storm_location(t) end subroutine output_storm_location + ! ========================================================================== + ! Default to assuming y is a latitude and if y >= 0 we are want to spin + ! counter-clockwise + ! ========================================================================== + logical pure function hemisphere_rotation(x, y) result(rotation) + implicit none + real(kind=8), intent(in) :: x, y + rotation = (y >= 0.d0) + end function hemisphere_rotation + ! 
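
For reference (not part of the patch): hemisphere_rotation above, together with N_rotation and S_rotation below and the rotation_override value read in set_storm, amounts to the following selection; a minimal Python sketch, with the function name chosen only for illustration.

# Sketch only: how rotation_override selects the storm spin direction.
#   0 -> hemisphere-based (counter-clockwise where y >= 0, treating y as latitude)
#   1 -> always counter-clockwise (Northern-Hemisphere style)
#   2 -> always clockwise (Southern-Hemisphere style)
def spins_counterclockwise(x, y, rotation_override=0):
    # x is unused but kept to mirror the Fortran interface rotation_def(x, y)
    if rotation_override == 0:
        return y >= 0.0
    elif rotation_override == 1:
        return True
    elif rotation_override == 2:
        return False
    raise ValueError("Invalid rotation_override: %s" % rotation_override)

# In setrun.py the option may also be given as a string; SurgeData.write below
# maps 'normal' -> 0 and strings containing 'n' or 's' to 1 or 2 respectively.
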
This version just returns the user defined direction + logical pure function N_rotation(x, y) result(rotation) + implicit none + real(kind=8), intent(in) :: x, y + rotation = .true. + end function N_rotation + ! This version just returns the user defined direction + logical pure function S_rotation(x, y) result(rotation) + implicit none + real(kind=8), intent(in) :: x, y + rotation = .false. + end function S_rotation + end module storm_module diff --git a/src/python/geoclaw/data.py b/src/python/geoclaw/data.py index e3d514ffc..83d9fa769 100755 --- a/src/python/geoclaw/data.py +++ b/src/python/geoclaw/data.py @@ -545,17 +545,19 @@ class SurgeData(clawpack.clawutil.data.ClawData): 'SLOSH': 4, 'rankine': 5, 'modified-rankine': 6, - 'DeMaria': 7 + 'DeMaria': 7, + 'willoughby': 9, } - storm_spec_not_implemented = ['CLE'] + storm_spec_not_implemented = ['CLE', 'willoughby'] def __init__(self): - super(SurgeData,self).__init__() + super(SurgeData, self).__init__() # Source term controls - self.add_attribute('wind_forcing',False) - self.add_attribute('drag_law',1) - self.add_attribute('pressure_forcing',False) + self.add_attribute('wind_forcing', False) + self.add_attribute('drag_law', 1) + self.add_attribute('pressure_forcing', False) + self.add_attribute('rotation_override', 0) # Algorithm parameters - Indexing is python based self.add_attribute("wind_index", 4) @@ -563,8 +565,8 @@ def __init__(self): self.add_attribute("display_landfall_time", False) # AMR parameters - self.add_attribute('wind_refine',[20.0,40.0,60.0]) - self.add_attribute('R_refine',[60.0e3,40e3,20e3]) + self.add_attribute('wind_refine', [20.0,40.0,60.0]) + self.add_attribute('R_refine', [60.0e3,40e3,20e3]) # Storm parameters self.add_attribute('storm_type', None) # Backwards compatibility @@ -572,7 +574,7 @@ def __init__(self): self.add_attribute("storm_file", None) # File(s) containing data - def write(self,out_file='surge.data',data_source="setrun.py"): + def write(self, out_file='surge.data', data_source="setrun.py"): """Write out the data file to the path given""" # print "Creating data file %s" % out_file @@ -582,6 +584,19 @@ def write(self,out_file='surge.data',data_source="setrun.py"): self.data_write('drag_law', description='(Type of drag law to use)') self.data_write('pressure_forcing', description="(Pressure source term used)") + if isinstance(self.rotation_override, str): + if self.rotation_override.lower() == "normal": + self.rotation_override = 0 + elif "n" in self.rotation_override.lower(): + self.rotation_override = 1 + elif "s" in self.rotation_override.lower(): + self.rotation_override = 2 + else: + raise ValueError("Unknown rotation_override specification.") + else: + self.rotation_override = int(self.rotation_override) + self.data_write('rotation_override', + description="(Override storm rotation)") self.data_write() self.data_write("wind_index", value=self.wind_index + 1, diff --git a/src/python/geoclaw/fgout_tools.py b/src/python/geoclaw/fgout_tools.py index 411cce574..ed8f675c6 100644 --- a/src/python/geoclaw/fgout_tools.py +++ b/src/python/geoclaw/fgout_tools.py @@ -25,12 +25,11 @@ """ import os - +from numpy import sqrt, ma, mod import numpy -from numpy import ma, mod, sqrt - class FGoutFrame(object): + """ Class to hold a single frame of fgout data at one output time. Several attributes are defined as properties that can be evaluated @@ -56,7 +55,7 @@ def __init__(self, fgout_grid, frameno=None): # Define shortcuts to attributes of self.fgout_grid that are the same # for all frames (e.g. 
X,Y) to avoid storing grid for every frame. - + @property def x(self): return self.fgout_grid.x @@ -64,15 +63,15 @@ def x(self): @property def y(self): return self.fgout_grid.y - + @property def X(self): return self.fgout_grid.X - + @property def Y(self): return self.fgout_grid.Y - + @property def delta(self): return self.fgout_grid.delta @@ -80,15 +79,15 @@ def delta(self): @property def extent_centers(self): return self.fgout_grid.extent_centers - + @property def extent_edges(self): return self.fgout_grid.extent_edges - + @property def drytol(self): return self.fgout_grid.drytol - + # Define attributes such as h as @properties with lazy evaluation: # the corresponding array is created and stored only when first # accessed by the user. Those not needed are not created. @@ -97,61 +96,55 @@ def drytol(self): def h(self): """depth""" if self._h is None: - # print('+++ setting _h...') - self._h = self.q[0, :, :] - # print('+++ getting _h...') + #print('+++ setting _h...') + self._h = self.q[0,:,:] + #print('+++ getting _h...') return self._h @property def hu(self): """momentum h*u""" if self._hu is None: - self._hu = self.q[1, :, :] + self._hu = self.q[1,:,:] return self._hu @property def u(self): """speed u, computed as hu/h or set to 0 if h self.drytol), - ) + self._u = numpy.divide(self.hu, self.h,\ + out=numpy.zeros(self.h.shape, dtype=float), \ + where=(self.h>self.drytol)) return self._u @property def hv(self): """momentum h*v""" if self._hv is None: - self._hv = self.q[2, :, :] + self._hv = self.q[2,:,:] return self._hv @property def v(self): """speed v, computed as hv/h or set to 0 if h self.drytol), - ) + self._v = numpy.divide(self.hv, self.h,\ + out=numpy.zeros(self.h.shape, dtype=float), \ + where=(self.h>self.drytol)) return self._v @property def eta(self): """surface eta = h+B""" if self._eta is None: - self._eta = self.q[-1, :, :] + self._eta = self.q[-1,:,:] return self._eta @property def B(self): """topography""" if self._B is None: - self._B = self.q[-1, :, :] - self.q[0, :, :] + self._B = self.q[-1,:,:] - self.q[0,:,:] return self._B @property @@ -170,38 +163,42 @@ def hss(self): class FGoutGrid(object): + """ New class introduced in 5.9.0 to keep store information both about the fgout input data and the output generated by a GeoClaw run. 
""" - def __init__(self, fgno=None, outdir=None, output_format=None): + def __init__(self,fgno=None,outdir=None,output_format=None): + # GeoClaw input values: - self.id = "" # identifier, optional + self.id = '' # identifier, optional self.point_style = 2 # only option currently supported self.npts = None self.nx = None self.ny = None - self.tstart = None + self.output_style = 1 + self.tstart = None self.tend = None self.nout = None self.fgno = fgno self.outdir = outdir self.output_format = output_format - - self.drytol = 1e-3 # used for computing u,v from hu,hv + self.dclaw = False + + self.drytol = 1e-3 # used for computing u,v from hu,hv # private attributes for those that are only created if # needed by the user: - + self._X = None self._Y = None self._x = None self._y = None - self._delta = None # (dx,dy) + self._delta = None # (dx,dy) self._extent_centers = None # defined by fgout points - self._extent_edges = None # extended so points are cell centers + self._extent_edges = None # extended so points are cell centers self._plotdata = None @@ -209,18 +206,18 @@ def __init__(self, fgno=None, outdir=None, output_format=None): def x(self): """1D array x of longitudes (cell centers)""" if self._x is None: - dx = (self.x2 - self.x1) / self.nx - self._x = numpy.linspace(self.x1 + dx / 2, self.x2 - dx / 2, self.nx) + dx = (self.x2 - self.x1)/self.nx + self._x = numpy.linspace(self.x1+dx/2, self.x2-dx/2, self.nx) return self._x @property def y(self): """1D array y of latitudes (cell centers)""" if self._y is None: - dy = (self.y2 - self.y1) / self.ny - self._y = numpy.linspace(self.y1 + dy / 2, self.y2 - dy / 2, self.ny) + dy = (self.y2 - self.y1)/self.ny + self._y = numpy.linspace(self.y1+dy/2, self.y2-dy/2, self.ny) return self._y - + @property def X(self): """2D array X of longitudes (cell centers)""" @@ -238,38 +235,31 @@ def Y(self): @property def delta(self): if self._delta is None: - dx = (self.x2 - self.x1) / self.nx - dy = (self.y2 - self.y1) / self.ny - self._delta = (dx, dy) + dx = (self.x2 - self.x1)/self.nx + dy = (self.y2 - self.y1)/self.ny + self._delta = (dx,dy) return self._delta - + @property def extent_centers(self): """Extent of cell centers [xmin,xmax,ymin,ymax]""" if self._extent_centers is None: - self._extent_centers = [ - self.x.min(), - self.x.max(), - self.y.min(), - self.y.max(), - ] + self._extent_centers = [self.x.min(), self.x.max(),\ + self.y.min(), self.y.max()] return self._extent_centers - + @property def extent_edges(self): """Extent of cell edges [xmin,xmax,ymin,ymax]""" if self._extent_edges is None: - dx, dy = self.delta - self._extent_edges = [ - self.x.min() - dx / 2, - self.x.max() + dx / 2, - self.y.min() - dy / 2, - self.y.max() + dy / 2, - ] + dx,dy = self.delta + self._extent_edges = [self.x.min()-dx/2, self.x.max()+dx/2,\ + self.y.min()-dy/2, self.y.max()+dy/2] return self._extent_edges + # Create plotdata of class clawpack.visclaw.ClawPlotData - # only when needed for reading GeoClaw output, + # only when needed for reading GeoClaw output, # since this must be done after fgno, outdir, output_format # have been specified: @@ -286,25 +276,23 @@ def set_plotdata(self): """ from clawpack.visclaw.data import ClawPlotData - assert self.fgno is not None, "*** fgno must be set" - assert self.outdir is not None, "*** outdir must be set" - assert self.output_format is not None, "*** output_format must be set" + assert self.fgno is not None, '*** fgno must be set' + assert self.outdir is not None, '*** outdir must be set' + assert self.output_format is not 
None, '*** output_format must be set' if 0: - print( - "+++ creating plotdata for fgno=%i, outdir=%s, format=%s" - % (self.fgno, self.outdir, self.output_format) - ) + print('+++ creating plotdata for fgno=%i, outdir=%s, format=%s' \ + % (self.fgno,self.outdir,self.output_format)) plotdata = ClawPlotData() plotdata.outdir = self.outdir plotdata.format = self.output_format - plotdata.file_prefix = "fgout%s" % str(self.fgno).zfill(4) - if self.output_format[:6] == "binary": + plotdata.file_prefix = 'fgout%s' % str(self.fgno).zfill(4) + if self.output_format[:6]=='binary': # could be 'binary', 'binary32' or 'binary64' - file_prefix_str = plotdata.file_prefix + ".b" + file_prefix_str = plotdata.file_prefix + '.b' else: - file_prefix_str = plotdata.file_prefix + ".q" - + file_prefix_str = plotdata.file_prefix + '.q' + # Contrary to usual default, set save_frames to False so that all # the fgout frames read in are not saved in plotdata.framesoln_dict. # Otherwise this might be a memory hog when making an animation with @@ -313,7 +301,8 @@ def set_plotdata(self): return plotdata - def read_fgout_grids_data(self, fgno=None, data_file="fgout_grids.data"): + + def read_fgout_grids_data(self, fgno=None, data_file='fgout_grids.data'): """ Read input info for fgout grid number fgno from the data file fgout_grids.data, which should have been created by setrun.py. @@ -321,45 +310,68 @@ def read_fgout_grids_data(self, fgno=None, data_file="fgout_grids.data"): """ if fgno is not None: self.fgno = fgno - assert self.fgno is not None, "*** fgno must be set" + assert self.fgno is not None, '*** fgno must be set' with open(data_file) as filep: lines = filep.readlines() fgout_input = None - for lineno, line in enumerate(lines): - if "fgno" in line: + for lineno,line in enumerate(lines): + if 'fgno' in line: if int(line.split()[0]) == self.fgno: - fgout_input = lines[lineno + 1 :] - # print('Found line %i: %s' % (lineno,line)) + fgout_input = lines[lineno+1:] + #print('Found line %i: %s' % (lineno,line)) break if fgout_input is None: - raise ValueError("fgout grid fgno = %i not found in %s" % (fgno, data_file)) - - self.tstart = float(fgout_input[0].split()[0]) - self.tend = float(fgout_input[1].split()[0]) - self.nout = int(fgout_input[2].split()[0]) - self.point_style = point_style = int(fgout_input[3].split()[0]) - output_format = int(fgout_input[4].split()[0]) + raise ValueError('fgout grid fgno = %i not found in %s' \ + % (fgno, data_file)) + + lineno = 0 # next input line + self.output_style = int(fgout_input[lineno].split()[lineno]) + lineno += 1 + if (self.output_style == 1): + # equally spaced times: + self.nout = int(fgout_input[lineno].split()[0]) + lineno += 1 + self.tstart = float(fgout_input[lineno].split()[0]) + lineno += 1 + self.tend = float(fgout_input[lineno].split()[0]) + lineno += 1 + self.times = numpy.linspace(self.tstart, self.tend, self.nout) + elif (self.output_style == 2): + # list of times: + self.nout = int(fgout_input[lineno].split()[0]) + lineno += 1 + times_str = fgout_input[lineno].split()[:self.nout] + self.times = numpy.array([float(ts) for ts in times_str]) + lineno += 1 + else: + raise ValueError('Unrecognized fgout output_style: %s' \ + % self.output_style) + self.point_style = point_style = int(fgout_input[lineno].split()[0]) + lineno += 1 + output_format = int(fgout_input[lineno].split()[0]) + lineno += 1 if output_format == 1: - self.output_format = "ascii" + self.output_format = 'ascii' elif output_format == 3: - self.output_format = "binary" - print( - "Reading input 
for fgno=%i, point_style = %i " - % (self.fgno, self.point_style) - ) + self.output_format = 'binary' + print('Reading input for fgno=%i, point_style = %i ' \ + % (self.fgno, self.point_style)) if point_style == 2: - self.nx = nx = int(fgout_input[5].split()[0]) - self.ny = ny = int(fgout_input[5].split()[1]) - self.x1 = float(fgout_input[6].split()[0]) - self.y1 = float(fgout_input[6].split()[1]) - self.x2 = float(fgout_input[7].split()[0]) - self.y2 = float(fgout_input[7].split()[1]) + self.nx = nx = int(fgout_input[lineno].split()[0]) + self.ny = ny = int(fgout_input[lineno].split()[1]) + lineno += 1 + self.x1 = float(fgout_input[lineno].split()[0]) + self.y1 = float(fgout_input[lineno].split()[1]) + lineno += 1 + self.x2 = float(fgout_input[lineno].split()[0]) + self.y2 = float(fgout_input[lineno].split()[1]) + lineno += 1 else: - raise NotImplementedError( - "fgout not implemented for point_style %i" % point_style - ) + raise NotImplementedError("fgout not implemented for point_style %i" \ + % point_style) + def write_to_fgout_data(self, fid): """ @@ -368,64 +380,83 @@ def write_to_fgout_data(self, fid): """ print("\n---------------------------------------------- ") - assert self.point_style is not None, "Need to set point_style" + assert self.point_style is not None, 'Need to set point_style' point_style = self.point_style if point_style not in [2]: - errmsg = ( - "point_style %s is not implement, only point_style==2" % point_style - ) + errmsg = 'point_style %s is not implement, only point_style==2' \ + % point_style raise NotImplementedError(errmsg) - - if self.output_format == "ascii": + + if self.output_format == 'ascii': output_format = 1 - elif self.output_format == "binary32": + elif self.output_format == 'binary32': output_format = 2 - elif self.output_format in ["binary", "binary64"]: + elif self.output_format in ['binary','binary64']: output_format = 3 else: errmsg = "fgout output_format must be ascii, binary32, or binary64" raise NotImplementedError(errmsg) - assert self.tstart is not None, "Need to set tstart" - assert self.tend is not None, "Need to set tend" - assert self.nout is not None, "Need to set nout" - assert self.point_style is not None, "Need to set point_style" + # write header, independent of point_style: - # fid = open(self.input_file_name,'w') + #fid = open(self.input_file_name,'w') fid.write("\n") fid.write("%i # fgno\n" % self.fgno) - fid.write("%16.10e # tstart\n" % self.tstart) - fid.write("%16.10e # tend\n" % self.tend) - fid.write("%i %s # nout\n" % (self.nout, 11 * " ")) - fid.write("%i %s # point_style\n" % (self.point_style, 12 * " ")) - fid.write("%i %s # output_format\n" % (output_format, 12 * " ")) - print("fgout grid %i has point_style = %i" % (self.fgno, point_style)) + fid.write("%i # output_style\n" \ + % self.output_style) + + if self.output_style == 1: + assert self.tstart is not None, 'Need to set tstart' + assert self.tend is not None, 'Need to set tend' + assert self.nout is not None, 'Need to set nout' + fid.write("%i %s # nout\n" % (self.nout, 11*" ")) + fid.write("%16.10e # tstart\n" % self.tstart) + fid.write("%16.10e # tend\n" % self.tend) + elif self.output_style == 2: + self.nout = len(self.output_times) + fid.write("%i %s # nout\n" % (self.nout, 11*" ")) + + # remove [] and , from list of times: + output_times_str = repr(list(self.output_times))[1:-1] + output_times_str = output_times_str.replace(',','') + fid.write("%s # output_times\n" % output_times_str) + else: + raise ValueError('fgout output_style must be 1 or 2') + 
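
For reference (not part of the patch): a sketch of how the new output_style, output_times and dclaw attributes might be set from setrun.py; the grid extents, times and resolution are placeholders, and appending to rundata.fgout_data.fgout_grids is the usual convention rather than something defined in this file.

# Sketch only: configuring an fgout grid with an explicit list of output times.
from clawpack.geoclaw import fgout_tools

fgout = fgout_tools.FGoutGrid()
fgout.fgno = 1
fgout.point_style = 2               # only style currently supported
fgout.output_format = 'binary32'
fgout.nx, fgout.ny = 200, 150       # placeholder resolution
fgout.x1, fgout.x2 = -100.0, -90.0  # placeholder extent (cell edges)
fgout.y1, fgout.y2 = 25.0, 32.0
fgout.dclaw = False                 # True would request the 7 D-Claw components

fgout.output_style = 2              # new: list of times instead of tstart/tend/nout
fgout.output_times = [0., 300., 600., 1200., 3600.]

# rundata.fgout_data.fgout_grids.append(fgout)  # typical registration in setrun.py
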
fid.write("%i %s # point_style\n" \ + % (self.point_style,12*" ")) + fid.write("%i %s # output_format\n" \ + % (output_format,12*" ")) + + print('fgout grid %i has point_style = %i' % (self.fgno, point_style)) + if point_style == 2: # 2d grid of points - x1, x2 = self.x1, self.x2 - y1, y2 = self.y1, self.y2 - nx, ny = self.nx, self.ny + x1,x2 = self.x1, self.x2 + y1,y2 = self.y1, self.y2 + nx,ny = self.nx, self.ny + + dx = (x2-x1)/nx # x1,x2 are cell edges + dy = (y2-y1)/ny # y1,y2 are cell edges - dx = (x2 - x1) / nx # x1,x2 are cell edges - dy = (y2 - y1) / ny # y1,y2 are cell edges + npts = nx*ny - npts = nx * ny + fid.write("%i %i %s # nx,ny\n" \ + % (nx,ny,10*" ")) + fid.write("%16.10e %20.10e # x1, y1\n" % (x1,y1)) + fid.write("%16.10e %20.10e # x2, y2\n" % (x2,y2)) - fid.write("%i %i %s # nx,ny\n" % (nx, ny, 10 * " ")) - fid.write("%16.10e %20.10e # x1, y1\n" % (x1, y1)) - fid.write("%16.10e %20.10e # x2, y2\n" % (x2, y2)) + print(" specifying fgout grid with shape %i by %i, with %i points" \ + % (nx,ny,npts)) + print(" lower left = (%15.10f,%15.10f)" % (x1,y1)) + print(" upper right = (%15.10f,%15.10f)" % (x2,y2)) + print(" dx = %15.10e, dy = %15.10e" % (dx,dy)) + + fid.write("%s # dclaw" % self.dclaw) - print( - " specifying fgout grid with shape %i by %i, with %i points" - % (nx, ny, npts) - ) - print(" lower left = (%15.10f,%15.10f)" % (x1, y1)) - print(" upper right = (%15.10f,%15.10f)" % (x2, y2)) - print(" dx = %15.10e, dy = %15.10e" % (dx, dy)) def read_frame(self, frameno): """ @@ -437,10 +468,8 @@ def read_frame(self, frameno): try: fr = self.plotdata.getframe(frameno) except: - print( - "*** Could not read fgout grid %i frame %i from %s" - % (self.fgno, frameno, self.plotdata.outdir) - ) + print('*** Could not read fgout grid %i frame %i from %s' \ + % (self.fgno,frameno,self.plotdata.outdir)) raise state = fr.states[0] # only 1 AMR grid patch = state.patch @@ -451,47 +480,48 @@ def read_frame(self, frameno): fgout_frame.q = state.q fgout_frame.t = state.t - + fgout_frame.frameno = frameno - - X, Y = patch.grid.p_centers[:2] - + + X,Y = patch.grid.p_centers[:2] + try: fgoutX = self.X fgoutY = self.Y except: # presumably x,y, etc. were not set for this fgout_grid # (e.g. 
by read_fgout_grids_data) so infer from this frame and set - + # reset most attributes including private _x etc to None: - self.__init__( - fgno=self.fgno, outdir=self.outdir, output_format=self.output_format - ) - - print(" Setting grid attributes based on Frame %i:" % frameno) - x = X[:, 0] - y = Y[0, :] + self.__init__(fgno=self.fgno,outdir=self.outdir, + output_format=self.output_format) + + print(' Setting grid attributes based on Frame %i:' % frameno) + x = X[:,0] + y = Y[0,:] dx = x[1] - x[0] dy = y[1] - y[0] - self.x1 = x[0] - dx / 2 - self.x2 = x[-1] + dx / 2 - self.y1 = y[0] - dy / 2 - self.y2 = y[-1] + dy / 2 + self.x1 = x[0] - dx/2 + self.x2 = x[-1] + dx/2 + self.y1 = y[0] - dy/2 + self.y2 = y[-1] + dy/2 self.nx = len(x) self.ny = len(y) fgoutX = self.X # will be computed from info above fgoutY = self.Y # will be computed from info above - print(" Grid edges: ", self.extent_edges) - print(" with %i cells in x and %i cells in y" % (self.nx, self.ny)) - + print(' Grid edges: ',self.extent_edges) + print(' with %i cells in x and %i cells in y' \ + % (self.nx,self.ny)) + + if not numpy.allclose(X, fgoutX): - errmsg = "*** X read from output does not match fgout_grid.X" + errmsg = '*** X read from output does not match fgout_grid.X' raise ValueError(errmsg) if not numpy.allclose(Y, fgoutY): - errmsg = "*** Y read from output does not match fgout_grid.Y" + errmsg = '*** Y read from output does not match fgout_grid.Y' raise ValueError(errmsg) - + return fgout_frame @@ -499,53 +529,47 @@ def read_frame(self, frameno): # Functions for interpolating from fgout grid to arbitrary points, # useful for example if using velocity field to model particle/debris motion - -def make_fgout_fcn_xy( - fgout, qoi, method="nearest", bounds_error=False, fill_value=numpy.nan -): +def make_fgout_fcn_xy(fgout, qoi, method='nearest', + bounds_error=False, fill_value=numpy.nan): """ Create a function that can be called at (x,y) and return the qoi interpolated in space from the fgout array. - - qoi should be a string (e.g. 'h', 'u' or 'v') corresponding to + + qoi should be a string (e.g. 'h', 'u' or 'v') corresponding to an attribute of fgout. - - The function returned takes arguments x,y that can be floats or + + The function returned takes arguments x,y that can be floats or (equal length) 1D arrays of values that lie within the spatial - extent of fgout. - - bounds_error and fill_value determine the behavior if (x,y) is not in + extent of fgout. + + bounds_error and fill_value determine the behavior if (x,y) is not in the bounds of the data, as in scipy.interpolate.RegularGridInterpolator. """ - + from scipy.interpolate import RegularGridInterpolator - + try: - q = getattr(fgout, qoi) + q = getattr(fgout,qoi) except: - print("*** fgout missing attribute qoi = %s?" % qoi) - - err_msg = ( - "*** q must have same shape as fgout.X\n" - + "fgout.X.shape = %s, q.shape = %s" % (fgout.X.shape, q.shape) - ) + print('*** fgout missing attribute qoi = %s?' 
% qoi) + + err_msg = '*** q must have same shape as fgout.X\n' \ + + 'fgout.X.shape = %s, q.shape = %s' % (fgout.X.shape,q.shape) assert fgout.X.shape == q.shape, err_msg + + x1 = fgout.X[:,0] + y1 = fgout.Y[0,:] + fgout_fcn1 = RegularGridInterpolator((x1,y1), q, method=method, + bounds_error=bounds_error, fill_value=fill_value) - x1 = fgout.X[:, 0] - y1 = fgout.Y[0, :] - fgout_fcn1 = RegularGridInterpolator( - (x1, y1), q, method=method, bounds_error=bounds_error, fill_value=fill_value - ) - - def fgout_fcn(x, y): + def fgout_fcn(x,y): """ Function that can be evaluated at single point or arrays (x,y). """ from numpy import array, vstack - xa = array(x) ya = array(y) - xyout = vstack((xa, ya)).T + xyout = vstack((xa,ya)).T qout = fgout_fcn1(xyout) if len(qout) == 1: qout = qout[0] # return scalar @@ -554,259 +578,208 @@ def fgout_fcn(x, y): return fgout_fcn -def make_fgout_fcn_xyt( - fgout1, - fgout2, - qoi, - method_xy="nearest", - method_t="linear", - bounds_error=False, - fill_value=numpy.nan, -): +def make_fgout_fcn_xyt(fgout1, fgout2, qoi, method_xy='nearest', + method_t='linear', bounds_error=False, + fill_value=numpy.nan): """ Create a function that can be called at (x,y,t) and return the qoi interpolated in space and time between the two frames fgout1 and fgout2. - - qoi should be a string (e.g. 'h', 'u' or 'v') corresponding to + + qoi should be a string (e.g. 'h', 'u' or 'v') corresponding to an attribute of fgout. - + method_xy is the method used in creating the spatial interpolator, and is passed to make_fgout_fcn_xy. - + method_t is the method used for interpolation in time, currently only 'linear' is supported, which linearly interpolates. - - bounds_error and fill_value determine the behavior if (x,y,t) is not in + + bounds_error and fill_value determine the behavior if (x,y,t) is not in the bounds of the data. - + The function returned takes arguments x,y (floats or equal-length 1D arrays) of values that lie within the spatial extent of fgout1, fgout2 (which are assumed to cover the same uniform grid at different times) - and t should be a float that lies between fgout1.t and fgout2.t. + and t should be a float that lies between fgout1.t and fgout2.t. """ - - assert numpy.allclose(fgout1.X, fgout2.X), "*** fgout1 and fgout2 must have same X" - assert numpy.allclose(fgout1.Y, fgout2.Y), "*** fgout1 and fgout2 must have same Y" - + + assert numpy.allclose(fgout1.X, fgout2.X), \ + '*** fgout1 and fgout2 must have same X' + assert numpy.allclose(fgout1.Y, fgout2.Y), \ + '*** fgout1 and fgout2 must have same Y' + t1 = fgout1.t t2 = fgout2.t - # assert t1 < t2, '*** expected fgout1.t < fgout2.t' - - fgout1_fcn_xy = make_fgout_fcn_xy( - fgout1, qoi, method=method_xy, bounds_error=bounds_error, fill_value=fill_value - ) - fgout2_fcn_xy = make_fgout_fcn_xy( - fgout2, qoi, method=method_xy, bounds_error=bounds_error, fill_value=fill_value - ) - - def fgout_fcn(x, y, t): + #assert t1 < t2, '*** expected fgout1.t < fgout2.t' + + fgout1_fcn_xy = make_fgout_fcn_xy(fgout1, qoi, method=method_xy, + bounds_error=bounds_error, fill_value=fill_value) + fgout2_fcn_xy = make_fgout_fcn_xy(fgout2, qoi, method=method_xy, + bounds_error=bounds_error, fill_value=fill_value) + + def fgout_fcn(x,y,t): """ Function that can be evaluated at single point or arrays (x,y) at a single time t. 
""" from numpy import array, ones - xa = array(x) ya = array(y) tol = 1e-6 # to make sure it works ok when called with t=t1 or t=t2 - if t1 - tol <= t <= t2 + tol: - alpha = (t - t1) / (t2 - t1) + if t1-tol <= t <= t2+tol: + alpha = (t-t1)/(t2-t1) elif bounds_error: - errmsg = "*** argument t=%g should be between t1=%g and t2=%g" % (t, t1, t2) + errmsg = '*** argument t=%g should be between t1=%g and t2=%g' \ + % (t,t1,t2) raise ValueError(errmsg) else: qout = fill_value * ones(xa.shape) return qout - qout1 = fgout1_fcn_xy(x, y) - qout2 = fgout2_fcn_xy(x, y) + qout1 = fgout1_fcn_xy(x,y) + qout2 = fgout2_fcn_xy(x,y) - if method_t == "linear": + if method_t == 'linear': if t1 <= t <= t2: - alpha = (t - t1) / (t2 - t1) - qout = (1 - alpha) * qout1 + alpha * qout2 + alpha = (t-t1)/(t2-t1) + qout = (1-alpha)*qout1 + alpha*qout2 else: - raise NotImplementedError("method_t = %s not supported" % method_t) - + raise NotImplementedError('method_t = %s not supported' % method_t) + return qout - + return fgout_fcn - # =============================== # Functions for writing a set of fgout frames as a netCDF file, and -# reading such a file: - - -def write_netcdf( - fgout_frames, - fname_nc="fgout_frames.nc", - qois=["h", "hu", "hv", "eta"], - datatype="f4", - include_B0=False, - include_Bfinal=False, - description="", - verbose=True, -): +# reading such a file: + +def write_netcdf(fgout_frames, fname_nc='fgout_frames.nc', + qois = ['h','hu','hv','eta'], datatype='f4', + include_B0=False, include_Bfinal=False, + description='', verbose=True): """ Write a list of fgout frames (at different times on the same rectangular grid) to a single netCDF file, with some metadata and the topography, if desired. - + fgout_frames should be a list of FGoutFrame objects, all of the same size and at increasing times. - + fname_nc is the name of the file to write. - + qois is a list of strings, the quantities of interest to include in file. - This could include any of: + This could include any of: 'h', 'eta', 'hu', 'hv', 'u', 'v', 's', 'hss', 'B'. All other quantities can be computed from h, hu, hv, eta, the original fgout variables from GeoClaw, but for some applications you might only want to save 'h' and 's', for example. - + datatype should be 'f4' [default] or 'f8', specifying bytes per qoi value. 'f8' has full precision of the original data, but the file will be twice as large and may not be needed for downstream applications. - + Note that the topography B = eta - h, so it is not necessary to store all three of these. Also, B is often the same for all frames, so rather than storing B at each frame as a qoi, two other options are also provided (and then storing eta or h for all frames allows calculating the other): - + include_Bfinal: If True, include the topography B array from the final frame as the Bfinal array. - + include_B0: If True, include the topography B array from the first frame - as the B0 array. This is only useful if, e.g., the first frame is initial + as the B0 array. This is only useful if, e.g., the first frame is initial topography before co-seismic deformation, and at later times the topography - is always equal to Bfinal. - + is always equal to Bfinal. + `description` is a string that will be added as metadata. - A metadata field `history` will also be added, which includes the - time the file was created and the path to the directory where it was made. 
- """ - - import time - from datetime import datetime, timedelta - + A metadata field `history` will also be added, which includes the + time the file was created and the path to the directory where it was made. + """ + import netCDF4 - from cftime import date2num, num2date - + from datetime import datetime, timedelta + from cftime import num2date, date2num + import time timestr = time.ctime(time.time()) # current time for metadata - + fg_times = numpy.array([fg.t for fg in fgout_frames]) - + if verbose: - print("Creating %s with fgout frames at times: " % fname_nc) + print('Creating %s with fgout frames at times: ' % fname_nc) print(fg_times) - + fg0 = fgout_frames[0] x = fg0.x y = fg0.y - + xs = numpy.array([fg.x for fg in fgout_frames]) ys = numpy.array([fg.y for fg in fgout_frames]) # assert same for all times + + units = {'h':'meters', 'eta':'meters', 'hu':'m^2/s', 'hv':'m^2/s', + 'u':'m/s', 'v':'m/s', 's':'m/s', 'hss':'m^3/s^2', 'B':'meters'} - units = { - "h": "meters", - "eta": "meters", - "hu": "m^2/s", - "hv": "m^2/s", - "u": "m/s", - "v": "m/s", - "s": "m/s", - "hss": "m^3/s^2", - "B": "meters", - } - - with netCDF4.Dataset(fname_nc, "w") as rootgrp: + with netCDF4.Dataset(fname_nc, 'w') as rootgrp: rootgrp.description = description rootgrp.history = "Created " + timestr rootgrp.history += " in %s; " % os.getcwd() - lon = rootgrp.createDimension("lon", len(x)) - longitudes = rootgrp.createVariable("lon", "f8", ("lon",)) + lon = rootgrp.createDimension('lon', len(x)) + longitudes = rootgrp.createVariable('lon','f8',('lon',)) longitudes[:] = x - longitudes.units = "degrees_east" + longitudes.units = 'degrees_east' - lat = rootgrp.createDimension("lat", len(y)) - latitudes = rootgrp.createVariable("lat", "f8", ("lat",)) + lat = rootgrp.createDimension('lat', len(y)) + latitudes = rootgrp.createVariable('lat','f8',('lat',)) latitudes[:] = y - latitudes.units = "degrees_north" - - time = rootgrp.createDimension("time", len(fg_times)) - times = rootgrp.createVariable("time", "f8", ("time",)) + latitudes.units = 'degrees_north' + + time = rootgrp.createDimension('time', len(fg_times)) + times = rootgrp.createVariable('time','f8',('time',)) times[:] = fg_times - times.units = "seconds" - + times.units = 'seconds' + if 0: # Could make times be datetimes relative to some event time, e.g.: - times.units = "seconds since 1700-01-26 21:00:00.0" - times.calendar = "gregorian" - dates = [ - datetime(1700, 1, 26, 21) + timedelta(seconds=ss) for ss in fg_times - ] - times[:] = date2num(dates, units=times.units, calendar=times.calendar) - + times.units = 'seconds since 1700-01-26 21:00:00.0' + times.calendar = 'gregorian' + dates = [datetime(1700,1,26,21) + timedelta(seconds=ss) \ + for ss in fg_times] + times[:] = date2num(dates,units=times.units,calendar=times.calendar) + if include_B0: - B0 = rootgrp.createVariable( - "B0", - datatype, - ( - "lon", - "lat", - ), - ) - B0[:, :] = fg0.B - B0.units = "meters" + B0 = rootgrp.createVariable('B0',datatype,('lon','lat',)) + B0[:,:] = fg0.B + B0.units = 'meters' if include_Bfinal: fg_final = fgout_frames[-1] - Bfinal = rootgrp.createVariable( - "Bfinal", - datatype, - ( - "lon", - "lat", - ), - ) - Bfinal[:, :] = fg_final.B - Bfinal.units = "meters" - + Bfinal = rootgrp.createVariable('Bfinal',datatype,('lon','lat',)) + Bfinal[:,:] = fg_final.B + Bfinal.units = 'meters' + for qoi in qois: - qoi_frames = [getattr(fgout, qoi) for fgout in fgout_frames] - qoi_var = rootgrp.createVariable( - qoi, - datatype, - ( - "time", - "lon", - "lat", - ), - ) - 
qoi_var[:, :, :] = qoi_frames + qoi_frames = [getattr(fgout,qoi) for fgout in fgout_frames] + qoi_var = rootgrp.createVariable(qoi,datatype,('time','lon','lat',)) + qoi_var[:,:,:] = qoi_frames qoi_var.units = units[qoi] - def get_as_array(var, rootgrp, verbose=True): """ - Utility function to retrieve variable from netCDF file and convert to + Utility function to retrieve variable from netCDF file and convert to numpy array. """ a = rootgrp.variables.get(var, None) if a is not None: - if verbose: - print(" Loaded %s with shape %s" % (var, repr(a.shape))) + if verbose: print(' Loaded %s with shape %s' % (var,repr(a.shape))) return numpy.array(a) else: - if verbose: - print(" Did not find %s" % var) - return None + if verbose: print(' Did not find %s' % var) + return None def read_netcdf_arrays(fname_nc, qois, verbose=True): @@ -817,40 +790,41 @@ def read_netcdf_arrays(fname_nc, qois, verbose=True): 'h', 'eta', 'hu', 'hv', 'u', 'v', 's', 'hss', 'B'. qois can also include the time-independent 'B0' and/or 'Bfinal' - - Returns + + Returns x, y, t, qoi_arrays where x,y define the longitude, latitudes, t is the times of the frames, and qoi_arrays is a dictionary indexed by the strings from qois. - + Each dict element is an array with shape (len(t), len(x), len(y)) for time-dependent qoi's, or (len(x), len(y)) for B0 or Bfinal, or None if that qoi was not found in the netCDF file. """ - + import netCDF4 - with netCDF4.Dataset(fname_nc, "r") as rootgrp: + with netCDF4.Dataset(fname_nc, 'r') as rootgrp: if verbose: - print("Reading data to fgout frames from nc file", fname_nc) - print(" nc file description: ", rootgrp.description) - print("History: ", rootgrp.history) - - x = get_as_array("lon", rootgrp, verbose) - y = get_as_array("lat", rootgrp, verbose) - t = get_as_array("time", rootgrp, verbose) - + print('Reading data to fgout frames from nc file',fname_nc) + print(' nc file description: ', rootgrp.description) + print('History: ', rootgrp.history) + + x = get_as_array('lon', rootgrp, verbose) + y = get_as_array('lat', rootgrp, verbose) + t = get_as_array('time', rootgrp, verbose) + vars = list(rootgrp.variables) - + qoi_arrays = {} for qoi in qois: qoi_array = get_as_array(qoi, rootgrp, verbose) qoi_arrays[qoi] = qoi_array - - return x, y, t, qoi_arrays - + + return x,y,t,qoi_arrays + + def read_netcdf(fname_nc, fgout_grid=None, verbose=True): """ @@ -859,55 +833,55 @@ def read_netcdf(fname_nc, fgout_grid=None, verbose=True): the qoi's 'h','hu','hv','eta' required to reconstruct the q array as output by GeoClaw. 
""" - + import netCDF4 - with netCDF4.Dataset(fname_nc, "r") as rootgrp: + with netCDF4.Dataset(fname_nc, 'r') as rootgrp: if 1: - print("Reading data to fgout frames from nc file", fname_nc) - print(" nc file description: ", rootgrp.description) - print("History: ", rootgrp.history) + print('Reading data to fgout frames from nc file',fname_nc) + print(' nc file description: ', rootgrp.description) + print('History: ', rootgrp.history) - x = get_as_array("lon", rootgrp, verbose) - y = get_as_array("lat", rootgrp, verbose) - t = get_as_array("time", rootgrp, verbose) + x = get_as_array('lon', rootgrp, verbose) + y = get_as_array('lat', rootgrp, verbose) + t = get_as_array('time', rootgrp, verbose) vars = list(rootgrp.variables) + - for qoi in ["h", "hu", "hv", "eta"]: - errmsg = "*** Cannot reconstruct fgout frame without %s" % qoi + for qoi in ['h','hu','hv','eta']: + errmsg = '*** Cannot reconstruct fgout frame without %s' % qoi assert qoi in vars, errmsg - h = get_as_array("h", rootgrp, verbose) - hu = get_as_array("hu", rootgrp, verbose) - hv = get_as_array("hv", rootgrp, verbose) - eta = get_as_array("eta", rootgrp, verbose) - + h = get_as_array('h', rootgrp, verbose) + hu = get_as_array('hu', rootgrp, verbose) + hv = get_as_array('hv', rootgrp, verbose) + eta = get_as_array('eta', rootgrp, verbose) + if (x is None) or (y is None): - print("*** Could not create grid") + print('*** Could not create grid') else: - X, Y = numpy.meshgrid(x, y) - + X,Y = numpy.meshgrid(x,y) + fgout_frames = [] - + for k in range(eta.shape[0]): fgout = FGoutFrame(fgout_grid=fgout_grid, frameno=k) fgout.x = x fgout.y = y fgout.t = t[k] - fgout.q = numpy.empty((4, eta.shape[1], eta.shape[2])) - fgout.q[0, :, :] = h[k, :, :] - fgout.q[1, :, :] = hu[k, :, :] - fgout.q[2, :, :] = hv[k, :, :] - fgout.q[3, :, :] = eta[k, :, :] + fgout.q = numpy.empty((4,eta.shape[1],eta.shape[2])) + fgout.q[0,:,:] = h[k,:,:] + fgout.q[1,:,:] = hu[k,:,:] + fgout.q[2,:,:] = hv[k,:,:] + fgout.q[3,:,:] = eta[k,:,:] fgout.X = X fgout.Y = Y fgout_frames.append(fgout) - - print("Created fgout_frames as list of length %i" % len(fgout_frames)) - + + print('Created fgout_frames as list of length %i' % len(fgout_frames)) + return fgout_frames - def print_netcdf_info(fname_nc): """ Print out info about the contents of a netCDF file contining fgout frames, @@ -915,19 +889,21 @@ def print_netcdf_info(fname_nc): """ import netCDF4 - with netCDF4.Dataset(fname_nc, "r") as rootgrp: - x = get_as_array("lon", rootgrp, verbose=False) - y = get_as_array("lat", rootgrp, verbose=False) - t = get_as_array("time", rootgrp, verbose=False) + with netCDF4.Dataset(fname_nc, 'r') as rootgrp: + x = get_as_array('lon', rootgrp, verbose=False) + y = get_as_array('lat', rootgrp, verbose=False) + t = get_as_array('time', rootgrp, verbose=False) vars = list(rootgrp.variables) - - print("===================================================") - print("netCDF file %s contains:" % fname_nc) - print("description: \n", rootgrp.description) - print("history: \n", rootgrp.history) - print("%i longitudes from %.6f to %.6f" % (len(x), x[0], x[-1])) - print("%i latitudes from %.6f to %.6f" % (len(y), y[0], y[-1])) - print("%i times from %.3f to %.3f" % (len(t), t[0], t[-1])) - print("variables: ", vars) - print("===================================================") + + print('===================================================') + print('netCDF file %s contains:' % fname_nc) + print('description: \n', rootgrp.description) + print('history: \n', rootgrp.history) + print('%i 
longitudes from %.6f to %.6f' % (len(x),x[0],x[-1])) + print('%i latitudes from %.6f to %.6f' % (len(y),y[0],y[-1])) + print('%i times from %.3f to %.3f' % (len(t),t[0],t[-1])) + print('variables: ',vars) + print('===================================================') + + diff --git a/src/python/geoclaw/surge/plot.py b/src/python/geoclaw/surge/plot.py index 254d74f7b..c2ada663f 100644 --- a/src/python/geoclaw/surge/plot.py +++ b/src/python/geoclaw/surge/plot.py @@ -41,6 +41,7 @@ surge_data = geodata.SurgeData() + class track_data(object): """Read in storm track data from run output""" @@ -84,12 +85,16 @@ def gauge_locations(current_data, gaugenos='all'): yoffset=0.02) -def gaugetopo(current_data): - q = current_data.q - h = q[0, :] - eta = q[3, :] - topo = eta - h - return topo +def gauge_dry_regions(cd, dry_tolerance=1e-16): + """Masked array of zeros where gauge is dry.""" + return np.ma.masked_where(np.abs(cd.gaugesoln.q[0, :]) > dry_tolerance, + np.zeros(cd.gaugesoln.q[0, :].shape)) + + +def gauge_surface(cd, dry_tolerance=1e-16): + """Sea surface at gauge masked when dry.""" + return np.ma.masked_where(np.abs(cd.gaugesoln.q[0, :]) < dry_tolerance, + cd.gaugesoln.q[3, :]) def plot_landfall_gauge(gauge, axes, landfall=0.0, style='b', kwargs={}): @@ -97,6 +102,9 @@ def plot_landfall_gauge(gauge, axes, landfall=0.0, style='b', kwargs={}): This will transform the plot so that it is relative to the landfall value provided. + + This can be done using `plotaxes.time_scale` instead so this function will + be deprecated and removed in a future release. """ axes = plt.gca() @@ -492,19 +500,21 @@ def plot_track(t, x, y, wind_radius, wind_speed, Pc, name=None): # Returns axes # Storm with category plotting function # ======================================================================== -def add_track(Storm, axes, plot_package=None, category_color=None, legend_loc='best', + + +def add_track(Storm, axes, plot_package=None, category_color=None, legend_loc='best', intensity=False, categorization="NHC", limits=None, track_color='red'): if category_color is None: - category_color = {5: 'red', - 4: 'orange', - 3: 'yellow', - 2: 'blue', # edit color - 1: 'violet', - 0: 'black', - -1: 'gray'} + category_color = {5: 'red', + 4: 'orange', + 3: 'yellow', + 2: 'blue', # edit color + 1: 'violet', + 0: 'black', + -1: 'gray'} category = Storm.category(categorization=categorization) - + # make it if intensity = true # basic plotting @@ -520,52 +530,55 @@ def add_track(Storm, axes, plot_package=None, category_color=None, legend_loc='b axes.set_xlabel("Longitude") axes.set_ylabel("Latitude") - - categories_legend = [] - if intensity and categorization is "NHC": + if intensity and categorization == "NHC": categories_legend = [] - # plotitem = plotaxes.new_plotitem(name='category', plot_type='1d_plot') + # plotitem = plotaxes.new_plotitem(name='category', plot_type='1d_plot') if (-1 in category): - negativeone = mlines.Line2D([], [], color=category_color[-1], marker='s', ls='', label="Tropical Depression") + negativeone = mlines.Line2D( + [], [], color=category_color[-1], marker='s', ls='', label="Tropical Depression") categories_legend.append(negativeone) if (0 in category): - zero = mlines.Line2D([], [], color=category_color[0], marker='s', ls='', label="Tropical Storn") + zero = mlines.Line2D( + [], [], color=category_color[0], marker='s', ls='', label="Tropical Storn") categories_legend.append(zero) if (1 in category): - one = mlines.Line2D([], [], color=category_color[1], marker='s', ls='', label="Category 1") 
+ one = mlines.Line2D([], [], color=category_color[1], + marker='s', ls='', label="Category 1") categories_legend.append(one) if (2 in category): - two = mlines.Line2D([], [], color=category_color[2], marker='s', ls='', label="Category 2") + two = mlines.Line2D([], [], color=category_color[2], + marker='s', ls='', label="Category 2") categories_legend.append(two) if (3 in category): - three = mlines.Line2D([], [], color=category_color[3], marker='s', ls='', label="Category 3") + three = mlines.Line2D( + [], [], color=category_color[3], marker='s', ls='', label="Category 3") categories_legend.append(three) if (4 in category): - four = mlines.Line2D([], [], color=category_color[4], marker='s', ls='', label="Category 4") + four = mlines.Line2D( + [], [], color=category_color[4], marker='s', ls='', label="Category 4") categories_legend.append(four) if (5 in category): - five = mlines.Line2D([], [], color=category_color[5], marker='s', ls='', label="Category 5") + five = mlines.Line2D( + [], [], color=category_color[5], marker='s', ls='', label="Category 5") categories_legend.append(five) plt.legend(handles=categories_legend, loc=legend_loc) - + # if bounds is not None: # plotitem.pcolor_cmin = bounds[0] # plotitem.pcolor_cmax = bounds[1] return axes - - # if plot_type == 'pcolor' or plot_type == 'imshow': # plotitem = plotaxes.new_plotitem(name='wind', plot_type='2d_pcolor') # plotitem.plot_var = wind_speed diff --git a/src/python/geoclaw/surge/quality.py b/src/python/geoclaw/surge/quality.py index 6584efbdb..e62db2c98 100644 --- a/src/python/geoclaw/surge/quality.py +++ b/src/python/geoclaw/surge/quality.py @@ -9,13 +9,14 @@ class InstabilityError(Exception): pass + def quality_check(model_output_dir, - regions_to_check = [[-81,22,-40,55], # atlantic - [-100,15,-82,32], # gulf - [-88.5,13.25,-70,19.75]], # carribean - frames_to_check = 4, - mean_tol_abs = .01, - min_depth = 300): + regions_to_check=[[-81, 22, -40, 55], # atlantic + [-100, 15, -82, 32], # gulf + [-88.5, 13.25, -70, 19.75]], # carribean + frames_to_check=4, + mean_tol_abs=.01, + min_depth=300): """Run a simple check to flag runs that were potentially numerically unstable. This function looks through the final *frames_to_check* frames of a model output and checks all cells in AMR level 1 that are below *min_depth* and @@ -23,7 +24,7 @@ def quality_check(model_output_dir, to encompass individual basins). If the absolute value of the mean surface height for these cells, which should not really have much surge, is above/mean_tol_abs, it raises an error. - + :Input: - *model_output_dir* (str) Path to the output directory for a given model - *regions_to_check* (list of lists of floats) A list of 4-element lists @@ -33,17 +34,17 @@ def quality_check(model_output_dir, height within any region in *regions_to_check* is above this value (meters). - *min_depth* (float) Only cells that are below *min_depth* (meters) are checked, in order to avoid including cells that potentially should have large surge values. - + :Raises: - InstabilityError if the model is flagged as potentially unstable in one of the regions. 
""" - + # find which frame is last - output_files = glob(join(model_output_dir,'fort.b*')) + output_files = glob(join(model_output_dir, 'fort.b*')) last_frame = int(output_files[-1].split('/')[-1][-4:]) # get sl_init - with open(join(model_output_dir,'geoclaw.data'),'r') as f: + with open(join(model_output_dir, 'geoclaw.data'), 'r') as f: for l in f: if '=: sea_level' in l: sl_init = float(l.strip().split()[0]) @@ -51,44 +52,45 @@ def quality_check(model_output_dir, # bring in solution soln = Solution() for frame in range(last_frame-frames_to_check+1, last_frame+1): - soln.read(frame, path = model_output_dir, file_format='binary', read_aux=False) + soln.read(frame, path=model_output_dir, + file_format='binary', read_aux=False) for r in regions_to_check: all_vals = np.array([]) for s in soln.states: # only looking at lowest AMR level - if s.patch.level >1: + if s.patch.level > 1: continue x = s.grid.c_centers[0] y = s.grid.c_centers[1] - eta = s.q[3,:,:] - topo = eta - s.q[0,:,:] - - # only count - eta = np.where(topo<(-min_depth),eta,np.nan) - in_reg = eta[np.ix_((x[:,0]>=r[0]) & (x[:,0]<=r[2]), - (y[0,:]>=r[1]) & (y[0,:]<=r[3]))] - all_vals = np.concatenate([all_vals,in_reg.flatten()]) + eta = s.q[3, :, :] + topo = eta - s.q[0, :, :] + + # only count + eta = np.where(topo < (-min_depth), eta, np.nan) + in_reg = eta[np.ix_((x[:, 0] >= r[0]) & (x[:, 0] <= r[2]), + (y[0, :] >= r[1]) & (y[0, :] <= r[3]))] + all_vals = np.concatenate([all_vals, in_reg.flatten()]) # adjust for sl_init all_vals = all_vals - sl_init - if all_vals.shape[0]>0: + if all_vals.shape[0] > 0: if abs(np.nanmean(all_vals)) > mean_tol_abs: raise InstabilityError("Model possibly unstable due to large magnitude deep " "ocean surge at end of run ({:.1f} cm in region {})".format( - np.nanmean(all_vals)*100, r)) + np.nanmean(all_vals)*100, r)) - -def get_max_boundary_fluxes(model_output_dir, max_refinement_depth_to_check = None, - frames_to_check = 1): + +def get_max_boundary_fluxes(model_output_dir, max_refinement_depth_to_check=None, + frames_to_check=1): """A common cause of instability is a persistant normal flux at the boundary. This code checks the last frame(s) of a model output and returns the maximum magnitude normal fluxes and currents at each boundary, as well as the maximum magnitude fluxes and currents observed within the entire domain. 
- + :Input: - *model_output_dir* (str) Path to the output directory for a given model - *max_refinement_depth_to_check* (int or None, optional) How many refinement levels to @@ -100,20 +102,20 @@ def get_max_boundary_fluxes(model_output_dir, max_refinement_depth_to_check = No """ # find which frame is last - output_files = glob(join(model_output_dir,'fort.q*')) + output_files = glob(join(model_output_dir, 'fort.q*')) frames = [int(i.split('/')[-1][-4:]) for i in output_files] last_frame = max(frames) - + # get domain and file format - with open(join(model_output_dir,'claw.data'),'r') as f: + with open(join(model_output_dir, 'claw.data'), 'r') as f: for l in f: if '=: lower' in l: - xl,yl = [float(i) for i in l.strip().split()[:2]] + xl, yl = [float(i) for i in l.strip().split()[:2]] elif '=: upper' in l: - xu,yu = [float(i) for i in l.strip().split()[:2]] + xu, yu = [float(i) for i in l.strip().split()[:2]] elif '=: output_format' in l: of = int(l.strip().split()[0]) - 1 - file_format = ['ascii','netcdf','binary'][of] + file_format = ['ascii', 'netcdf', 'binary'][of] maxhxl = -np.inf maxhyl = -np.inf @@ -128,10 +130,11 @@ def get_max_boundary_fluxes(model_output_dir, max_refinement_depth_to_check = No maxcurrx_overall = -np.inf maxcurry_overall = -np.inf - for f in range(last_frame+1-frames_to_check,last_frame+1): + for f in range(last_frame+1-frames_to_check, last_frame+1): soln = Solution() - soln.read(f, path = model_output_dir, file_format=file_format, read_aux=False) + soln.read(f, path=model_output_dir, + file_format=file_format, read_aux=False) for s in soln.states: @@ -139,50 +142,56 @@ def get_max_boundary_fluxes(model_output_dir, max_refinement_depth_to_check = No if max_refinement_depth_to_check is not None: if s.patch.level > max_refinement_depth_to_check: continue - + # get rounding error tolerance delta = s.grid.dimensions[0].delta edge_tol = delta * .001 - + x = s.grid.c_centers[0] xedges = s.grid.c_nodes[0] y = s.grid.c_centers[1] yedges = s.grid.c_nodes[1] - eta = s.q[3,:,:] - h = s.q[0,:,:] - hx = s.q[1,:,:] + eta = s.q[3, :, :] + h = s.q[0, :, :] + hx = s.q[1, :, :] curr_x = hx / h - hy = s.q[2,:,:] + hy = s.q[2, :, :] curr_y = hy / h - topo = eta - s.q[0,:,:] - maxhx_overall = np.nanmax([np.abs(hx).max(),maxhx_overall]) - maxhy_overall = np.nanmax([np.abs(hy).max(),maxhy_overall]) - maxcurrx_overall = np.nanmax([np.nanmax(np.abs(curr_x)),maxcurrx_overall]) - maxcurry_overall = np.nanmax([np.nanmax(np.abs(curr_y)),maxcurry_overall]) - - if abs(xedges[0,0] - xl) < edge_tol: - maxhxl = np.nanmax([maxhxl,np.nanmax(np.abs(hx[0,:]))]) - maxcurrxl = np.nanmax([maxcurrxl,np.nanmax(np.abs(curr_x[0,:]))]) - if abs(xedges[-1,0] - xu) < edge_tol: - maxhxu = np.nanmax([maxhxu,np.nanmax(np.abs(hx[-1,:]))]) - maxcurrxu = np.nanmax([maxcurrxu,np.nanmax(np.abs(curr_x[-1,:]))]) - if abs(yedges[0,0] - yl) < edge_tol: - maxhyl = np.nanmax([maxhyl,np.nanmax(np.abs(hy[:,0]))]) - maxcurryl = np.nanmax([maxcurryl,np.nanmax(np.abs(curr_y[:,0]))]) - if abs(yedges[0,-1] - yu) < edge_tol: - maxhyu = np.nanmax([maxhyu,np.nanmax(np.abs(hy[:,-1]))]) - maxcurryu = np.nanmax([maxcurryu,np.nanmax(np.abs(curr_y[:,-1]))]) - - return ({'max_normal_fluxes':{'W': maxhxl, - 'E': maxhxu, - 'N': maxhyu, - 'S': maxhyl}, - 'max_normal_currents':{'W': maxcurrxl, - 'E': maxcurrxu, - 'N': maxcurryu, - 'S': maxcurryl}, + topo = eta - s.q[0, :, :] + maxhx_overall = np.nanmax([np.abs(hx).max(), maxhx_overall]) + maxhy_overall = np.nanmax([np.abs(hy).max(), maxhy_overall]) + maxcurrx_overall = np.nanmax( + 
[np.nanmax(np.abs(curr_x)), maxcurrx_overall]) + maxcurry_overall = np.nanmax( + [np.nanmax(np.abs(curr_y)), maxcurry_overall]) + + if abs(xedges[0, 0] - xl) < edge_tol: + maxhxl = np.nanmax([maxhxl, np.nanmax(np.abs(hx[0, :]))]) + maxcurrxl = np.nanmax( + [maxcurrxl, np.nanmax(np.abs(curr_x[0, :]))]) + if abs(xedges[-1, 0] - xu) < edge_tol: + maxhxu = np.nanmax([maxhxu, np.nanmax(np.abs(hx[-1, :]))]) + maxcurrxu = np.nanmax( + [maxcurrxu, np.nanmax(np.abs(curr_x[-1, :]))]) + if abs(yedges[0, 0] - yl) < edge_tol: + maxhyl = np.nanmax([maxhyl, np.nanmax(np.abs(hy[:, 0]))]) + maxcurryl = np.nanmax( + [maxcurryl, np.nanmax(np.abs(curr_y[:, 0]))]) + if abs(yedges[0, -1] - yu) < edge_tol: + maxhyu = np.nanmax([maxhyu, np.nanmax(np.abs(hy[:, -1]))]) + maxcurryu = np.nanmax( + [maxcurryu, np.nanmax(np.abs(curr_y[:, -1]))]) + + return ({'max_normal_fluxes': {'W': maxhxl, + 'E': maxhxu, + 'N': maxhyu, + 'S': maxhyl}, + 'max_normal_currents': {'W': maxcurrxl, + 'E': maxcurrxu, + 'N': maxcurryu, + 'S': maxcurryl}, 'domain_maxs': {'hu': maxhx_overall, 'hv': maxhy_overall, 'u': maxcurrx_overall, - 'v': maxcurry_overall}}) \ No newline at end of file + 'v': maxcurry_overall}}) diff --git a/src/python/geoclaw/surge/storm.py b/src/python/geoclaw/surge/storm.py index 4f1b4eae7..2fe89971a 100644 --- a/src/python/geoclaw/surge/storm.py +++ b/src/python/geoclaw/surge/storm.py @@ -40,11 +40,10 @@ import warnings from pathlib import Path -import numpy +import numpy as np import pandas as pd import xarray as xr from fsspec import FSMap -from six.moves import range import clawpack.geoclaw.units as units @@ -152,25 +151,26 @@ class Storm(object): *TODO:* Add description of unit handling :Attributes: - - *t* (list(datetime.datetiem)) Contains the time at which each entry of - the other arrays are at. These are expected to be *datetime* objects. - Note that when written some formats require a *time_offset* to be set. - - *eye_location* (ndarray(:, :)) location of the eye of the storm. - Default units are in signed decimcal longitude and latitude. + - *t* (list(float) or list(datetime.datetiem)) Contains the time at which + each entry of the other arrays are at. These are expected to + be *datetime* objects. Note that when written some formats require + a *time_offset* to be set. + - *eye_location* (ndarray(:, :)) location of the eye of the storm. Default + units are in signed decimal longitude and latitude. - *max_wind_speed* (ndarray(:)) Maximum wind speed. Default units are meters/second. - *max_wind_radius* (ndarray(:)) Radius at which the maximum wind speed occurs. Default units are meters. - - *central_pressure* (ndarray(:)) Central pressure of storm. Default - units are Pascals. + - *central_pressure* (ndarray(:)) Central pressure of storm. Default units + are Pascals. - *storm_radius* (ndarray(:)) Radius of storm, often defined as the last closed iso-bar of pressure. Default units are meters. - *time_offset* (datetime.datetime) A date time that as an offset for the - simulation time. This will default to the beginning of the first of the - year that the first time point is found in. + simulation time. This will default to the beginning of the first of + the year that the first time point is found in. - *wind_speeds* (ndarray(:, :)) Wind speeds defined in every record, such - as 34kt, 50kt, 64kt, etc and their radii. Default units are meters/second - and meters. + as 34kt, 50kt, 64kt, etc and their radii. Default units are + meters/second and meters. :Initialization: 1. Read in existing file at *path*. 
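As a quick orientation to the revised Storm attribute documentation in the hunk above, here is a minimal sketch of loading a storm from an ATCF file and inspecting those fields. The input filename is hypothetical; only classes and keywords that appear in this patch are used, and the comments restate the default units given in the docstring above.

    # Minimal sketch: populate a Storm from a hypothetical ATCF b-deck file
    # and look at the attributes documented in the class docstring above.
    from clawpack.geoclaw.surge.storm import Storm

    storm = Storm(path="bal092008.dat", file_format="ATCF")  # hypothetical path

    print(storm.name, storm.basin, storm.ID)   # metadata pulled from the file
    print(storm.t[0], storm.t[-1])             # datetime objects for each record
    print(storm.max_wind_speed[:3])            # default units: m/s
    print(storm.central_pressure[:3])          # default units: Pa
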
@@ -226,7 +226,7 @@ def __init__(self, path=None, file_format="ATCF", **kwargs): self.wind_speeds = None # Storm descriptions - not all formats provide these - self.name = None + self.name = None # Possibly a list of a storm's names self.basin = None # Basin containing storm self.ID = None # ID code - depends on format self.classification = None # Classification of storm (e.g. HU) @@ -239,10 +239,12 @@ def __init__(self, path=None, file_format="ATCF", **kwargs): # Basic object support def __str__(self): r"""""" - output = "Name: %s" % self.name - output = "\n".join( - (output, "Dates: %s - %s" % (self.t[0].isoformat(), self.t[-1].isoformat())) - ) + output = f"Name: {self.name}\n" + if isinstance(self.t[0], datetime.datetime): + output += f"Dates: {self.t[0].isoformat()}" + output += f" - {self.t[-1].isoformat()}" + else: + output += f"Dates: {self.t[0]} - {self.t[-1]}" return output def __repr__(self): @@ -305,15 +307,20 @@ def read_geoclaw(self, path, verbose=False): - *verbose* (bool) Output more info regarding reading. """ + # Attempt to get name from file if is follows the convention name.storm + if ".storm" in os.path.splitext(path): + self.name = os.path.splitext(os.path.basename(path))[0] + + # Read header with open(path, "r") as data_file: num_casts = int(data_file.readline()) self.time_offset = datetime.datetime.strptime( data_file.readline()[:19], "%Y-%m-%dT%H:%M:%S" ) - - data = numpy.loadtxt(path, skiprows=3) + # Read rest of data + data = np.loadtxt(path, skiprows=3) num_forecasts = data.shape[0] - self.eye_location = numpy.empty((2, num_forecasts)) + self.eye_location = np.empty((2, num_forecasts)) assert num_casts == num_forecasts self.t = [ self.time_offset + datetime.timedelta(seconds=data[i, 0]) @@ -336,14 +343,24 @@ def read_atcf(self, path, verbose=False): - *path* (string) Path to the file to be read. - *verbose* (bool) Output more info regarding reading. 
""" - try: - import pandas as pd - except ImportError as e: - print("read_atcf currently requires pandas to work.") - raise e # See here for the ATCF format documentation: # https://www.nrlmry.navy.mil/atcf_web/docs/database/new/abdeck.txt + + # Slightly more robust converter for ATCF data fields that can be + # missing + def num_converter(x): + if isinstance(x, str): + if len(x.strip()) == 0: + # Only whitespace + return np.nan + else: + # Assume this is still a number + return float(x) + elif x is None: + return np.nan + return float(x) + df = pd.read_csv( path, engine="python", @@ -402,27 +419,25 @@ def read_atcf(self, path, verbose=False): "TAU": lambda d: datetime.timedelta(hours=int(d)), "LAT": lambda d: (-0.1 if d[-1] == "S" else 0.1) * int(d.strip("NS ")), "LON": lambda d: (-0.1 if d[-1] == "W" else 0.1) * int(d.strip("WE ")), + "RAD": num_converter, + "RAD1": num_converter, + "RAD2": num_converter, + "RAD3": num_converter, + "RAD4": num_converter, + "ROUTER": num_converter, + "RMW": num_converter, + "STORMNAME": lambda d: (d.strip() if isinstance(d, str) else d), }, - dtype={ - "BASIN": str, - "CY": int, - "VMAX": float, - "MSLP": float, - "TY": str, - "RAD": float, - "RAD1": float, - "RAD2": float, - "RAD3": float, - "RAD4": float, - "ROUTER": float, - "RMW": float, - }, + dtype={"BASIN": str, "CY": int, "VMAX": float, "MSLP": float, "TY": str}, ) # Grab data regarding basin and cyclone number from first row self.basin = ATCF_basins[df["BASIN"][0]] self.ID = df["CY"][0] + # Keep around the name as an array + self.name = df["STORMNAME"].to_numpy() + # Take forecast period TAU into consideration df["DATE"] = df["YYYYMMDDHH"] + df["TAU"] df = df[ @@ -459,7 +474,7 @@ def read_atcf(self, path, verbose=False): "RAD3", "RAD4", ]: - df[c] = df[c].where(df[c] != 0, numpy.nan) # value 0 means NaN + df[c] = df[c].where(df[c] != 0, np.nan) # value 0 means NaN df[c] = df.groupby("DATE")[c].bfill() df = df.groupby("DATE").first() @@ -491,27 +506,6 @@ def read_atcf(self, path, verbose=False): self.wind_speeds[:, 0] = units.convert(self.wind_speeds[:, 0], "knots", "m/s") self.wind_speeds[:, 1] = units.convert(self.wind_speeds[:, 1], "nmi", "m") - # Set NaNs to -1 to mark them as missing - for ar in [ - self.max_wind_speed, - self.central_pressure, - self.max_wind_radius, - self.storm_radius, - self.wind_speeds, - ]: - ar[numpy.isnan(ar)] = -1.0 - - if self.max_wind_speed.min() == -1: - warnings.warn( - "Some timesteps have missing max wind speed. These will not be written" - " out to geoclaw format." - ) - if self.central_pressure.min() == -1: - warnings.warn( - "Some timesteps have missing central pressure. These will not be written" - " out to geoclaw format." 
- ) - def read_hurdat(self, path, verbose=False): r"""Read in HURDAT formatted storm file @@ -548,13 +542,13 @@ def read_hurdat(self, path, verbose=False): # Parse data block self.t = [] - self.event = numpy.empty(num_lines, dtype=str) - self.classification = numpy.empty(num_lines, dtype=str) - self.eye_location = numpy.empty((num_lines, 2)) - self.max_wind_speed = numpy.empty(num_lines) - self.central_pressure = numpy.empty(num_lines) - self.max_wind_radius = numpy.empty(num_lines) - self.storm_radius = numpy.empty(num_lines) + self.event = np.empty(num_lines, dtype=str) + self.classification = np.empty(num_lines, dtype=str) + self.eye_location = np.empty((num_lines, 2)) + self.max_wind_speed = np.empty(num_lines) + self.central_pressure = np.empty(num_lines) + self.max_wind_radius = np.empty(num_lines) + self.storm_radius = np.empty(num_lines) for i, line in enumerate(data_block): if len(line) == 0: @@ -695,7 +689,7 @@ def read_ibtracs( ) start_times = ds.time.isel(date_time=0) - start_date = numpy.datetime64(start_date) + start_date = np.datetime64(start_date) # find storm with start date closest to provided storm_ix = abs(start_times - start_date).argmin() @@ -719,8 +713,8 @@ def read_ibtracs( b"tcvitals", ] - ## Create mapping from wmo_ or usa_agency - ## to the appropriate variable + # Create mapping from wmo_ or usa_agency + # to the appropriate variable agency_map = {b"": agency_pref.index("wmo")} # account for multiple usa agencies for a in usa_agencies: @@ -735,14 +729,14 @@ def read_ibtracs( # get index into from agency that is wmo_provider def map_val_to_ix(a): - def inner(x): + def func(x): return agency_map[x] - return xr.apply_ufunc(inner, a, vectorize=True) + return xr.apply_ufunc(func, a, vectorize=True) pref_agency_ix = map_val_to_ix(provider) - ## GET MAX WIND SPEED and PRES + # GET MAX WIND SPEED and PRES pref_vals = {} for v in ["wind", "pres"]: all_vals = ds[["{}_{}".format(i, v) for i in agency_pref]].to_array( @@ -765,8 +759,8 @@ def inner(x): # add to dict pref_vals[v] = val_pref - ## THESE CANNOT BE MISSING SO DROP - ## IF EITHER MISSING + # THESE CANNOT BE MISSING SO DROP + # IF EITHER MISSING valid = pref_vals["wind"].notnull() & pref_vals["pres"].notnull() if not valid.any(): raise NoDataError(missing_necessary_data_warning_str) @@ -774,8 +768,8 @@ def inner(x): for i in ["wind", "pres"]: pref_vals[i] = pref_vals[i].sel(date_time=valid) - ## GET RMW and ROCI - ## (these can be missing) + # GET RMW and ROCI + # (these can be missing) for r in ["rmw", "roci"]: order = [ "{}_{}".format(i, r) @@ -787,7 +781,7 @@ def inner(x): val_pref = vals.isel(agency=best_ix) pref_vals[r] = val_pref - ## CONVERT TO GEOCLAW FORMAT + # CONVERT TO GEOCLAW FORMAT # assign basin to be the basin where track originates # in case track moves across basins @@ -805,20 +799,20 @@ def inner(x): ) ) - ## events + # events self.event = ds.usa_record.values.astype(str) - ## time offset + # time offset if (self.event == "L").any(): # if landfall, use last landfall - self.time_offset = numpy.array(self.t)[self.event == "L"][-1] + self.time_offset = np.array(self.t)[self.event == "L"][-1] else: # if no landfall, use last time of storm self.time_offset = self.t[-1] # Classification, note that this is not the category of the storm self.classification = ds.usa_status.values - self.eye_location = numpy.array([ds.lon, ds.lat]).T + self.eye_location = np.array([ds.lon, ds.lat]).T # Intensity information - for now, including only common, basic intensity # info. 
@@ -880,7 +874,7 @@ def read_ibtracs_processed(self, path, sid, start_date=None): ) start_times = ds.time.isel(time=0) - start_date = numpy.datetime64(start_date) + start_date = np.datetime64(start_date) # find storm with start date closest to provided storm_ix = abs(start_times - start_date).argmin() @@ -928,7 +922,7 @@ def read_ibtracs_processed(self, path, sid, start_date=None): ) # set landfall events - self.event = numpy.array([""] * len(ds.datetime)) + self.event = np.array([""] * len(ds.datetime)) landfalls = (ds.dist2land <= 0) & (ds.dist2land.shift(time=1) > 0) for i in range(landfalls.sum().item()): @@ -939,14 +933,14 @@ def read_ibtracs_processed(self, path, sid, start_date=None): # time offset if (self.event == "L").any(): # if landfall, use last landfall - self.time_offset = numpy.array(self.t)[self.event == "L"][-1] + self.time_offset = np.array(self.t)[self.event == "L"][-1] else: # if no landfall, use last time of storm self.time_offset = self.t[-1] # Classification, note that this is not the category of the storm self.classification = ["NOT_SET"] * len(self.event) - self.eye_location = numpy.array([ds.longstore, ds.latstore]).T + self.eye_location = np.array([ds.longstore, ds.latstore]).T # Intensity information - for now, including only common, basic intensity # info. @@ -1030,7 +1024,7 @@ def read_emanuel(self, path, storm_index, ensemble=None, verbose=False): self.t = pd.to_datetime(storm.datetime).to_pydatetime().tolist() # eye location (n by n) - self.eye_location = numpy.vstack( + self.eye_location = np.vstack( [storm.longstore.values, storm.latstore.values] ).T @@ -1060,8 +1054,8 @@ def read_emanuel(self, path, storm_index, ensemble=None, verbose=False): # populating event with array of empty strings # length num_timesteps = len(storm["datetime"]) - self.event = numpy.empty(num_timesteps, dtype=str) - self.classification = numpy.empty(num_timesteps, dtype=str) + self.event = np.empty(num_timesteps, dtype=str) + self.classification = np.empty(num_timesteps, dtype=str) # use last timestep (recommended by IB) self.time_offset = self.t[-1] @@ -1098,13 +1092,13 @@ def read_jma(self, path, verbose=False): # Parse data block self.t = [] - self.event = numpy.empty(num_lines, dtype=str) - self.classification = numpy.empty(num_lines, dtype=str) - self.eye_location = numpy.empty((num_lines, 2)) - self.max_wind_speed = numpy.empty(num_lines) - self.central_pressure = numpy.empty(num_lines) - self.max_wind_radius = numpy.empty(num_lines) - self.storm_radius = numpy.empty(num_lines) + self.event = np.empty(num_lines, dtype=str) + self.classification = np.empty(num_lines, dtype=str) + self.eye_location = np.empty((num_lines, 2)) + self.max_wind_speed = np.empty(num_lines) + self.central_pressure = np.empty(num_lines) + self.max_wind_radius = np.empty(num_lines) + self.storm_radius = np.empty(num_lines) for i, line in enumerate(data_block): if len(line) == 0: break @@ -1179,12 +1173,12 @@ def read_tcvitals(self, path, verbose=False): # Central_pressure - convert from mbar to Pa - 100.0 # Radius of last isobar contour - convert from km to m - 1000.0 self.t = [] - self.classification = numpy.empty(num_lines, dtype=str) - self.eye_location = numpy.empty((num_lines, 2)) - self.max_wind_speed = numpy.empty(num_lines) - self.central_pressure = numpy.empty(num_lines) - self.max_wind_radius = numpy.empty(num_lines) - self.storm_radius = numpy.empty(num_lines) + self.classification = np.empty(num_lines, dtype=str) + self.eye_location = np.empty((num_lines, 2)) + self.max_wind_speed = 
np.empty(num_lines) + self.central_pressure = np.empty(num_lines) + self.max_wind_radius = np.empty(num_lines) + self.storm_radius = np.empty(num_lines) for i, data in enumerate(data_block): # End at an empty lines - skips lines at the bottom of a file @@ -1224,6 +1218,7 @@ def read_tcvitals(self, path, verbose=False): # ========================================================================= # Write Routines + def write(self, path, file_format="geoclaw", **kwargs): r"""Write out the storm data to *path* in format *file_format* @@ -1247,7 +1242,7 @@ def write(self, path, file_format="geoclaw", **kwargs): getattr(self, "write_%s" % file_format.lower())(path, **kwargs) def write_geoclaw( - self, path, verbose=False, max_wind_radius_fill=None, storm_radius_fill=None + self, path, force=False, skip=True, verbose=False, fill_dict={}, **kwargs ): r"""Write out a GeoClaw formatted storm file @@ -1255,99 +1250,118 @@ def write_geoclaw( :Input: - *path* (string) Path to the file to be written. + - *skip* (bool) Skip a time if NaNs are found and are not replaced. + Default is `True`. + - *force* (bool) Force output of storm even if there is missing data. + Default is `False`. - *verbose* (bool) Print out additional information when writing. - - *max_wind_radius_fill* (func) Function that can be used to fill in - missing data for `max_wind_radius` values. This defaults to simply - setting the value to -1. The function signature should be - `max_wind_radius(t, storm)` where t is the time of the forecast and - `storm` is the storm object. Note that if this or `storm_radius` - field remains -1 that this data line will be assumed to be redundant - and not be written out. - - *storm_radius_fill* (func) Function that can be used to fill in - missing data for `storm_radius` values. This defaults to simply - setting the value to -1. The function signature should be - `storm_radius(t, storm)` where t is the time of the forecast and - `storm` is the storm object. Note that if this or `max_wind_radius` - field remains -1 that this data line will be assumed to be redundant - and not be written + Default is `False`. + - *fill_dict* (dict) Dictionary of functions to use to fill in missing + data represented by NaNs. The keys are the field to be filled and + the function signature should be `my_func(t, storm)` where t is the + time of the forecast and `storm` is the storm object. If the + field remains a NaN or a function is not provided these lines will + be assumed redundant and will be ommitted. Note that the older + keyword arguments are put in this dictionary. Currently the one + default function is for `storm_radius`, which sets the value to + 500 km. """ - def filler(t, storm): - return -1 - - if max_wind_radius_fill is None: - max_wind_radius_fill = lambda t, storm: -1 - if storm_radius_fill is None: - storm_radius_fill = lambda t, storm: -1 - - # Create list for output - # Leave this first line blank as we need to count the actual valid lines - # that will be left in the file below + # If a filling function is not provided we will provide some defaults + fill_dict.update({"storm_radius": lambda t, storm: 500e3}) + # Handle older interface that had specific fill functions + if "max_wind_radius_fill" in kwargs.keys(): + fill_dict.update({"max_wind_radius": kwargs["max_wind_radius_fill"]}) + if "storm_radius_fill" in kwargs.keys(): + fill_dict.update({"storm_radius": kwargs["storm_radius_fill"]}) + + # Loop through each line of data and if the line is valid, perform the + # necessary work to write it out. 
Otherwise either raise an exception + # or skip it num_casts = 0 - data_string = [""] - if self.time_offset is None: - data_string.append("None") - self.time_offset = self.t[0] - else: - data_string.append("%s\n\n" % self.time_offset.isoformat()) + data = [] for n in range(len(self.t)): - # Remove duplicate times - if n > 0: - if self.t[n] == self.t[n - 1]: - continue - - format_string = ("{:19,.8e} " * 7)[:-1] + "\n" - data = [] - if not isinstance(self.time_offset, float): - data.append((self.t[n] - self.time_offset).total_seconds()) - else: - data.append(self.t[n] - self.time_offset) - data.append(self.eye_location[n, 0]) - data.append(self.eye_location[n, 1]) - - if self.max_wind_speed[n] == -1: + if self.t[n] == self.t[n - 1]: + # Skip this time continue - data.append(self.max_wind_speed[n]) - - # Allow custom function to set max wind radius if not - # available - if self.max_wind_radius[n] == -1: - new_wind_radius = max_wind_radius_fill(self.t[n], self) - if new_wind_radius == -1: - continue - else: - data.append(new_wind_radius) - else: - data.append(self.max_wind_radius[n]) - if self.central_pressure[n] == -1: + # Check each value we need for this time to make sure it is valid + valid = True + for name in [ + "max_wind_speed", + "central_pressure", + "max_wind_radius", + "storm_radius", + ]: + if np.isnan(getattr(self, name)[n]): + if name in fill_dict.keys(): + # Fill value with function provided + getattr(self, name)[n] = fill_dict[name](self.t[n], self) + elif skip: + # Skip this line + valid = False + if verbose: + # Just warn that a NaN was found but continue + msg = ( + "*** WARNING: The value {} at {} is a " + + "NaN. Skipping this line." + ) + warnings.warn(msg.format(name, self.t[n])) + elif not force: + # If we are not asked to force to write raise an + # exception given the NaN + msg = ( + "The value {} at {} is a NaN and the storm " + + "will not be written in GeoClaw format. If " + + "you want to fill in the value provide a " + + "function or set `force=True`." 
+ ) + raise ValueError(msg.format(name, self.t[n])) + if not valid: continue - data.append(self.central_pressure[n]) - # Allow custom function to set storm radius if not available - if self.storm_radius[n] == -1: - new_storm_radius = storm_radius_fill(self.t[n], self) - if new_storm_radius == -1: - continue - else: - data.append(new_storm_radius) - else: - data.append(self.storm_radius[n]) - - data_string.append(format_string.format(*data)) + # Succeeded, add this time to the output num_casts += 1 + data.append(np.empty(7)) + + # If we do not have a time offset use the first valid row as the + # offset time + if self.time_offset is None: + self.time_offset = self.t[n] - # Write to actual file now that we know exactly how many lines it will - # contain + # Time + if not isinstance(self.time_offset, float): + data[-1][0] = (self.t[n] - self.time_offset).total_seconds() + else: + data[-1][0] = self.t[n] - self.time_offset + # Eye-location + data[-1][1:3] = self.eye_location[n, :] + # Max wind speed + data[-1][3] = self.max_wind_speed[n] + # Max wind radius + data[-1][4] = self.max_wind_radius[n] + # Central pressure + data[-1][5] = self.central_pressure[n] + # Outer storm radius + data[-1][6] = self.storm_radius[n] + + # Write out file + format_string = ("{:19,.8e} " * 7)[:-1] + "\n" try: - # Update number of forecasts here - data_string[0] = "%s\n" % num_casts with open(path, "w") as data_file: - for data_line in data_string: - data_file.write(data_line) + # Write header + data_file.write(f"{num_casts}\n") + if isinstance(self.time_offset, datetime.datetime): + data_file.write(f"{self.time_offset.isoformat()}\n\n") + else: + data_file.write(f"{str(self.time_offset)}\n\n") + + # Write data lines + for line in data: + data_file.write(format_string.format(*line)) except Exception as e: - # Remove possibly partially generated file if not successful + # If an exception occurs clean up a partially generated file if os.path.exists(path): os.remove(path) raise e @@ -1420,15 +1434,15 @@ def write_hurdat(self, path, verbose=False): # Convert latitude to proper Hurdat format e.g 12.0N if latitude > 0: - latitude = str(numpy.abs(latitude)) + "N" + latitude = str(np.abs(latitude)) + "N" else: - latitude = str(numpy.abs(latitude)) + "S" + latitude = str(np.abs(latitude)) + "S" # Convert longitude to proper Hurdat format e.g 12.0W if longitude > 0: - longitude = str(numpy.abs(longitude)) + "E" + longitude = str(np.abs(longitude)) + "E" else: - longitude = str(numpy.abs(longitude)) + "W" + longitude = str(np.abs(longitude)) + "W" data_file.write( "".join( @@ -1560,7 +1574,7 @@ def category(self, categorization="NHC", cat_names=False): # Beaufort scale below uses knots speeds = units.convert(self.max_wind_speed, "m/s", "knots") category = ( - numpy.zeros(speeds.shape) + np.zeros(speeds.shape) + (speeds >= 1) * (speeds < 4) * 1 + (speeds >= 4) * (speeds < 7) * 2 + (speeds >= 7) * (speeds < 11) * 3 @@ -1596,7 +1610,7 @@ def category(self, categorization="NHC", cat_names=False): # TODO: Add TD and TS designations speeds = units.convert(self.max_wind_speed, "m/s", "knots") category = ( - numpy.zeros(speeds.shape) + np.zeros(speeds.shape) + (speeds < 30) * -1 + (speeds >= 64) * (speeds < 83) * 1 + (speeds >= 83) * (speeds < 96) * 2 @@ -1751,7 +1765,7 @@ def fill_rad_w_other_source(t, storm_targ, storm_fill, var, interp_kwargs={}): ) # convert -1 to nan - fill_da = fill_da.where(fill_da > 0, numpy.nan) + fill_da = fill_da.where(fill_da > 0, np.nan) # if not all missing, try using storm_fill to fill if 
fill_da.notnull().any(): @@ -1766,19 +1780,19 @@ def fill_rad_w_other_source(t, storm_targ, storm_fill, var, interp_kwargs={}): # try replacing with storm_fill # (assuming atcf has more data points than ibtracs) - if not numpy.isnan(fill_interp): + if not np.isnan(fill_interp): return fill_interp # next, try just interpolating other ibtracs values targ_da = xr.DataArray( getattr(storm_targ, var), coords={"t": getattr(storm_targ, "t")}, dims=("t",) ) - targ_da = targ_da.where(targ_da > 0, numpy.nan) + targ_da = targ_da.where(targ_da > 0, np.nan) if targ_da.notnull().any(): targ_da = targ_da.groupby("t").first() targ_da = targ_da.dropna("t") targ_interp = targ_da.interp(t=[t], kwargs=interp_kwargs).item() - if not numpy.isnan(targ_interp): + if not np.isnan(targ_interp): return targ_interp # if nothing worked, return the missing value (-1) diff --git a/src/python/geoclaw/util.py b/src/python/geoclaw/util.py index 496d6cd0e..1e64b3fc7 100644 --- a/src/python/geoclaw/util.py +++ b/src/python/geoclaw/util.py @@ -360,7 +360,8 @@ def get_cache_path(product): end_date.strftime(cache_date_fmt)) filename = '{}_{}_{}'.format(time_zone, datum, units) abs_cache_dir = os.path.abspath(cache_dir) - return os.path.join(abs_cache_dir, product, station, dates, filename) + return os.path.join(abs_cache_dir, product, str(station), dates, + filename) def save_to_cache(cache_path, data): # make parent directories if they do not exist
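
Finally, as context for the reworked Storm.write_geoclaw interface in the storm.py hunks above, a minimal usage sketch. The input and output filenames and the fill function are placeholders; only the keywords added by this patch (fill_dict, skip, force) are exercised.

    # Sketch of writing GeoClaw-format storm data with the new NaN handling.
    from clawpack.geoclaw.surge.storm import Storm

    storm = Storm(path="bal092008.dat", file_format="ATCF")  # hypothetical input

    # Fields that are NaN at a given time can be filled by a user function with
    # signature f(t, storm); storm_radius already gets a 500 km default fill in
    # this patch, so only max_wind_radius needs one here (50 km as an example).
    fill = {"max_wind_radius": lambda t, storm: 50e3}

    # skip=True (the default) drops any time that still has NaNs (warned about
    # when verbose=True); force=True would instead write such lines as-is
    # rather than raising ValueError when skip=False.
    storm.write("ike_test.storm", file_format="geoclaw",
                fill_dict=fill, skip=True, verbose=True)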