diff --git a/CODEOWNERS b/CODEOWNERS
index 0db33c9..3a6951e 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -3,7 +3,7 @@
 # These owners will be the default owners for everything in the repo.
 #* @defunkt
-* @pjpegion @climbfuji
+* @pjpegion
 
 # Order is important. The last matching pattern has the most precedence.
 # So if a pull request only touches javascript files, only these owners
diff --git a/README.standalone b/README.standalone
new file mode 100644
index 0000000..15e0651
--- /dev/null
+++ b/README.standalone
@@ -0,0 +1,33 @@
+Instructions on how to build and run the standalone ca_global code on hera
+1. Copy the entire directory structure
+    cd /scratch2/BMC/gsienkf/Philip.Pegion
+    cp -r NEMSfv3gfs_develop
+
+2. Optional: compile the full model (I already built it, so you should not have to)
+    cd /tests/
+    ./compile.sh $PWD/../FV3 hera.intel '32BIT=Y HYDRO=N' 1 NO NO
+
+3. Compile the standalone model
+    cd ../stochastic_physics
+    # my loaded modules:
+  1) intel/18.0.5.274   3) netcdf/4.7.0   5) contrib          7) anaconda/latest
+  2) impi/2018.0.4      4) hdf5/1.10.5    6) grads/2.2.1
+
+
+    compile_standalone_ca OR compile_standalone
+
+4. There is an input.nml which is currently set to run at C96, and the INPUT directory points to
+   C96 grid files. If you want to run at C384, you will need to rm INPUT and link in the C384
+   directory (ln -sf INPUT_C384 INPUT) and make npx and npy 385 in input.nml
+
+5. Get an interactive session
+
+   salloc --x11=first -q debug -t 0:30:00 --nodes=1 -A -n 6
+
+6. Run the job
+   srun standalone_ca OR standalone_stochy
+
+7. Post-process the output to a 360x180 grid (script is on hera in ~Philip.Pegion)
+   run_fregrid.sh (OR run_fregrid_c384.sh)
+
+   output file is ca_out.nc
diff --git a/atmosphere_stub.F90 b/atmosphere_stub.F90
new file mode 100644
index 0000000..38a7fd7
--- /dev/null
+++ b/atmosphere_stub.F90
@@ -0,0 +1,783 @@
+module atmosphere_stub_mod
+
+#include <fms_platform.h>
+
+!-----------------
+! FMS modules:
+!-----------------
+use time_manager_mod, only: time_type, get_time, set_time, operator(+), &
+                            operator(-), operator(/), time_type_to_real
+use fms_mod, only: file_exist, open_namelist_file,    &
+                   close_file, error_mesg, FATAL,     &
+                   check_nml_error, stdlog,           &
+                   write_version_number,              &
+                   set_domain,                        &
+                   read_data,                         &
+                   mpp_clock_id, mpp_clock_begin,     &
+                   mpp_clock_end, CLOCK_SUBCOMPONENT, &
+                   clock_flag_default, nullify_domain
+use mpp_mod, only: mpp_error, stdout, FATAL, NOTE,    &
+                   input_nml_file, mpp_root_pe,       &
+                   mpp_npes, mpp_pe, mpp_chksum,      &
+                   mpp_get_current_pelist,            &
+                   mpp_set_current_pelist
+use mpp_parameter_mod, only: EUPDATE, WUPDATE, SUPDATE, NUPDATE
+use mpp_domains_mod, only: domain2d, mpp_update_domains
+use xgrid_mod, only: grid_box_type
+
+!-----------------
+! FV core modules:
+!-----------------
+use fv_arrays_mod, only: fv_atmos_type,fv_grid_bounds_type,fv_grid_type
+use fv_control_stub_mod, only: fv_init, ngrids
+use fv_timing_mod, only: timing_on, timing_off
+use fv_sg_mod, only: fv_subgrid_z
+use fv_update_phys_mod, only: fv_update_phys
+use mpp_domains_mod, only: mpp_get_data_domain, mpp_get_compute_domain
+use tp_core_mod, only: copy_corners
+use a2b_edge_mod, only: a2b_ord4
+
+implicit none
+private
+
+!--- driver routines
+public :: atmosphere_init_stub
+
+!--- utility routines
+!public :: atmosphere_return_winds, atmosphere_smooth_noise
+public :: atmosphere_resolution,atmosphere_domain,&
+          atmosphere_scalar_field_halo,atmosphere_control_data
+
+!--- physics/radiation data exchange routines
+
+!-----------------------------------------------------------------------
+! 
version number of this module +! Include variable "version" to be written to log file. +#include +character(len=20) :: mod_name = 'fvGFS/atmosphere_mod' + +!---- private data ---- + public Atm, mytile + + !These are convenience variables for local use only, and are set to values in Atm% + real :: dt_atmos + integer :: npx, npy, npz, ncnst, pnats + integer :: isc, iec, jsc, jec + integer :: isd, ied, jsd, jed + integer :: sec, seconds, days + integer :: id_dynam, id_fv_diag, id_subgridz + + + integer :: mytile = 1 + integer :: p_split = 1 + integer, allocatable :: pelist(:) + logical, allocatable :: grids_on_this_pe(:) + type(fv_atmos_type), allocatable, target :: Atm(:) + + integer :: id_udt_dyn, id_vdt_dyn + + +!---dynamics tendencies for use in fv_subgrid_z and during fv_update_phys + real, allocatable, dimension(:,:,:) :: u_dt, v_dt, t_dt + real, allocatable :: pref(:,:), dum1d(:) + + logical :: first_diag = .true. + +contains + + +!>@brief The subroutine 'atmosphere_init' is an API to initialize the FV3 dynamical core, +!! including the grid structures, memory, initial state (self-initialization or restart), +!! and diagnostics. + subroutine atmosphere_init_stub (Grid_box, area) +#ifdef OPENMP + use omp_lib +#endif + type(grid_box_type), intent(inout) :: Grid_box + real*8, pointer, dimension(:,:), intent(inout) :: area +!--- local variables --- + integer :: i, n + + call timing_on('ATMOS_INIT') + allocate(pelist(mpp_npes())) + call mpp_get_current_pelist(pelist) + + +!---- compute physics/atmos time step in seconds ---- + + dt_atmos = real(sec) + + call fv_init( Atm, dt_atmos, grids_on_this_pe, p_split ) ! allocates Atm components + + do n=1,ngrids + if (grids_on_this_pe(n)) mytile = n + enddo + + npx = Atm(mytile)%npx + npy = Atm(mytile)%npy + npz = Atm(mytile)%npz + ncnst = Atm(mytile)%ncnst + pnats = Atm(mytile)%flagstruct%pnats + + isc = Atm(mytile)%bd%isc + iec = Atm(mytile)%bd%iec + jsc = Atm(mytile)%bd%jsc + jec = Atm(mytile)%bd%jec + + isd = isc - Atm(mytile)%bd%ng + ied = iec + Atm(mytile)%bd%ng + jsd = jsc - Atm(mytile)%bd%ng + jed = jec + Atm(mytile)%bd%ng + + + + ! Allocate grid variables to be used to calculate gradient in 2nd order flux exchange + ! This data is only needed for the COARSEST grid. 
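+  ! Index conventions used below (taken from Atm(mytile)%bd above):
+  !   isc:iec, jsc:jec - compute domain owned by this MPI task
+  !   isd:ied, jsd:jed - data domain, i.e. the compute domain extended by the halo width ng
+  ! The Grid_box components allocated next are compute-domain copies of the grid metrics
+  ! (dx, dy, cell areas, edge arrays and the en1/en2/vlon/vlat vectors); staggered fields
+  ! carry one extra point in the staggered direction (e.g. dx is dimensioned jsc:jec+1 in j).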
+ + allocate(Grid_box%dx ( isc:iec , jsc:jec+1)) + allocate(Grid_box%dy ( isc:iec+1, jsc:jec )) + allocate(Grid_box%area ( isc:iec , jsc:jec )) + allocate(Grid_box%edge_w( jsc:jec+1)) + allocate(Grid_box%edge_e( jsc:jec+1)) + allocate(Grid_box%edge_s( isc:iec+1 )) + allocate(Grid_box%edge_n( isc:iec+1 )) + allocate(Grid_box%en1 (3, isc:iec , jsc:jec+1)) + allocate(Grid_box%en2 (3, isc:iec+1, jsc:jec )) + allocate(Grid_box%vlon (3, isc:iec , jsc:jec )) + allocate(Grid_box%vlat (3, isc:iec , jsc:jec )) + Grid_box%dx ( isc:iec , jsc:jec+1) = Atm(mytile)%gridstruct%dx ( isc:iec, jsc:jec+1) + Grid_box%dy ( isc:iec+1, jsc:jec ) = Atm(mytile)%gridstruct%dy ( isc:iec+1, jsc:jec ) + Grid_box%area ( isc:iec , jsc:jec ) = Atm(mytile)%gridstruct%area ( isc:iec , jsc:jec ) + Grid_box%edge_w( jsc:jec+1) = Atm(mytile)%gridstruct%edge_w( jsc:jec+1) + Grid_box%edge_e( jsc:jec+1) = Atm(mytile)%gridstruct%edge_e( jsc:jec+1) + Grid_box%edge_s( isc:iec+1 ) = Atm(mytile)%gridstruct%edge_s( isc:iec+1) + Grid_box%edge_n( isc:iec+1 ) = Atm(mytile)%gridstruct%edge_n( isc:iec+1) + Grid_box%en1 (:, isc:iec , jsc:jec+1) = Atm(mytile)%gridstruct%en1 (:, isc:iec , jsc:jec+1) + Grid_box%en2 (:, isc:iec+1, jsc:jec ) = Atm(mytile)%gridstruct%en2 (:, isc:iec+1, jsc:jec ) + do i = 1,3 + Grid_box%vlon(i, isc:iec , jsc:jec ) = Atm(mytile)%gridstruct%vlon (isc:iec , jsc:jec, i ) + Grid_box%vlat(i, isc:iec , jsc:jec ) = Atm(mytile)%gridstruct%vlat (isc:iec , jsc:jec, i ) + enddo + allocate (area(isc:iec , jsc:jec )) + area(isc:iec,jsc:jec) = Atm(mytile)%gridstruct%area_64(isc:iec,jsc:jec) + + + call set_domain ( Atm(mytile)%domain ) + +!----- initialize atmos_axes and fv_dynamics diagnostics + !I've had trouble getting this to work with multiple grids at a time; worth revisiting? +! --- initialize clocks for dynamics, physics_down and physics_up + id_dynam = mpp_clock_id ('FV dy-core', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_fv_diag = mpp_clock_id ('FV Diag', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + + call timing_off('ATMOS_INIT') + + + end subroutine atmosphere_init_stub + +! subroutine atmosphere_smooth_noise (wnoise,npass,ns_type,renorm_type) +! +! !--- interface variables --- +! real,intent(inout) :: wnoise(isd:ied,jsd:jed,1) +! integer, intent(in) :: npass,ns_type,renorm_type +! !--- local variables +! integer:: i,j,nloops,nlast +! real ::inflation(isc:iec,jsc:jec),inflation2 +! ! scale factor for restoring inflation +! ! logic: +! ! if box mean: scalar get basic scaling, vector gets 1/grid dependent scaling 0-0 ; 0 - 1 +! ! if box mean2: no scaling +! ! if del2 : scalar gets grid dependent scaling,vector get basic scaling 1 0; 1 1 +! if(npass.GT.0) then +! if (ns_type.NE.2) then +! if (ns_type.EQ. 0) then +! !inflation2=1.0/sqrt(1.0/(4.0*npass)) +! inflation2=1.0/sqrt(1.0/(9.0*npass)) +! else +! inflation2=1.0/sqrt(1.0/(11.0/3.0*npass)) +! endif +! if ( ns_type.EQ.1) then ! del2 smoothing needs to be scaled by grid-size +! do j=jsc,jec +! do i=isc,iec +! inflation(i,j)=inflation2*Atm(mytile)%gridstruct%dxAV/(0.5*(Atm(mytile)%gridstruct%dx(i,j)+Atm(mytile)%gridstruct%dy(i,j))) +! enddo +! enddo +! else +! if ( renorm_type.EQ.1) then ! box smooth does not need scaling for scalar +! do j=jsc,jec +! do i=isc,iec +! inflation(i,j)=inflation2 +! enddo +! enddo +! else +! ! box mean needs inversize grid-size scaling for vector +! do j=jsc,jec +! do i=isc,iec +! inflation(i,j)=inflation2*(0.5*(Atm(mytile)%gridstruct%dx(i,j)+Atm(mytile)%gridstruct%dy(i,j)))/Atm(mytile)%gridstruct%dxAV +! enddo +! 
enddo +! endif +! endif +! endif +! nloops=npass/3 +! nlast=mod(npass,3) +! do j=1,nloops +! if (ns_type.EQ.1) then +! !call del2_cubed(wnoise , 0.25*Atm(mytile)%gridstruct%da_min, Atm(mytile)%gridstruct, & +! call del2_cubed(wnoise , 0.20*Atm(mytile)%gridstruct%da_min, Atm(mytile)%gridstruct, & +! Atm(mytile)%domain, npx, npy, 1, 3, Atm(mytile)%bd) +! else if (ns_type .EQ. 0) then +! call box_mean(wnoise , Atm(mytile)%gridstruct, Atm(mytile)%domain, Atm(mytile)%npx, Atm(mytile)%npy, 1, 3, Atm(mytile)%bd) +! else if (ns_type .EQ. 2) then +! call box_mean2(wnoise , Atm(mytile)%gridstruct, Atm(mytile)%domain, Atm(mytile)%npx, Atm(mytile)%npy, 1, 3, Atm(mytile)%bd) +! endif +! enddo +! if(nlast>0) then +! if (ns_type.EQ.1) then +! !call del2_cubed(wnoise , 0.25*Atm(mytile)%gridstruct%da_min, Atm(mytile)%gridstruct, & +! call del2_cubed(wnoise , 0.20*Atm(mytile)%gridstruct%da_min, Atm(mytile)%gridstruct, & +! Atm(mytile)%domain, npx, npy, 1, nlast, Atm(mytile)%bd) +! else if (ns_type .EQ. 0) then +! call box_mean(wnoise , Atm(mytile)%gridstruct, Atm(mytile)%domain, Atm(mytile)%npx, Atm(mytile)%npy, 1, nlast, Atm(mytile)%bd) +! else if (ns_type .EQ. 2) then +! call box_mean2(wnoise , Atm(mytile)%gridstruct, Atm(mytile)%domain, Atm(mytile)%npx, Atm(mytile)%npy, 1, nlast, Atm(mytile)%bd) +! endif +! endif +! ! restore amplitude +! if (ns_type.NE.2) then +! do j=jsc,jec +! do i=isc,iec +! wnoise(i,j,1)=wnoise(i,j,1)*inflation(i,j) +! enddo +! enddo +! endif +! endif +! end subroutine atmosphere_smooth_noise + +! subroutine atmosphere_return_winds (psi,ua,va,edge,km,vwts) +! integer,intent(in) :: edge +! real,intent(inout) :: psi(isd:ied,jsd:jed) +! real,intent(inout) :: ua(isc:iec+edge,jsc:jec) +! real,intent(inout) :: va(isc:iec,jsc:jec+edge) +! integer, optional,intent(in):: km +! real, optional,intent(in):: vwts(:) +! integer :: k +! call timing_on('COMM_TOTAL') +! call mpp_update_domains(psi, Atm(mytile)%domain, complete=.true.) +! call timing_off('COMM_TOTAL') +! if (edge.EQ.0) then +! call make_a_winds(ua, va, psi,Atm(mytile)%ng,Atm(mytile)%gridstruct,Atm(mytile)%bd,Atm(mytile)%npx,Atm(mytile)%npy) +! endif +! if (edge.EQ.1) then +! call make_c_winds(ua, va, psi,Atm(mytile)%ng,Atm(mytile)%gridstruct,Atm(mytile)%bd,Atm(mytile)%npx,Atm(mytile)%npy) +!! populate wind perturbations right here +! do k=1,km +! Atm(mytile)%urandom_c(isc:iec+edge,jsc:jec ,k)=ua*vwts(k) +! Atm(mytile)%vrandom_c(isc:iec ,jsc:jec+edge,k)=va*vwts(k) +! enddo +! !call mpp_update_domains(Atm(mytile)%urandom_c, Atm(mytile)%domain, complete=.true.) +! !call mpp_update_domains(Atm(mytile)%vrandom_c, Atm(mytile)%domain, complete=.true.) +! endif +! end subroutine atmosphere_return_winds +! + subroutine del2_cubed(q, cd, gridstruct, domain, npx, npy, km, nmax, bd) + !--------------------------------------------------------------- + ! This routine is for filtering the omega field for the physics + !--------------------------------------------------------------- + integer, intent(in):: npx, npy, km, nmax + real(kind=8), intent(in):: cd !< cd = K * da_min; 0 < K < 0.25 + type(fv_grid_bounds_type), intent(IN) :: bd + real, intent(inout):: q(bd%isd:bd%ied,bd%jsd:bd%jed,km) + type(fv_grid_type), intent(IN), target :: gridstruct + type(domain2d), intent(INOUT) :: domain + real, parameter:: r3 = 1./3. 
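+      ! fx and fy below hold the diffusive fluxes across the cell faces in x and y; each
+      ! pass of the filter updates q with the flux divergence
+      !   q(i,j,k) = q(i,j,k) + cd*rarea(i,j)*( fx(i,j)-fx(i+1,j) + fy(i,j)-fy(i,j+1) )
+      ! and at most ntimes = min(3,nmax) passes are applied, after averaging the cube corners.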
+ real :: fx(bd%isd:bd%ied+1,bd%jsd:bd%jed), fy(bd%isd:bd%ied,bd%jsd:bd%jed+1) + real :: q2(bd%isd:bd%ied,bd%jsd:bd%jed) + integer i,j,k, n, nt, ntimes + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + + !Local routine pointers +! real, pointer, dimension(:,:) :: rarea +! real, pointer, dimension(:,:) :: del6_u, del6_v +! logical, pointer :: sw_corner, se_corner, ne_corner, nw_corner + + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed + + ntimes = min(3, nmax) + + call timing_on('COMM_TOTAL') + call mpp_update_domains(q, domain, complete=.true.) + call timing_off('COMM_TOTAL') + + + do n=1,ntimes + nt = ntimes - n + +!$OMP parallel do default(none) shared(km,q,is,ie,js,je,npx,npy, & +!$OMP nt,isd,jsd,gridstruct,bd, & +!$OMP cd) & +!$OMP private(fx, fy) + do k=1,km + + if ( gridstruct%sw_corner ) then + q(1,1,k) = (q(1,1,k)+q(0,1,k)+q(1,0,k)) * r3 + q(0,1,k) = q(1,1,k) + q(1,0,k) = q(1,1,k) + endif + if ( gridstruct%se_corner ) then + q(ie, 1,k) = (q(ie,1,k)+q(npx,1,k)+q(ie,0,k)) * r3 + q(npx,1,k) = q(ie,1,k) + q(ie, 0,k) = q(ie,1,k) + endif + if ( gridstruct%ne_corner ) then + q(ie, je,k) = (q(ie,je,k)+q(npx,je,k)+q(ie,npy,k)) * r3 + q(npx,je,k) = q(ie,je,k) + q(ie,npy,k) = q(ie,je,k) + endif + if ( gridstruct%nw_corner ) then + q(1, je,k) = (q(1,je,k)+q(0,je,k)+q(1,npy,k)) * r3 + q(0, je,k) = q(1,je,k) + q(1,npy,k) = q(1,je,k) + endif + + if(nt>0 .and. (.not. gridstruct%regional)) call copy_corners(q(isd,jsd,k), npx, npy, 1, gridstruct%nested, bd, & + gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner ) + do j=js-nt,je+nt + do i=is-nt,ie+1+nt +#ifdef USE_SG + fx(i,j) = gridstruct%dy(i,j)*gridstruct%sina_u(i,j)*(q(i-1,j,k)-q(i,j,k))*gridstruct%rdxc(i,j) +#else + fx(i,j) = gridstruct%del6_v(i,j)*(q(i-1,j,k)-q(i,j,k)) +#endif + enddo + enddo + + if(nt>0 .and. (.not. gridstruct%regional)) call copy_corners(q(isd,jsd,k), npx, npy, 2, gridstruct%nested, bd, & + gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner) + do j=js-nt,je+1+nt + do i=is-nt,ie+nt +#ifdef USE_SG + fy(i,j) = gridstruct%dx(i,j)*gridstruct%sina_v(i,j)*(q(i,j-1,k)-q(i,j,k))*gridstruct%rdyc(i,j) +#else + fy(i,j) = gridstruct%del6_u(i,j)*(q(i,j-1,k)-q(i,j,k)) +#endif + enddo + enddo + + do j=js-nt,je+nt + do i=is-nt,ie+nt + q(i,j,k) = q(i,j,k) + cd*gridstruct%rarea(i,j)*(fx(i,j)-fx(i+1,j)+fy(i,j)-fy(i,j+1)) + enddo + enddo + enddo + enddo + + end subroutine del2_cubed + +!>@brief The subroutine 'box_mean' filters a field with a 9-point mean stencil + + subroutine box_mean(q, gridstruct, domain, npx, npy, km, nmax, bd) + !--------------------------------------------------------------- + ! This routine is for filtering the omega field for the physics + !--------------------------------------------------------------- + integer, intent(in):: npx, npy, km, nmax + type(fv_grid_bounds_type), intent(IN) :: bd + real, intent(inout):: q(bd%isd:bd%ied,bd%jsd:bd%jed,km) + type(fv_grid_type), intent(IN), target :: gridstruct + type(domain2d), intent(INOUT) :: domain + real, parameter:: r3 = 1./3.,r9=1./9. + real :: q2(bd%isd:bd%ied,bd%jsd:bd%jed) + integer i,j,k, n, nt, ntimes + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + + !Local routine pointers +! real, pointer, dimension(:,:) :: rarea +! real, pointer, dimension(:,:) :: del6_u, del6_v +! 
logical, pointer :: sw_corner, se_corner, ne_corner, nw_corner + + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed + + ntimes = min(3, nmax) + + call timing_on('COMM_TOTAL') + call mpp_update_domains(q, domain, complete=.true.) + call timing_off('COMM_TOTAL') + + + do n=1,ntimes + nt = ntimes !- n + +!$OMP parallel do default(none) shared(km,is,ie,js,je,npx,npy, & +!$OMP q,nt,isd,jsd,gridstruct,bd) & +!$OMP private(q2) + do k=1,km + + if ( gridstruct%sw_corner ) then + q(1,1,k) = (q(1,1,k)+q(0,1,k)+q(1,0,k)) * r3 + q(0,1,k) = q(1,1,k) + q(1,0,k) = q(1,1,k) + endif + if ( gridstruct%se_corner ) then + q(ie, 1,k) = (q(ie,1,k)+q(npx,1,k)+q(ie,0,k)) * r3 + q(npx,1,k) = q(ie,1,k) + q(ie, 0,k) = q(ie,1,k) + endif + if ( gridstruct%ne_corner ) then + q(ie, je,k) = (q(ie,je,k)+q(npx,je,k)+q(ie,npy,k)) * r3 + q(npx,je,k) = q(ie,je,k) + q(ie,npy,k) = q(ie,je,k) + endif + if ( gridstruct%nw_corner ) then + q(1, je,k) = (q(1,je,k)+q(0,je,k)+q(1,npy,k)) * r3 + q(0, je,k) = q(1,je,k) + q(1,npy,k) = q(1,je,k) + endif + + if(nt>0) call copy_corners(q(isd,jsd,k), npx, npy, 1, gridstruct%nested, bd, & + gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner ) + + if(nt>0) call copy_corners(q(isd,jsd,k), npx, npy, 2, gridstruct%nested, bd, & + gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner) + + !do j=js-nt,je+nt + ! do i=is-nt,ie+nt + do j=jsd+1,jed-1 + do i=isd+1,ied-1 + !q2(i,j) = (gridstruct%area(i-1,j+1)*q(i-1,j+1,k) + gridstruct%area(i,j+1)*q(i,j+1,k) + gridstruct%area(i+1,j+1)*q(i+1,j+1,k) +& + ! gridstruct%area(i-1,j )*q(i-1,j,k) + gridstruct%area(i,j )*q(i,j ,k) + gridstruct%area(i+1,j )*q(i+1,j ,k) +& + ! gridstruct%area(i-1,j-1)*q(i-1,j-1,k) + gridstruct%area(i,j-1)*q(i,j-1,k) + gridstruct%area(i+1,j-1)*q(i+1,j-1,k))/SUM(gridstruct%area(i-1:i+1,j-1:j+1)) + q2(i,j) = r9*(q(i-1,j+1,k)+q(i,j+1,k)+q(i+1,j+1,k)+q(i-1,j,k)+q(i,j,k)+q(i+1,j,k)+q(i-1,j-1,k)+q(i,j-1,k)+q(i+1,j-1,k)) + !if (j.GE. je .AND. i.GE. ie) print*,'area +1=',gridstruct%area(i-1:i+1,j+1) + !if (j.GE. je .AND. i.GE. ie) print*,'area =',gridstruct%area(i-1:i+1,j) + !if (j.GE. je .AND. i.GE. ie) print*,'area -1=',gridstruct%area(i-1:i+1,j-1) + !if (j.GE. je .AND. i.GE. ie) print*,'q +1=',q(i-1:i+1,j+1,k) + !if (j.GE. je .AND. i.GE. ie) print*,'q =',q(i-1:i+1,j,k) + !if (j.GE. je .AND. i.GE. ie) print*,'q -1=',q(i-1:i+1,j-1,k) + enddo + enddo + do j=js-nt,je+nt + do i=is-nt,ie+nt + q(i,j,k)=q2(i,j) + enddo + enddo + enddo + enddo + end subroutine box_mean + + subroutine box_mean2(q, gridstruct, domain, npx, npy, km, nmax, bd) + !--------------------------------------------------------------- + ! This routine is for filtering the omega field for the physics + !--------------------------------------------------------------- + integer, intent(in):: npx, npy, km, nmax + type(fv_grid_bounds_type), intent(IN) :: bd + real, intent(inout):: q(bd%isd:bd%ied,bd%jsd:bd%jed,km) + type(fv_grid_type), intent(IN), target :: gridstruct + type(domain2d), intent(INOUT) :: domain + real, parameter:: r3 = 1./3.,r10=0.1 + real :: q2(bd%isd:bd%ied,bd%jsd:bd%jed) + integer i,j,k, n, nt, ntimes + integer :: is, ie, js, je + integer :: isd, ied, jsd, jed + + !Local routine pointers +! real, pointer, dimension(:,:) :: rarea +! real, pointer, dimension(:,:) :: del6_u, del6_v +! 
logical, pointer :: sw_corner, se_corner, ne_corner, nw_corner + + is = bd%is + ie = bd%ie + js = bd%js + je = bd%je + isd = bd%isd + ied = bd%ied + jsd = bd%jsd + jed = bd%jed + + ntimes = min(3, nmax) + + call timing_on('COMM_TOTAL') + call mpp_update_domains(q, domain, complete=.true.) + call timing_off('COMM_TOTAL') + + + do n=1,ntimes + nt = ntimes !- n + +!$OMP parallel do default(none) shared(km,is,ie,js,je,npx,npy, & +!$OMP q,nt,isd,jsd,gridstruct,bd) & +!$OMP private(q2) + do k=1,km + + if ( gridstruct%sw_corner ) then + q(1,1,k) = (q(1,1,k)+q(0,1,k)+q(1,0,k)) * r3 + q(0,1,k) = q(1,1,k) + q(1,0,k) = q(1,1,k) + endif + if ( gridstruct%se_corner ) then + q(ie, 1,k) = (q(ie,1,k)+q(npx,1,k)+q(ie,0,k)) * r3 + q(npx,1,k) = q(ie,1,k) + q(ie, 0,k) = q(ie,1,k) + endif + if ( gridstruct%ne_corner ) then + q(ie, je,k) = (q(ie,je,k)+q(npx,je,k)+q(ie,npy,k)) * r3 + q(npx,je,k) = q(ie,je,k) + q(ie,npy,k) = q(ie,je,k) + endif + if ( gridstruct%nw_corner ) then + q(1, je,k) = (q(1,je,k)+q(0,je,k)+q(1,npy,k)) * r3 + q(0, je,k) = q(1,je,k) + q(1,npy,k) = q(1,je,k) + endif + + if(nt>0) call copy_corners(q(isd,jsd,k), npx, npy, 1, gridstruct%nested, bd, & + gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner ) + + if(nt>0) call copy_corners(q(isd,jsd,k), npx, npy, 2, gridstruct%nested, bd, & + gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner) + + do j=jsd+1,jed-1 + do i=isd+1,ied-1 + q2(i,j) = r10*(q(i-1,j+1,k)+q(i,j+1,k)+q(i+1,j+1,k)+q(i-1,j,k)+2*q(i,j,k)+q(i+1,j,k)+q(i-1,j-1,k)+q(i,j-1,k)+q(i+1,j-1,k)) + enddo + enddo + do j=js-nt,je+nt + do i=is-nt,ie+nt + q(i,j,k)=q2(i,j) + enddo + enddo + enddo + enddo + + end subroutine box_mean2 +subroutine make_a_winds(ua, va, psi, ng, gridstruct, bd, npx, npy) + +integer, intent(IN) :: ng, npx, npy +type(fv_grid_bounds_type), intent(IN) :: bd +real, intent(inout) :: psi(bd%isd:bd%ied,bd%jsd:bd%jed) +real, intent(inout) :: ua(bd%isc:bd%iec ,bd%jsc:bd%jec ) +real, intent(inout) :: va(bd%isc:bd%iec ,bd%jsc:bd%jec ) +type(fv_grid_type), intent(IN), target :: gridstruct +! Local: +real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed) :: wk +real, dimension(bd%isc:bd%iec,bd%jsc:bd%jec) :: u,v +integer i,j + +integer :: is, ie, js, je +is = bd%is +ie = bd%ie +js = bd%js +je = bd%je + +call a2b_ord4( psi, wk, gridstruct, npx, npy, is, ie, js, je, ng, .false.) +do j=js,je + do i=is,ie + u(i,j) = gridstruct%rdy(i,j)*0.5*(wk(i,j+1)+wk(i+1,j+1)-(wk(i,j)+wk(i+1,j))) + enddo +enddo +do j=js,je + do i=is,ie + v(i,j) = gridstruct%rdx(i,j)*0.5*(wk(i,j)+wk(i,j+1)-(wk(i+1,j)+wk(i+1,j+1))) + enddo +enddo +do j=js,je + do i=is,ie + ua(i,j) = 0.5*(gridstruct%a11(i,j)+gridstruct%a11(i,j+1))*u(i,j) + 0.5*(gridstruct%a12(i,j)+gridstruct%a12(i,j+1))*v(i,j) + va(i,j) = 0.5*(gridstruct%a21(i,j)+gridstruct%a21(i+1,j))*u(i,j) + 0.5*(gridstruct%a22(i,j)+gridstruct%a22(i+1,j))*v(i,j) + enddo +enddo + +end subroutine make_a_winds + +subroutine make_c_winds(uc, vc, psi, ng, gridstruct, bd, npx, npy) + +integer, intent(IN) :: ng, npx, npy +type(fv_grid_bounds_type), intent(IN) :: bd +real, intent(inout) :: psi(bd%isd:bd%ied,bd%jsd:bd%jed) +real, intent(inout) :: uc(bd%isc:bd%iec+1 ,bd%jsc:bd%jec ) +real, intent(inout) :: vc(bd%isc:bd%iec ,bd%jsc:bd%jec+1) +type(fv_grid_type), intent(IN), target :: gridstruct +! 
Local: +real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed) :: wk +real, dimension(bd%isc:bd%iec,bd%jsc:bd%jec) :: u,v +integer i,j + +integer :: is, ie, js, je +is = bd%is +ie = bd%ie +js = bd%js +je = bd%je + +call a2b_ord4( psi, wk, gridstruct, npx, npy, is, ie, js, je, ng, .false.) +do j=js,je + do i=is,ie+1 + uc(i,j) = gridstruct%rdy(i,j)*(wk(i,j+1)-wk(i,j)) + enddo +enddo +do j=js,je+1 + do i=is,ie + vc(i,j) = gridstruct%rdx(i,j)*(wk(i,j)-wk(i+1,j)) + enddo +enddo + +end subroutine make_c_winds + +!>@brief The subroutine 'atmospehre_resolution' is an API to return the local +!! extents of the current MPI-rank or the global extents of the current +!! cubed-sphere tile. + subroutine atmosphere_resolution (i_size, j_size, global) + integer, intent(out) :: i_size, j_size + logical, intent(in), optional :: global + logical :: local + + local = .true. + if( PRESENT(global) ) local = .NOT.global + + if( local ) then + i_size = iec - isc + 1 + j_size = jec - jsc + 1 + else + i_size = npx - 1 + j_size = npy - 1 + end if + end subroutine atmosphere_resolution +!>@brief The subroutine 'atmosphere_domain' is an API to return +!! the "domain2d" variable associated with the coupling grid and the +!! decomposition for the current cubed-sphere tile. +!>@detail Coupling is done using the mass/temperature grid with no halos. + subroutine atmosphere_domain ( fv_domain, layout, regional, nested, pelist ) + type(domain2d), intent(out) :: fv_domain + integer, intent(out) :: layout(2) + logical, intent(out) :: regional + logical, intent(out) :: nested + integer, pointer, intent(out) :: pelist(:) +! returns the domain2d variable associated with the coupling grid +! note: coupling is done using the mass/temperature grid with no halos + + fv_domain = Atm(mytile)%domain_for_coupler + layout(1:2) = Atm(mytile)%layout(1:2) + regional = Atm(mytile)%flagstruct%regional + nested = ngrids > 1 + call set_atmosphere_pelist() + pelist => Atm(mytile)%pelist + + end subroutine atmosphere_domain + + subroutine set_atmosphere_pelist () + call mpp_set_current_pelist(Atm(mytile)%pelist, no_sync=.TRUE.) + end subroutine set_atmosphere_pelist + + +!>@brief The subroutine 'atmosphere_scalar_field_halo' is an API to return halo information +!! of the current MPI_rank for an input scalar field. +!>@detail Up to three point haloes can be returned by this API which includes special handling for +!! the cubed-sphere tile corners. Output will be in (i,j,k) while input can be in (i,j,k) or +!! horizontally-packed form (ix,k). + subroutine atmosphere_scalar_field_halo (data, halo, isize, jsize, ksize, data_p) + !-------------------------------------------------------------------- + ! data - output array to return the field with halo (i,j,k) + ! optionally input for field already in (i,j,k) form + ! sized to include the halo of the field (+ 2*halo) + ! halo - size of the halo (must be less than 3) + ! ied - horizontal resolution in i-dir with haloes + ! jed - horizontal resolution in j-dir with haloes + ! ksize - vertical resolution + ! 
data_p - optional input field in packed format (ix,k) + !-------------------------------------------------------------------- + !--- interface variables --- + real*8, dimension(1:isize,1:jsize,ksize), intent(inout) :: data !< output array to return the field with halo (i,j,k) + !< optionally input for field already in (i,j,k) form + !< sized to include the halo of the field (+ 2*halo) + integer, intent(in) :: halo !< size of the halo (must be less than 3) + integer, intent(in) :: isize !< horizontal resolution in i-dir with haloes + integer, intent(in) :: jsize !< horizontal resolution in j-dir with haloes + integer, intent(in) :: ksize !< vertical resolution + real*8, dimension(:,:), optional, intent(in) :: data_p !< optional input field in packed format (ix,k) + !--- local variables --- + integer :: i, j, k + integer :: ic, jc + character(len=44) :: modname = 'atmosphere_mod::atmosphere_scalar_field_halo' + integer :: mpp_flags + + !--- perform error checking + if (halo .gt. 3) call mpp_error(FATAL, modname//' - halo.gt.3 requires extending the MPP domain to support') + ic = isize - 2 * halo + jc = jsize - 2 * halo + + !--- if packed data is present, unpack it into the two-dimensional data array + if (present(data_p)) then + if (ic*jc .ne. size(data_p,1)) call mpp_error(FATAL, modname//' - incorrect sizes for incoming & + &variables data and data_p') + data = 0. +!$OMP parallel do default (none) & +!$OMP shared (data, data_p, halo, ic, jc, ksize) & +!$OMP private (i, j, k) + do k = 1, ksize + do j = 1, jc + do i = 1, ic + data(i+halo, j+halo, k) = data_p(i + (j-1)*ic, k) + enddo + enddo + enddo + endif + + mpp_flags = EUPDATE + WUPDATE + SUPDATE + NUPDATE + if (halo == 1) then + call mpp_update_domains(data, Atm(mytile)%domain_for_coupler, flags=mpp_flags, complete=.true.) + elseif (halo == 3) then + call mpp_update_domains(data, Atm(mytile)%domain, flags=mpp_flags, complete=.true.) + else + call mpp_error(FATAL, modname//' - unsupported halo size') + endif + + !--- fill the halo points when at a corner of the cubed-sphere tile + !--- interior domain corners are handled correctly + if ( (isc==1) .or. (jsc==1) .or. (iec==npx-1) .or. (jec==npy-1) ) then + do k = 1, ksize + do j=1,halo + do i=1,halo + if ((isc== 1) .and. (jsc== 1)) data(halo+1-j ,halo+1-i ,k) = data(halo+i ,halo+1-j ,k) !SW Corner + if ((isc== 1) .and. (jec==npy-1)) data(halo+1-j ,halo+jc+i,k) = data(halo+i ,halo+jc+j,k) !NW Corner + if ((iec==npx-1) .and. (jsc== 1)) data(halo+ic+j,halo+1-i ,k) = data(halo+ic-i+1,halo+1-j ,k) !SE Corner + if ((iec==npx-1) .and. 
(jec==npy-1)) data(halo+ic+j,halo+jc+i,k) = data(halo+ic-i+1,halo+jc+j,k) !NE Corner + enddo + enddo + enddo + endif + + return + end subroutine atmosphere_scalar_field_halo + + + subroutine atmosphere_control_data (i1, i2, j1, j2, kt, p_hydro, hydro, tile_num) + integer, intent(out) :: i1, i2, j1, j2, kt + logical, intent(out), optional :: p_hydro, hydro + integer, intent(out), optional :: tile_num + i1 = Atm(mytile)%bd%isc + i2 = Atm(mytile)%bd%iec + j1 = Atm(mytile)%bd%jsc + j2 = Atm(mytile)%bd%jec + kt = Atm(mytile)%npz + + if (present(tile_num)) tile_num = Atm(mytile)%tile + if (present(p_hydro)) p_hydro = Atm(mytile)%flagstruct%phys_hydrostatic + if (present( hydro)) hydro = Atm(mytile)%flagstruct%hydrostatic + + end subroutine atmosphere_control_data + +end module atmosphere_stub_mod diff --git a/compile_standalone b/compile_standalone new file mode 100755 index 0000000..11daf3e --- /dev/null +++ b/compile_standalone @@ -0,0 +1,41 @@ +#!/bin/sh -x +compile_all=1 +FC=mpif90 +INCS="-I. -I../FV3/gfsphysics/ -I../FMS/include -I../FV3/atmos_cubed_sphere -I../FMS/fv3gfs" +FLAGS64=" -traceback -real-size 64 -DSTOCHY_UNIT_TEST -c "$INCS +FLAGS=" -traceback -DSTOCHY_UNIT_TEST -c "$INCS +if [ $compile_all -eq 1 ];then + rm -f *.i90 *.i *.o *.mod lib*a + $FC ${FLAGS} fv_control_stub.F90 + $FC ${FLAGS} atmosphere_stub.F90 + $FC ${FLAGS64} stochy_namelist_def.F90 + $FC ${FLAGS64} stochy_resol_def.f + $FC ${FLAGS64} stochy_gg_def.f + $FC ${FLAGS64} spectral_layout.F90 + $FC ${FLAGS64} stochy_layout_lag.f + $FC ${FLAGS64} pln2eo_stochy.f + $FC ${FLAGS64} setlats_a_stochy.f + $FC ${FLAGS64} setlats_lag_stochy.f + $FC ${FLAGS64} glats_stochy.f + $FC ${FLAGS64} gozrineo_stochy.f + $FC ${FLAGS64} num_parthds_stochy.f + $FC ${FLAGS64} dezouv_stochy.f + $FC ${FLAGS64} dozeuv_stochy.f + $FC ${FLAGS64} epslon_stochy.f + $FC ${FLAGS64} four_to_grid_stochy.f + $FC ${FLAGS64} sumfln_stochy.f + $FC ${FLAGS64} get_lats_node_a_stochy.f + $FC ${FLAGS64} get_ls_node_stochy.f + $FC ${FLAGS64} compns_stochy.F90 + $FC ${FLAGS64} stochy_internal_state_mod.F90 + $FC ${FLAGS64} getcon_lag_stochy.f + $FC ${FLAGS64} getcon_spectral.F90 + $FC ${FLAGS64} initialize_spectral_mod.F90 + $FC ${FLAGS64} standalone_stochy_module.F90 + $FC ${FLAGS64} stochy_patterngenerator.F90 + $FC ${FLAGS64} stochy_data_mod.F90 + $FC ${FLAGS64} get_stochy_pattern.F90 + $FC ${FLAGS64} stochastic_physics.F90 + ar rv libstochastic_physics.a *.o +fi +$FC -traceback -real-size 64 -qopenmp -o standalone_stochy standalone_stochy.F90 ${INCS} -I/apps/netcdf/4.7.0/intel/18.0.5.274/include -L. 
-lstochastic_physics -L../FV3/atmos_cubed_sphere -lfv3core -L../FMS/FMS_INSTALL -lfms -L../FV3/gfsphysics -lgfsphys -L/scratch2/NCEPDEV/nwprod/NCEPLIBS/compilers/intel/18.0.5.274/lib -lsp_v2.0.3_d -L/scratch1/NCEPDEV/nems/emc.nemspara/soft/esmf/8.0.0bs48-intel18.0.5.274-impi2018.0.4-netcdf4.6.1/lib -Wl,-rpath,/scratch1/NCEPDEV/nems/emc.nemspara/soft/esmf/8.0.0bs48-intel18.0.5.274-impi2018.0.4-netcdf4.6.1/lib -lesmf -L/apps/netcdf/4.7.0/intel/18.0.5.274/lib -lnetcdff -lnetcdf diff --git a/compile_standalone_ca b/compile_standalone_ca new file mode 100755 index 0000000..cce7e8c --- /dev/null +++ b/compile_standalone_ca @@ -0,0 +1,17 @@ +#!/bin/sh +compile_all=1 +if [ $compile_all -eq 1 ];then + rm -f *.i90 *.i *.o *.mod lib*a + mpif90 -C -traceback -real-size 64 -c ../FV3/gfsphysics/physics/machine.F + mpif90 -C -traceback -real-size 64 -c ../FV3/gfsphysics/physics/mersenne_twister.f + mpif90 -C -traceback -I../FMS/include -c ../FMS/platform/platform.F90 + mpif90 -C -traceback -nowarn -DGFS_PHYS -I../FMS/include -c ../FMS/constants/constants.F90 + mpif90 -C -traceback -I../FV3/atmos_cubed_sphere -I../FMS/include -I../FMS/fv3gfs -c fv_control_stub.F90 + mpif90 -C -traceback -I../FV3/atmos_cubed_sphere -I../FMS/include -I../FMS/fv3gfs -c atmosphere_stub.F90 + mpif90 -C -traceback -c -real-size 64 standalone_stochy_module.F90 + mpif90 -C -traceback -I. -real-size 64 -c plumes.F90 + mpif90 -DSTOCHY_UNIT_TEST -real-size 64 -C -traceback -I../FMS/fv3gfs -I../FMS/FMS_INSTALL -I../FV3/atmos_cubed_sphere -c update_ca.F90 + mpif90 -DSTOCHY_UNIT_TEST -real-size 64 -C -traceback -I../FMS/fv3gfs -I../FMS/FMS_INSTALL -I../FV3/atmos_cubed_sphere -c cellular_automata_global.F90 + ar rv libcellular_automata.a *.o +fi +mpif90 -traceback -real-size 64 -qopenmp -o standalone_ca standalone_ca.F90 -I../FV3/atmos_cubed_sphere -I../FMS/FMS_INSTALL -I/apps/netcdf/4.7.0/intel/18.0.5.274/include -L. -lcellular_automata -L../FV3/atmos_cubed_sphere -lfv3core -L../FMS/FMS_INSTALL -lfms -L../FV3/gfsphysics -lgfsphys -L/scratch2/NCEPDEV/nwprod/NCEPLIBS/compilers/intel/18.0.5.274/lib -lsp_v2.0.3_d -L/scratch1/NCEPDEV/nems/emc.nemspara/soft/esmf/8.0.0bs48-intel18.0.5.274-impi2018.0.4-netcdf4.6.1/lib -Wl,-rpath,/scratch1/NCEPDEV/nems/emc.nemspara/soft/esmf/8.0.0bs48-intel18.0.5.274-impi2018.0.4-netcdf4.6.1/lib -lesmf -L/apps/netcdf/4.7.0/intel/18.0.5.274/lib -lnetcdff -lnetcdf diff --git a/compns_stochy.F90 b/compns_stochy.F90 index 420ccdb..a87b32a 100644 --- a/compns_stochy.F90 +++ b/compns_stochy.F90 @@ -44,8 +44,10 @@ subroutine compns_stochy (me,sz_nml,input_nml_file,fn_nml,nlunit,deltim,iret) character(len=*), intent(in) :: input_nml_file(sz_nml) character(len=64), intent(in) :: fn_nml real, intent(in) :: deltim - real tol + real tol,l_min + real :: rerth,circ,tmp_lat integer k,ios + integer,parameter :: four=4 ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ! @@ -54,10 +56,11 @@ subroutine compns_stochy (me,sz_nml,input_nml_file,fn_nml,nlunit,deltim,iret) shum_lscale,fhstoch,stochini,skeb_varspect_opt,sppt_sfclimit, & skeb,skeb_tau,skeb_vdof,skeb_lscale,iseed_skeb,skeb_vfilt,skeb_diss_smooth, & skeb_sigtop1,skeb_sigtop2,skebnorm,sppt_sigtop1,sppt_sigtop2,& - shum_sigefold,skebint,skeb_npass,use_zmtnblck + shum_sigefold,spptint,shumint,skebint,skeb_npass,use_zmtnblck,new_lscale namelist /nam_sfcperts/nsfcpert,pertz0,pertshc,pertzt,pertlai, & ! mg, sfcperts pertvegf,pertalb,iseed_sfc,sfc_tau,sfc_lscale,sppt_land + rerth =6.3712e+6 ! radius of earth (m) tol=0.01 ! tolerance for calculations ! 
spectral resolution defintion ntrunc=-999 @@ -78,6 +81,7 @@ subroutine compns_stochy (me,sz_nml,input_nml_file,fn_nml,nlunit,deltim,iret) ! logicals do_sppt = .false. use_zmtnblck = .false. + new_lscale = .false. do_shum = .false. do_skeb = .false. ! mg, sfcperts @@ -91,6 +95,8 @@ subroutine compns_stochy (me,sz_nml,input_nml_file,fn_nml,nlunit,deltim,iret) ! for SKEB random patterns. skeb_vfilt = 0 skebint = 0 + spptint = 0 + shumint = 0 skeb_npass = 11 ! number of passes of smoother for dissipation estiamte sppt_tau = -999. ! time scales shum_tau = -999. @@ -117,7 +123,7 @@ subroutine compns_stochy (me,sz_nml,input_nml_file,fn_nml,nlunit,deltim,iret) ! length scale. skeb_varspect_opt = 0 sppt_logit = .false. ! logit transform for sppt to bounded interval [-1,+1] - fhstoch = -999.0 ! forecast hour to dump random patterns + fhstoch = -999.0 ! forecast interval (in hours) to dump random patterns stochini = .false. ! true= read in pattern, false=initialize from seed #ifdef INTERNAL_FILE_NML @@ -185,11 +191,52 @@ subroutine compns_stochy (me,sz_nml,input_nml_file,fn_nml,nlunit,deltim,iret) iret=9 return ENDIF + IF (spptint == 0.) spptint=deltim + nssppt=nint(spptint/deltim) ! spptint in seconds + IF(nssppt<=0 .or. abs(nssppt-spptint/deltim)>tol) THEN + WRITE(0,*) "SPPT interval is invalid",spptint + iret=9 + return + ENDIF + IF (shumint == 0.) shumint=deltim + nsshum=nint(shumint/deltim) ! shumint in seconds + IF(nsshum<=0 .or. abs(nsshum-shumint/deltim)>tol) THEN + WRITE(0,*) "SHUM interval is invalid",shumint + iret=9 + return + ENDIF ! mg, sfcperts IF (pertz0(1) > 0 .OR. pertshc(1) > 0 .OR. pertzt(1) > 0 .OR. & pertlai(1) > 0 .OR. pertvegf(1) > 0 .OR. pertalb(1) > 0) THEN do_sfcperts=.true. ENDIF +!calculate ntrunc if not supplied + if (ntrunc .LT. 1) then + if (me==0) print*,'ntrunc not supplied, calculating' + circ=2*3.1415928*rerth ! start with lengthscale that is circumference of the earth + l_min=circ + do k=1,5 + if (sppt(k).GT.0) l_min=min(sppt_lscale(k),l_min) + if (shum(k).GT.0) l_min=min(shum_lscale(k),l_min) + if (skeb(k).GT.0) l_min=min(skeb_lscale(k),l_min) + enddo + !ntrunc=1.5*circ/l_min + ntrunc=circ/l_min + if (me==0) print*,'ntrunc calculated from l_min',l_min,ntrunc + endif + ! ensure lat_s is a mutiple of 4 with a reminader of two + ntrunc=INT((ntrunc+1)/four)*four+2 + if (me==0) print*,'NOTE ntrunc adjusted for even nlats',ntrunc + +! set up gaussian grid for ntrunc if not already defined. + if (lon_s.LT.1 .OR. lat_s.LT.1) then + lat_s=ntrunc*1.5+1 + lon_s=lat_s*2+4 +! Grid needs to be larger since interpolation is bi-linear + lat_s=lat_s*2 + lon_s=lon_s*2 + if (me==0) print*,'gaussian grid not set, defining here',lon_s,lat_s + endif ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ! ! All checks are successful. diff --git a/fv_control_stub.F90 b/fv_control_stub.F90 new file mode 100644 index 0000000..86d2bf9 --- /dev/null +++ b/fv_control_stub.F90 @@ -0,0 +1,1300 @@ + +!*********************************************************************** +!* GNU Lesser General Public License +!* +!* This file is part of the FV3 dynamical core. +!* +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. 
+!*
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
+!*
+!* You should have received a copy of the GNU Lesser General Public
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
+!***********************************************************************
+
+!>@brief The module 'FV3_control' is for initialization and termination
+!! of the model, and controls namelist parameters in FV3.
+!----------------
+! FV control panel
+!----------------
+
+module fv_control_stub_mod
+! Modules Included:
+! <table>
+!   <tr><th>Module Name</th>        <th>Functions Included</th></tr>
+!   <tr><td>constants_mod</td>      <td>pi=>pi_8, kappa, radius, grav, rdgas</td></tr>
+!   <tr><td>field_manager_mod</td>  <td>MODEL_ATMOS</td></tr>
+!   <tr><td>fms_mod</td>            <td>write_version_number, open_namelist_file, check_nml_error, close_file, file_exist</td></tr>
+!   <tr><td>fv_arrays_mod</td>      <td>fv_atmos_type, allocate_fv_atmos_type, deallocate_fv_atmos_type, R_GRID</td></tr>
+!   <tr><td>fv_diagnostics_mod</td> <td>fv_diag_init_gn</td></tr>
+!   <tr><td>fv_eta_mod</td>         <td>set_eta</td></tr>
+!   <tr><td>fv_grid_tools_mod</td>  <td>init_grid</td></tr>
+!   <tr><td>fv_grid_utils_mod</td>  <td>grid_utils_init, grid_utils_end, ptop_min</td></tr>
+!   <tr><td>fv_mp_mod</td>          <td>mp_start, mp_assign_gid, domain_decomp, ng, switch_current_Atm, broadcast_domains, mp_barrier, is_master, setup_master</td></tr>
+!   <tr><td>fv_io_mod</td>          <td>fv_io_exit</td></tr>
+!   <tr><td>fv_restart_mod</td>     <td>fv_restart_init, fv_restart_end</td></tr>
+!   <tr><td>fv_timing_mod</td>      <td>timing_on, timing_off, timing_init, timing_prt</td></tr>
+!   <tr><td>mpp_mod</td>            <td>mpp_send, mpp_sync, mpp_transmit, mpp_set_current_pelist, mpp_declare_pelist, mpp_root_pe, mpp_recv, mpp_sync_self, mpp_broadcast, read_input_nml, FATAL, mpp_error, mpp_pe, stdlog, mpp_npes, mpp_get_current_pelist, input_nml_file, get_unit, WARNING, read_ascii_file, INPUT_STR_LENGTH</td></tr>
+!   <tr><td>mpp_domains_mod</td>    <td>mpp_get_data_domain, mpp_get_compute_domain, domain2D, mpp_define_nest_domains, nest_domain_type, mpp_get_global_domain, mpp_get_C2F_index, mpp_get_F2C_index, mpp_broadcast_domain, CENTER, CORNER, NORTH, EAST, WEST, SOUTH</td></tr>
+!   <tr><td>mpp_parameter_mod</td>  <td>AGRID_PARAM=>AGRID</td></tr>
+!   <tr><td>test_cases_mod</td>     <td>test_case, bubble_do, alpha, nsolitons, soliton_Umax, soliton_size</td></tr>
+!   <tr><td>tracer_manager_mod</td> <td>tm_get_number_tracers => get_number_tracers, tm_get_tracer_index => get_tracer_index, tm_get_tracer_indices => get_tracer_indices, tm_set_tracer_profile => set_tracer_profile, tm_get_tracer_names => get_tracer_names, tm_check_if_prognostic => check_if_prognostic, tm_register_tracers => register_tracers</td></tr>
+! </table>
+ + use constants_mod, only: pi=>pi_8, kappa, radius, grav, rdgas + use field_manager_mod, only: MODEL_ATMOS + use fms_mod, only: write_version_number, open_namelist_file, & + check_nml_error, close_file, file_exist + use mpp_mod, only: FATAL, mpp_error, mpp_pe, stdlog, & + mpp_npes, mpp_get_current_pelist, & + input_nml_file, get_unit, WARNING, & + read_ascii_file, INPUT_STR_LENGTH + use mpp_domains_mod, only: mpp_get_data_domain, mpp_get_compute_domain + use tracer_manager_mod, only: tm_get_number_tracers => get_number_tracers, & + tm_get_tracer_index => get_tracer_index, & + tm_get_tracer_indices => get_tracer_indices, & + tm_set_tracer_profile => set_tracer_profile, & + tm_get_tracer_names => get_tracer_names, & + tm_check_if_prognostic=> check_if_prognostic,& + tm_register_tracers => register_tracers + + use fv_io_mod, only: fv_io_exit + use fv_restart_mod, only: fv_restart_init, fv_restart_end + use fv_arrays_mod, only: fv_atmos_type, allocate_fv_atmos_type, deallocate_fv_atmos_type, & + R_GRID + use fv_grid_utils_mod, only: grid_utils_init, grid_utils_end, ptop_min + use fv_eta_mod, only: set_eta + use fv_grid_tools_mod, only: init_grid + use fv_mp_mod, only: mp_start, mp_assign_gid, domain_decomp + use fv_mp_mod, only: ng, switch_current_Atm + use fv_mp_mod, only: broadcast_domains, mp_barrier, is_master, setup_master +!!! CLEANUP: should be replaced by a getter function? + use test_cases_mod, only: test_case, bubble_do, alpha, nsolitons, soliton_Umax, soliton_size + use fv_timing_mod, only: timing_on, timing_off, timing_init, timing_prt + use mpp_domains_mod, only: domain2D + use mpp_domains_mod, only: mpp_define_nest_domains, nest_domain_type, mpp_get_global_domain + use mpp_domains_mod, only: mpp_get_C2F_index, mpp_get_F2C_index, mpp_broadcast_domain + use mpp_domains_mod, only: CENTER, CORNER, NORTH, EAST, WEST, SOUTH + use mpp_mod, only: mpp_send, mpp_sync, mpp_transmit, mpp_set_current_pelist, mpp_declare_pelist, mpp_root_pe, mpp_recv, mpp_sync_self, mpp_broadcast, read_input_nml + use fv_diagnostics_mod, only: fv_diag_init_gn + +#ifdef MULTI_GASES + use constants_mod, only: rvgas, cp_air + use multi_gases_mod, only: multi_gases_init, & + rilist => ri, & + cpilist => cpi +#endif + + implicit none + private + public setup_pointers + +!----------------------------------------------------------------------- +! Grid descriptor file setup +!----------------------------------------------------------------------- +!------------------------------------------ +! Model Domain parameters +! 
See fv_arrays.F90 for descriptions +!------------------------------------------ +!CLEANUP module pointers + character(len=80) , pointer :: grid_name + character(len=120), pointer :: grid_file + integer, pointer :: grid_type + integer , pointer :: hord_mt + integer , pointer :: kord_mt + integer , pointer :: kord_wz + integer , pointer :: hord_vt + integer , pointer :: hord_tm + integer , pointer :: hord_dp + integer , pointer :: kord_tm + integer , pointer :: hord_tr + integer , pointer :: kord_tr + real , pointer :: scale_z + real , pointer :: w_max + real , pointer :: z_min + real , pointer :: lim_fac + + integer , pointer :: nord + integer , pointer :: nord_tr + real , pointer :: dddmp + real , pointer :: d2_bg + real , pointer :: d4_bg + real , pointer :: vtdm4 + real , pointer :: trdm2 + real , pointer :: d2_bg_k1 + real , pointer :: d2_bg_k2 + real , pointer :: d2_divg_max_k1 + real , pointer :: d2_divg_max_k2 + real , pointer :: damp_k_k1 + real , pointer :: damp_k_k2 + integer , pointer :: n_zs_filter + integer , pointer :: nord_zs_filter + logical , pointer :: full_zs_filter + + logical , pointer :: RF_fast + logical , pointer :: consv_am + logical , pointer :: do_sat_adj + logical , pointer :: do_f3d + logical , pointer :: no_dycore + logical , pointer :: convert_ke + logical , pointer :: do_vort_damp + logical , pointer :: use_old_omega +! PG off centering: + real , pointer :: beta + integer , pointer :: n_sponge + real , pointer :: d_ext + integer , pointer :: nwat + logical , pointer :: warm_start + logical , pointer :: inline_q + real , pointer :: shift_fac + logical , pointer :: do_schmidt + real(kind=R_GRID) , pointer :: stretch_fac + real(kind=R_GRID) , pointer :: target_lat + real(kind=R_GRID) , pointer :: target_lon + + logical , pointer :: reset_eta + real , pointer :: p_fac + real , pointer :: a_imp + integer , pointer :: n_split + + real , pointer :: fac_n_spl + real , pointer :: fhouri + ! 
Default + integer , pointer :: m_split + integer , pointer :: k_split + logical , pointer :: use_logp + + integer , pointer :: q_split + integer , pointer :: print_freq + logical , pointer :: write_3d_diags + + integer , pointer :: npx + integer , pointer :: npy + integer , pointer :: npz + integer , pointer :: npz_rst + + integer , pointer :: ncnst + integer , pointer :: pnats + integer , pointer :: dnats + integer , pointer :: ntiles + integer , pointer :: nf_omega + integer , pointer :: fv_sg_adj + + integer , pointer :: na_init + logical , pointer :: nudge_dz + real , pointer :: p_ref + real , pointer :: dry_mass + integer , pointer :: nt_prog + integer , pointer :: nt_phys + real , pointer :: tau_h2o + + real , pointer :: delt_max + real , pointer :: d_con + real , pointer :: ke_bg + real , pointer :: consv_te + real , pointer :: tau + real , pointer :: rf_cutoff + logical , pointer :: filter_phys + logical , pointer :: dwind_2d + logical , pointer :: breed_vortex_inline + logical , pointer :: range_warn + logical , pointer :: fill + logical , pointer :: fill_dp + logical , pointer :: fill_wz + logical , pointer :: check_negative + logical , pointer :: non_ortho + logical , pointer :: adiabatic + logical , pointer :: moist_phys + logical , pointer :: do_Held_Suarez + logical , pointer :: do_reed_physics + logical , pointer :: reed_cond_only + logical , pointer :: reproduce_sum + logical , pointer :: adjust_dry_mass + logical , pointer :: fv_debug + logical , pointer :: srf_init + logical , pointer :: mountain + logical , pointer :: remap_t + logical , pointer :: z_tracer + + logical , pointer :: old_divg_damp + logical , pointer :: fv_land + logical , pointer :: nudge + logical , pointer :: nudge_ic + logical , pointer :: ncep_ic + logical , pointer :: nggps_ic + logical , pointer :: ecmwf_ic + logical , pointer :: gfs_phil + logical , pointer :: agrid_vel_rst + logical , pointer :: use_new_ncep + logical , pointer :: use_ncep_phy + logical , pointer :: fv_diag_ic + logical , pointer :: external_ic + logical , pointer :: external_eta + logical , pointer :: read_increment + character(len=128) , pointer :: res_latlon_dynamics + character(len=128) , pointer :: res_latlon_tracers + logical , pointer :: hydrostatic + logical , pointer :: phys_hydrostatic + logical , pointer :: use_hydro_pressure + logical , pointer :: do_uni_zfull !miz + logical , pointer :: adj_mass_vmr ! f1p + logical , pointer :: hybrid_z + logical , pointer :: Make_NH + logical , pointer :: make_hybrid_z + logical , pointer :: nudge_qv + real, pointer :: add_noise + + integer , pointer :: a2b_ord + integer , pointer :: c2l_ord + + integer, pointer :: ndims + + real(kind=R_GRID), pointer :: dx_const + real(kind=R_GRID), pointer :: dy_const + real(kind=R_GRID), pointer :: deglon_start, deglon_stop, & ! boundaries of latlon patch + deglat_start, deglat_stop + real(kind=R_GRID), pointer :: deglat + + logical, pointer :: nested, twowaynest + logical, pointer :: regional + integer, pointer :: bc_update_interval + integer, pointer :: parent_tile, refinement, nestbctype, nestupdate, nsponge, ioffset, joffset + real, pointer :: s_weight, update_blend + + integer, pointer :: layout(:), io_layout(:) + + integer :: ntilesMe ! Number of tiles on this process =1 for now + +#ifdef OVERLOAD_R4 + real :: too_big = 1.E8 +#else + real :: too_big = 1.E35 +#endif + public :: fv_init + + integer, public :: ngrids = 1 + integer, public, allocatable :: pelist_all(:) + integer :: commID, max_refinement_of_global = 1. 
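+      ! gid caches this task's MPI rank (set from mpp_pe() at the start of fv_init);
+      ! too_big above is a large fill value assigned to fields such as phis, ua and va
+      ! in fv_init before they are properly initialized.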
+ integer :: gid + + real :: umax = 350. !< max wave speed for grid_type>3 + integer :: parent_grid_num = -1 + + integer :: halo_update_type = 1 !< 1 for two-interfaces non-block + !< 2 for block + !< 3 for four-interfaces non-block + + + +! version number of this module +! Include variable "version" to be written to log file. +#include + + contains + +!------------------------------------------------------------------------------- +!>@brief The subroutine 'fv_init' initializes FV3. +!>@details It allocates memory, sets up MPI and processor lists, +!! sets up the grid, and controls FV3 namelist parameters. + subroutine fv_init(Atm, dt_atmos, grids_on_this_pe, p_split) + + type(fv_atmos_type), allocatable, intent(inout), target :: Atm(:) + real, intent(in) :: dt_atmos + logical, allocatable, intent(INOUT) :: grids_on_this_pe(:) + integer, intent(INOUT) :: p_split + + integer :: i, j, k, n, p + real :: sdt + +! tracers + integer :: num_family !< output of register_tracers + + integer :: isc_p, iec_p, jsc_p, jec_p, isg, ieg, jsg, jeg, upoff, jind + integer :: ic, jc + + gid = mpp_pe() + call init_nesting(Atm, grids_on_this_pe, p_split) + + !This call is needed to set up the pointers for fv_current_grid, even for a single-grid run + call switch_current_Atm(Atm(1), .false.) + call setup_pointers(Atm(1)) + +! Start up MPI + + !call mp_assign_gid + + ! Initialize timing routines + call timing_init + call timing_on('TOTAL') + + ! Setup the run from namelist + ntilesMe = size(Atm(:)) !Full number of Atm arrays; one less than number of grids, if multiple grids + + call run_setup(Atm,dt_atmos, grids_on_this_pe, p_split) ! initializes domain_decomp + + do n=1,ntilesMe + + !In a single-grid run this will still be needed to correctly set the domain + call switch_current_Atm(Atm(n)) + call setup_pointers(Atm(n)) + + target_lon = target_lon * pi/180. + target_lat = target_lat * pi/180. + + + if (grids_on_this_pe(n)) then + call allocate_fv_atmos_type(Atm(n), Atm(n)%bd%isd, Atm(n)%bd%ied, Atm(n)%bd%jsd, Atm(n)%bd%jed, & + Atm(n)%bd%isc, Atm(n)%bd%iec, Atm(n)%bd%jsc, Atm(n)%bd%jec, & + npx, npy, npz, ndims, ncnst, ncnst-pnats, ng, .false., grids_on_this_pe(n), ngrids) + + if (grids_on_this_pe(n)) then + + call switch_current_Atm(Atm(n)) + call setup_pointers(Atm(n)) + + if ( (Atm(n)%bd%iec-Atm(n)%bd%isc+1).lt.4 .or. (Atm(n)%bd%jec-Atm(n)%bd%jsc+1).lt.4 ) then + if (is_master()) write(*,'(6I6)') Atm(n)%bd%isc, Atm(n)%bd%iec, Atm(n)%bd%jsc, Atm(n)%bd%jec, n + call mpp_error(FATAL,'Domain Decomposition: Cubed Sphere compute domain has a & + &minium requirement of 4 points in X and Y, respectively') + end if + + endif + + !!CLEANUP: Convenience pointers + Atm(n)%gridstruct%nested => Atm(n)%neststruct%nested + Atm(n)%gridstruct%grid_type => Atm(n)%flagstruct%grid_type + Atm(n)%flagstruct%grid_number => Atm(n)%grid_number + Atm(n)%gridstruct%regional => Atm(n)%flagstruct%regional + + call init_grid(Atm(n), grid_name, grid_file, npx, npy, npz, ndims, ntiles, ng) + + ! Initialize the SW (2D) part of the model + !!!CLEANUP: this call could definitely use some cleaning up + call grid_utils_init(Atm(n), npx, npy, npz, non_ortho, grid_type, c2l_ord) + + !!!CLEANUP: Are these correctly writing out on all pes? 
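+      ! The is_master() block below reports the effective divergence-damping coefficients:
+      ! sdt is the acoustic (small) time step, dt_atmos/(n_split*k_split*|p_split|); the
+      ! del-2 coefficients are converted to m**2/s by multiplying by da_min_c and dividing
+      ! by sdt, and the del-4 vorticity damping scales as (vtdm4*da_min)**2/sdt.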
+ if ( is_master() ) then + sdt = dt_atmos/real(n_split*k_split*abs(p_split)) + write(*,*) ' ' + write(*,*) 'Divergence damping Coefficients' + write(*,*) 'For small dt=', sdt + write(*,*) 'External mode del-2 (m**2/s)=', d_ext*Atm(n)%gridstruct%da_min_c/sdt + write(*,*) 'Internal mode del-2 SMAG dimensionless coeff=', dddmp + write(*,*) 'Internal mode del-2 background diff=', d2_bg*Atm(n)%gridstruct%da_min_c/sdt + + if (nord==1) then + write(*,*) 'Internal mode del-4 background diff=', d4_bg + write(*,*) 'Vorticity del-4 (m**4/s)=', (vtdm4*Atm(n)%gridstruct%da_min)**2/sdt*1.E-6 + endif + if (nord==2) write(*,*) 'Internal mode del-6 background diff=', d4_bg + if (nord==3) write(*,*) 'Internal mode del-8 background diff=', d4_bg + write(*,*) 'tracer del-2 diff=', trdm2 + + write(*,*) 'Vorticity del-4 (m**4/s)=', (vtdm4*Atm(n)%gridstruct%da_min)**2/sdt*1.E-6 + write(*,*) 'beta=', beta + write(*,*) ' ' + endif + + + Atm(n)%ts = 300. + Atm(n)%phis = too_big + ! The following statements are to prevent the phatom corner regions from + ! growing instability + Atm(n)%u = 0. + Atm(n)%v = 0. + Atm(n)%ua = too_big + Atm(n)%va = too_big + + else !this grid is NOT defined on this pe + + !Allocate dummy arrays + call allocate_fv_atmos_type(Atm(n), Atm(n)%bd%isd, Atm(n)%bd%ied, Atm(n)%bd%jsd, Atm(n)%bd%jed, & + Atm(n)%bd%isc, Atm(n)%bd%iec, Atm(n)%bd%jsc, Atm(n)%bd%jec, & + npx, npy, npz, ndims, ncnst, ncnst-pnats, ng, .true., .false., ngrids) + + !Need to SEND grid_global to any child grids; this is received in setup_aligned_nest in fv_grid_tools + if (Atm(n)%neststruct%nested) then + + call mpp_get_global_domain( Atm(n)%parent_grid%domain, & + isg, ieg, jsg, jeg) + + !FIXME: Should replace this by generating the global grid (or at least one face thereof) on the + ! nested PEs instead of sending it around. + if (gid == Atm(n)%parent_grid%pelist(1)) then + call mpp_send(Atm(n)%parent_grid%grid_global(isg-ng:ieg+1+ng,jsg-ng:jeg+1+ng,1:2,parent_tile), & + size(Atm(n)%parent_grid%grid_global(isg-ng:ieg+1+ng,jsg-ng:jeg+1+ng,1:2,parent_tile)), & + Atm(n)%pelist(1)) !send to p_ind in setup_aligned_nest + call mpp_sync_self() + endif + + if (Atm(n)%neststruct%twowaynest) then + + !This in reality should be very simple. With the + ! restriction that only the compute domain data is + ! sent from the coarse grid, we can compute + ! exactly which coarse grid cells should use + ! which nested-grid data. We then don't need to send around p_ind. + + Atm(n)%neststruct%ind_update_h = -99999 + + if (Atm(n)%parent_grid%tile == Atm(n)%neststruct%parent_tile) then + + isc_p = Atm(n)%parent_grid%bd%isc + iec_p = Atm(n)%parent_grid%bd%iec + jsc_p = Atm(n)%parent_grid%bd%jsc + jec_p = Atm(n)%parent_grid%bd%jec + upoff = Atm(n)%neststruct%upoff + + Atm(n)%neststruct%jsu = jsc_p + Atm(n)%neststruct%jeu = jsc_p-1 + do j=jsc_p,jec_p+1 + if (j < joffset+upoff) then + do i=isc_p,iec_p+1 + Atm(n)%neststruct%ind_update_h(i,j,2) = -9999 + enddo + Atm(n)%neststruct%jsu = Atm(n)%neststruct%jsu + 1 + elseif (j > joffset + (npy-1)/refinement - upoff) then + do i=isc_p,iec_p+1 + Atm(n)%neststruct%ind_update_h(i,j,2) = -9999 + enddo + else + jind = (j - joffset)*refinement + 1 + do i=isc_p,iec_p+1 + Atm(n)%neststruct%ind_update_h(i,j,2) = jind + enddo + if ( (j < joffset + (npy-1)/refinement - upoff) .and. 
j <= jec_p) Atm(n)%neststruct%jeu = j + endif + !write(mpp_pe()+4000,*) j, joffset, upoff, Atm(n)%neststruct%ind_update_h(isc_p,j,2) + enddo + + Atm(n)%neststruct%isu = isc_p + Atm(n)%neststruct%ieu = isc_p-1 + do i=isc_p,iec_p+1 + if (i < ioffset+upoff) then + Atm(n)%neststruct%ind_update_h(i,:,1) = -9999 + Atm(n)%neststruct%isu = Atm(n)%neststruct%isu + 1 + elseif (i > ioffset + (npx-1)/refinement - upoff) then + Atm(n)%neststruct%ind_update_h(i,:,1) = -9999 + else + Atm(n)%neststruct%ind_update_h(i,:,1) = (i-ioffset)*refinement + 1 + if ( (i < ioffset + (npx-1)/refinement - upoff) .and. i <= iec_p) Atm(n)%neststruct%ieu = i + end if + !write(mpp_pe()+5000,*) i, ioffset, upoff, Atm(n)%neststruct%ind_update_h(i,jsc_p,1) + enddo + + end if + + + end if + + endif + endif + end do + + if (ntilesMe > 1) call switch_current_Atm(Atm(1)) + if (ntilesMe > 1) call setup_pointers(Atm(1)) + + end subroutine fv_init +!------------------------------------------------------------------------------- + + +!>@brief The subroutine 'run_setup' initializes the run from a namelist. + subroutine run_setup(Atm, dt_atmos, grids_on_this_pe, p_split) + type(fv_atmos_type), intent(inout), target :: Atm(:) + real, intent(in) :: dt_atmos + logical, intent(INOUT) :: grids_on_this_pe(:) + integer, intent(INOUT) :: p_split + !--- local variables --- + character(len=80) :: tracerName, errString + character(len=32) :: nested_grid_filename + integer :: ios, ierr, f_unit, unit + logical :: exists + + real :: dim0 = 180. !< base dimension + real :: dt0 = 1800. !< base time step + real :: ns0 = 5. !< base nsplit for base dimension + !< For cubed sphere 5 is better + !real :: umax = 350. ! max wave speed for grid_type>3 ! Now defined above + real :: dimx, dl, dp, dxmin, dymin, d_fac + + integer :: n0split + integer :: n, nn, i + + integer :: pe_counter + +! 
local version of these variables to allow PGI compiler to compile + character(len=128) :: res_latlon_dynamics = '' + character(len=128) :: res_latlon_tracers = '' + character(len=80) :: grid_name = '' + character(len=120) :: grid_file = '' + + namelist /fv_grid_nml/ grid_name, grid_file + namelist /fv_core_nml/npx, npy, ntiles, npz, npz_rst, layout, io_layout, ncnst, nwat, & + use_logp, p_fac, a_imp, k_split, n_split, m_split, q_split, print_freq, write_3d_diags, do_schmidt, & + hord_mt, hord_vt, hord_tm, hord_dp, hord_tr, shift_fac, stretch_fac, target_lat, target_lon, & + kord_mt, kord_wz, kord_tm, kord_tr, fv_debug, fv_land, nudge, do_sat_adj, do_f3d, & + external_ic, read_increment, ncep_ic, nggps_ic, ecmwf_ic, use_new_ncep, use_ncep_phy, fv_diag_ic, & + external_eta, res_latlon_dynamics, res_latlon_tracers, scale_z, w_max, z_min, lim_fac, & + dddmp, d2_bg, d4_bg, vtdm4, trdm2, d_ext, delt_max, beta, non_ortho, n_sponge, & + warm_start, adjust_dry_mass, mountain, d_con, ke_bg, nord, nord_tr, convert_ke, use_old_omega, & + dry_mass, grid_type, do_Held_Suarez, do_reed_physics, reed_cond_only, & + consv_te, fill, filter_phys, fill_dp, fill_wz, consv_am, RF_fast, & + range_warn, dwind_2d, inline_q, z_tracer, reproduce_sum, adiabatic, do_vort_damp, no_dycore, & + tau, tau_h2o, rf_cutoff, nf_omega, hydrostatic, fv_sg_adj, breed_vortex_inline, & + na_init, nudge_dz, hybrid_z, Make_NH, n_zs_filter, nord_zs_filter, full_zs_filter, reset_eta, & + pnats, dnats, a2b_ord, remap_t, p_ref, d2_bg_k1, d2_bg_k2, & + c2l_ord, dx_const, dy_const, umax, deglat, & + deglon_start, deglon_stop, deglat_start, deglat_stop, & + phys_hydrostatic, use_hydro_pressure, make_hybrid_z, old_divg_damp, add_noise, & + nested, twowaynest, parent_grid_num, parent_tile, nudge_qv, & + refinement, nestbctype, nestupdate, nsponge, s_weight, & + ioffset, joffset, check_negative, nudge_ic, halo_update_type, gfs_phil, agrid_vel_rst, & + do_uni_zfull, adj_mass_vmr, fac_n_spl, fhouri, regional, bc_update_interval + + namelist /test_case_nml/test_case, bubble_do, alpha, nsolitons, soliton_Umax, soliton_size +#ifdef MULTI_GASES + namelist /multi_gases_nml/ rilist,cpilist +#endif + + + pe_counter = mpp_root_pe() + +! Make alpha = 0 the default: + alpha = 0. + bubble_do = .false. + test_case = 11 ! (USGS terrain) + +#ifdef INTERNAL_FILE_NML +! Read Main namelist + read (input_nml_file,fv_grid_nml,iostat=ios) + ierr = check_nml_error(ios,'fv_grid_nml') +#else + f_unit=open_namelist_file() + rewind (f_unit) +! Read Main namelist + read (f_unit,fv_grid_nml,iostat=ios) + ierr = check_nml_error(ios,'fv_grid_nml') + call close_file(f_unit) +#endif + + call write_version_number ( 'FV_CONTROL_MOD', version ) + unit = stdlog() + write(unit, nml=fv_grid_nml) + + do n=1,size(Atm) + + call switch_current_Atm(Atm(n), .false.) + call setup_pointers(Atm(n)) + Atm(n)%grid_number = n + if (grids_on_this_pe(n)) then + call fv_diag_init_gn(Atm(n)) + endif + +#ifdef INTERNAL_FILE_NML + ! Set input_file_nml for correct parent/nest initialization + if (n > 1) then + write(nested_grid_filename,'(A4, I2.2)') 'nest', n + call read_input_nml(nested_grid_filename) + endif + ! Read FVCORE namelist + read (input_nml_file,fv_core_nml,iostat=ios) + ierr = check_nml_error(ios,'fv_core_nml') +#ifdef MULTI_GASES + if( is_master() ) print *,' enter multi_gases: ncnst = ',ncnst + allocate (rilist(0:ncnst)) + allocate (cpilist(0:ncnst)) + rilist = 0.0 + cpilist = 0.0 + rilist(0) = rdgas + rilist(1) = rvgas + cpilist(0) = cp_air + cpilist(1) = 4*cp_air + ! 
Read multi_gases namelist + read (input_nml_file,multi_gases_nml,iostat=ios) + ierr = check_nml_error(ios,'multi_gases_nml') +#endif + ! Read Test_Case namelist + read (input_nml_file,test_case_nml,iostat=ios) + ierr = check_nml_error(ios,'test_case_nml') + + ! Reset input_file_nml to default behavior + call read_input_nml +#else + if (size(Atm) == 1) then + f_unit = open_namelist_file() + else if (n == 1) then + f_unit = open_namelist_file('input.nml') + else + write(nested_grid_filename,'(A10, I2.2, A4)') 'input_nest', n, '.nml' + f_unit = open_namelist_file(nested_grid_filename) + endif + + ! Read FVCORE namelist + read (f_unit,fv_core_nml,iostat=ios) + ierr = check_nml_error(ios,'fv_core_nml') + +#ifdef MULTI_GASES + if( is_master() ) print *,' enter multi_gases: ncnst = ',ncnst + allocate (rilist(0:ncnst)) + allocate (cpilist(0:ncnst)) + rilist = 0.0 + cpilist = 0.0 + rilist(0) = rdgas + rilist(1) = rvgas + cpilist(0) = cp_air + cpilist(1) = 4*cp_air + ! Read multi_gases namelist + rewind (f_unit) + read (f_unit,multi_gases_nml,iostat=ios) + ierr = check_nml_error(ios,'multi_gases_nml') +#endif + ! Read Test_Case namelist + rewind (f_unit) + read (f_unit,test_case_nml,iostat=ios) + ierr = check_nml_error(ios,'test_case_nml') + call close_file(f_unit) +#endif + write(unit, nml=fv_core_nml) + write(unit, nml=test_case_nml) +#ifdef MULTI_GASES + write(unit, nml=multi_gases_nml) + call multi_gases_init(ncnst,nwat) +#endif + + if (len_trim(grid_file) /= 0) Atm(n)%flagstruct%grid_file = grid_file + if (len_trim(grid_name) /= 0) Atm(n)%flagstruct%grid_name = grid_name + if (len_trim(res_latlon_dynamics) /= 0) Atm(n)%flagstruct%res_latlon_dynamics = res_latlon_dynamics + if (len_trim(res_latlon_tracers) /= 0) Atm(n)%flagstruct%res_latlon_tracers = res_latlon_tracers + + !*** single tile for Cartesian grids + if (grid_type>3) then + ntiles=1 + non_ortho = .false. + nf_omega = 0 + endif + + if (.not. (nested .or. regional)) Atm(n)%neststruct%npx_global = npx + + ! Define n_split if not in namelist + if (ntiles == 6) then + dimx = 4.0*(npx-1) + if ( hydrostatic ) then + if ( npx >= 120 ) ns0 = 6 + else + if ( npx <= 45 ) then + ns0 = 6 + elseif ( npx <= 90 ) then + ns0 = 7 + else + ns0 = 8 + endif + endif + else + dimx = max ( npx, 2*(npy-1) ) + endif + + if (grid_type < 4) then + n0split = nint ( ns0*abs(dt_atmos)*dimx/(dt0*dim0) + 0.49 ) + elseif (grid_type == 4 .or. grid_type == 7) then + n0split = nint ( 2.*umax*dt_atmos/sqrt(dx_const**2 + dy_const**2) + 0.49 ) + elseif (grid_type == 5 .or. grid_type == 6) then + if (grid_type == 6) then + deglon_start = 0.; deglon_stop = 360. + endif + dl = (deglon_stop-deglon_start)*pi/(180.*(npx-1)) + dp = (deglat_stop-deglat_start)*pi/(180.*(npy-1)) + + dxmin=dl*radius*min(cos(deglat_start*pi/180.-ng*dp), & + cos(deglat_stop *pi/180.+ng*dp)) + dymin=dp*radius + n0split = nint ( 2.*umax*dt_atmos/sqrt(dxmin**2 + dymin**2) + 0.49 ) + endif + n0split = max ( 1, n0split ) + + if ( n_split == 0 ) then + n_split = nint( real(n0split)/real(k_split*abs(p_split)) * stretch_fac + 0.5 ) + if(is_master()) write(*,*) 'For k_split (remapping)=', k_split + if(is_master()) write(*,198) 'n_split is set to ', n_split, ' for resolution-dt=',npx,npy,ntiles,dt_atmos + else + if(is_master()) write(*,199) 'Using n_split from the namelist: ', n_split + endif + if (is_master() .and. n == 1 .and. 
abs(p_split) > 1) then + write(*,199) 'Using p_split = ', p_split + endif + + if (Atm(n)%neststruct%nested) then + do i=1,n-1 + if (Atm(i)%grid_number == parent_grid_num) then + Atm(n)%parent_grid => Atm(i) + exit + end if + end do + if (.not. associated(Atm(n)%parent_grid)) then + write(errstring,'(2(A,I3))') "Could not find parent grid #", parent_grid_num, ' for grid #', n + call mpp_error(FATAL, errstring) + end if + + !Note that if a gnomonic grid has a parent it is a NESTED gnomonic grid and therefore only has one tile + if ( Atm(n)%parent_grid%flagstruct%grid_type < 3 .and. & + .not. associated(Atm(n)%parent_grid%parent_grid)) then + if (parent_tile > 6 .or. parent_tile < 1) then + call mpp_error(FATAL, 'parent tile must be between 1 and 6 if the parent is a cubed-sphere grid') + end if + else + if (parent_tile /= 1) then + call mpp_error(FATAL, 'parent tile must be 1 if the parent is not a cubed-sphere grid') + end if + end if + + if ( refinement < 1 ) call mpp_error(FATAL, 'grid refinement must be positive') + + if (nestupdate == 1 .or. nestupdate == 2) then + + if (mod(npx-1,refinement) /= 0 .or. mod(npy-1,refinement) /= 0) then + call mpp_error(WARNING, 'npx-1 or npy-1 is not evenly divisible by the refinement ratio; averaging update cannot be mass-conservative.') + end if + + end if + + if ( consv_te > 0.) then + call mpp_error(FATAL, 'The global energy fixer cannot be used on a nested grid. consv_te must be set to 0.') + end if + + Atm(n)%neststruct%refinement_of_global = Atm(n)%neststruct%refinement * Atm(n)%parent_grid%neststruct%refinement_of_global + max_refinement_of_global = max(Atm(n)%neststruct%refinement_of_global,max_refinement_of_global) + Atm(n)%neststruct%npx_global = Atm(n)%neststruct%refinement * Atm(n)%parent_grid%neststruct%npx_global + + else + Atm(n)%neststruct%ioffset = -999 + Atm(n)%neststruct%joffset = -999 + Atm(n)%neststruct%parent_tile = -1 + Atm(n)%neststruct%refinement = -1 + end if + + if (Atm(n)%neststruct%nested) then + if (Atm(n)%flagstruct%grid_type >= 4 .and. Atm(n)%parent_grid%flagstruct%grid_type >= 4) then + Atm(n)%flagstruct%dx_const = Atm(n)%parent_grid%flagstruct%dx_const / real(Atm(n)%neststruct%refinement) + Atm(n)%flagstruct%dy_const = Atm(n)%parent_grid%flagstruct%dy_const / real(Atm(n)%neststruct%refinement) + end if + end if + + +!---------------------------------------- +! Adjust divergence damping coefficients: +!---------------------------------------- +! d_fac = real(n0split)/real(n_split) +! dddmp = dddmp * d_fac +! d2_bg = d2_bg * d_fac +! d4_bg = d4_bg * d_fac +! d_ext = d_ext * d_fac +! vtdm4 = vtdm4 * d_fac + if (old_divg_damp) then + if (is_master()) write(*,*) " fv_control: using original values for divergence damping " + d2_bg_k1 = 6. ! factor for d2_bg (k=1) - default(4.) + d2_bg_k2 = 4. ! factor for d2_bg (k=2) - default(2.) + d2_divg_max_k1 = 0.02 ! d2_divg max value (k=1) - default(0.05) + d2_divg_max_k2 = 0.01 ! d2_divg max value (k=2) - default(0.02) + damp_k_k1 = 0. ! damp_k value (k=1) - default(0.05) + damp_k_k2 = 0. ! damp_k value (k=2) - default(0.025) + elseif (n_sponge == 0 ) then + if ( d2_bg_k1 > 1. ) d2_bg_k1 = 0.20 + if ( d2_bg_k2 > 1. ) d2_bg_k2 = 0.015 + endif + +! if ( beta < 1.e-5 ) beta = 0. ! beta < 0 is used for non-hydrostatic "one_grad_p" + + if ( .not.hydrostatic ) then + if ( m_split==0 ) then + m_split = 1. 
+ abs(dt_atmos)/real(k_split*n_split*abs(p_split)) + if (abs(a_imp) < 0.5) then + if(is_master()) write(*,199) 'm_split is set to ', m_split + endif + endif + if(is_master()) then + write(*,*) 'Off center implicit scheme param=', a_imp + write(*,*) ' p_fac=', p_fac + endif + endif + + if(is_master()) then + if (n_sponge >= 0) write(*,199) 'Using n_sponge : ', n_sponge + write(*,197) 'Using non_ortho : ', non_ortho + endif + + 197 format(A,l7) + 198 format(A,i2.2,A,i4.4,'x',i4.4,'x',i1.1,'-',f9.3) + 199 format(A,i3.3) + + if (.not. (nested .or. regional)) alpha = alpha*pi + + + allocate(Atm(n)%neststruct%child_grids(size(Atm))) + Atm(N)%neststruct%child_grids = .false. + + !Broadcast data + + !Check layout + + enddo + + !Set pelists + do n=1,size(Atm) + if (ANY(Atm(n)%pelist == gid)) then + call mpp_set_current_pelist(Atm(n)%pelist) + call mpp_get_current_pelist(Atm(n)%pelist, commID=commID) + call mp_start(commID,halo_update_type) + endif + + if (Atm(n)%neststruct%nested) then + Atm(n)%neststruct%parent_proc = ANY(Atm(n)%parent_grid%pelist == gid) + Atm(n)%neststruct%child_proc = ANY(Atm(n)%pelist == gid) + endif + enddo + + do n=1,size(Atm) + + call switch_current_Atm(Atm(n),.false.) + call setup_pointers(Atm(n)) + !! CLEANUP: WARNING not sure what changes to domain_decomp may cause + call domain_decomp(npx,npy,ntiles,grid_type,nested,Atm(n),layout,io_layout) + enddo + + !!! CLEANUP: This sets the pelist to ALL, which is also + !!! required for the define_nest_domains step in the next loop. + !!! Later the pelist must be reset to the 'local' pelist. + call broadcast_domains(Atm) + + do n=1,size(Atm) + call switch_current_Atm(Atm(n)) + call setup_pointers(Atm(n)) + + if (nested) then + if (mod(npx-1 , refinement) /= 0 .or. mod(npy-1, refinement) /= 0) & + call mpp_error(FATAL, 'npx or npy not an even refinement of its coarse grid.') + + !Pelist needs to be set to ALL (which should have been done + !in broadcast_domains) to get this to work + call mpp_define_nest_domains(Atm(n)%neststruct%nest_domain, Atm(n)%domain, Atm(parent_grid_num)%domain, & + 7, parent_tile, & + 1, npx-1, 1, npy-1, & !Grid cells, not points + ioffset, ioffset + (npx-1)/refinement - 1, & + joffset, joffset + (npy-1)/refinement - 1, & + (/ (i,i=0,mpp_npes()-1) /), extra_halo = 0, name="nest_domain") !What pelist to use? + call mpp_define_nest_domains(Atm(n)%neststruct%nest_domain, Atm(n)%domain, Atm(parent_grid_num)%domain, & + 7, parent_tile, & + 1, npx-1, 1, npy-1, & !Grid cells, not points + ioffset, ioffset + (npx-1)/refinement - 1, & + joffset, joffset + (npy-1)/refinement - 1, & + (/ (i,i=0,mpp_npes()-1) /), extra_halo = 0, name="nest_domain") !What pelist to use? +! (/ (i,i=0,mpp_npes()-1) /), extra_halo = 2, name="nest_domain_for_BC") !What pelist to use? + + Atm(parent_grid_num)%neststruct%child_grids(n) = .true. + + if (Atm(n)%neststruct%nestbctype > 1) then + + call mpp_error(FATAL, 'nestbctype > 1 not yet implemented') + + !This check is due to a bug which has not yet been identified. Beware. +! if (Atm(n)%parent_grid%flagstruct%hord_tr == 7) & +! call mpp_error(FATAL, "Flux-form nested BCs (nestbctype > 1) should not use hord_tr == 7 (on parent grid), since there is no guarantee of tracer mass conservation with this option.") + +!!$ if (Atm(n)%flagstruct%q_split > 0 .and. 
Atm(n)%parent_grid%flagstruct%q_split > 0) then +!!$ if (mod(Atm(n)%flagstruct%q_split,Atm(n)%parent_grid%flagstruct%q_split) /= 0) call mpp_error(FATAL, & +!!$ "Flux-form nested BCs (nestbctype > 1) require q_split on the nested grid to be evenly divisible by that on the coarse grid.") +!!$ endif +!!$ if (mod((Atm(n)%npx-1),Atm(n)%neststruct%refinement) /= 0 .or. mod((Atm(n)%npy-1),Atm(n)%neststruct%refinement) /= 0) call mpp_error(FATAL, & +!!$ "Flux-form nested BCs (nestbctype > 1) requires npx and npy to be one more than a multiple of the refinement ratio.") +!!$ Atm(n)%parent_grid%neststruct%do_flux_BCs = .true. +!!$ if (Atm(n)%neststruct%nestbctype == 3 .or. Atm(n)%neststruct%nestbctype == 4) Atm(n)%parent_grid%neststruct%do_2way_flux_BCs = .true. + Atm(n)%neststruct%upoff = 0 + endif + + end if + + do nn=1,size(Atm) + if (n == 1) allocate(Atm(nn)%neststruct%nest_domain_all(size(Atm))) + Atm(nn)%neststruct%nest_domain_all(n) = Atm(n)%neststruct%nest_domain + enddo + + end do + + do n=1,size(Atm) + if (ANY(Atm(n)%pelist == gid)) then + call mpp_set_current_pelist(Atm(n)%pelist) + endif + enddo + + end subroutine run_setup + subroutine init_nesting(Atm, grids_on_this_pe, p_split) + + type(fv_atmos_type), intent(inout), allocatable :: Atm(:) + logical, allocatable, intent(INOUT) :: grids_on_this_pe(:) + integer, intent(INOUT) :: p_split + character(100) :: pe_list_name + integer :: nest_pes(100) + integer :: n, npes, ntiles, pecounter, i + integer, allocatable :: pelist(:) + integer :: f_unit, ios, ierr + + !This is an OPTIONAL namelist, that needs to be read before everything else + namelist /nest_nml/ ngrids, ntiles, nest_pes, p_split + + call mp_assign_gid + + nest_pes = 0 + ntiles = -999 + +#ifdef INTERNAL_FILE_NML + read (input_nml_file,nest_nml,iostat=ios) + ierr = check_nml_error(ios,'nest_nml') +#else + f_unit=open_namelist_file() + rewind (f_unit) + read (f_unit,nest_nml,iostat=ios) + ierr = check_nml_error(ios,'nest_nml') + call close_file(f_unit) +#endif + + if (ntiles /= -999) ngrids = ntiles + if (ngrids > 10) call mpp_error(FATAL, "More than 10 nested grids not supported") + + allocate(Atm(ngrids)) + + allocate(grids_on_this_pe(ngrids)) + grids_on_this_pe = .false. !initialization + + npes = mpp_npes() + + ! Need to get a global pelist to send data around later? + allocate( pelist_all(npes) ) + pelist_all = (/ (i,i=0,npes-1) /) + pelist_all = pelist_all + mpp_root_pe() + + if (ngrids == 1) then + + !Set up the single pelist + allocate(Atm(1)%pelist(npes)) + Atm(1)%pelist = (/(i, i=0, npes-1)/) + Atm(1)%pelist = Atm(1)%pelist + mpp_root_pe() + call mpp_declare_pelist(Atm(1)%pelist) + call mpp_set_current_pelist(Atm(1)%pelist) + !Now set in domain_decomp + !masterproc = Atm(1)%pelist(1) + call setup_master(Atm(1)%pelist) + grids_on_this_pe(1) = .true. + Atm(1)%npes_this_grid = npes + + else + + pecounter = mpp_root_pe() + do n=1,ngrids + if (n == 1) then + pe_list_name = '' + else + write(pe_list_name,'(A4, I2.2)') 'nest', n + endif + + if (nest_pes(n) == 0) then + if (n < ngrids) call mpp_error(FATAL, 'Only nest_pes(ngrids) in nest_nml can be zero; preceeding values must be nonzero.') + allocate(Atm(n)%pelist(npes-pecounter)) + Atm(n)%pelist = (/(i, i=pecounter, npes-1)/) + if (n > 1) then + call mpp_declare_pelist(Atm(n)%pelist, trim(pe_list_name)) + !Make sure nested-grid input file exists + if (.not. 
file_exist('input_'//trim(pe_list_name)//'.nml')) then + call mpp_error(FATAL, "Could not find nested grid namelist input_"//trim(pe_list_name)//".nml") + endif + endif + exit + else + allocate(Atm(n)%pelist(nest_pes(n))) + Atm(n)%pelist = (/ (i, i=pecounter, pecounter+nest_pes(n)-1) /) + if (Atm(n)%pelist(nest_pes(n)) >= npes) then + call mpp_error(FATAL, 'PEs assigned by nest_pes in nest_nml exceeds number of available PEs.') + endif + + call mpp_declare_pelist(Atm(n)%pelist, trim(pe_list_name)) + !Make sure nested-grid input file exists + if (n > 1) then + if (.not. file_exist('input_'//trim(pe_list_name)//'.nml')) then + call mpp_error(FATAL, "Could not find nested grid namelist input_"//trim(pe_list_name)//".nml") + endif + endif + pecounter = pecounter+nest_pes(n) + endif + enddo + + !Set pelists + do n=1,ngrids + Atm(n)%npes_this_grid = size(Atm(n)%pelist) + if (ANY(gid == Atm(n)%pelist)) then + call mpp_set_current_pelist(Atm(n)%pelist) + !now set in domain_decomp + !masterproc = Atm(n)%pelist(1) + call setup_master(Atm(n)%pelist) + grids_on_this_pe(n) = .true. + exit + endif + enddo + + if (pecounter /= npes) then + call mpp_error(FATAL, 'nest_pes in nest_nml does not assign all of the available PEs.') + endif + endif + + !Layout is checked later, in fv_control + + end subroutine init_nesting + +!>@brief The subroutine 'setup_pointers' associates the MODULE flag pointers +!! with the ARRAY flag variables for the grid active on THIS pe so the flags +!! can be read in from the namelist. + subroutine setup_pointers(Atm) + + type(fv_atmos_type), intent(INOUT), target :: Atm + + !This routine associates the MODULE flag pointers with the ARRAY flag variables for the grid active on THIS pe so the flags can be read in from the namelist. + + res_latlon_dynamics => Atm%flagstruct%res_latlon_dynamics + res_latlon_tracers => Atm%flagstruct%res_latlon_tracers + + grid_type => Atm%flagstruct%grid_type + grid_name => Atm%flagstruct%grid_name + grid_file => Atm%flagstruct%grid_file + hord_mt => Atm%flagstruct%hord_mt + kord_mt => Atm%flagstruct%kord_mt + kord_wz => Atm%flagstruct%kord_wz + hord_vt => Atm%flagstruct%hord_vt + hord_tm => Atm%flagstruct%hord_tm + hord_dp => Atm%flagstruct%hord_dp + kord_tm => Atm%flagstruct%kord_tm + hord_tr => Atm%flagstruct%hord_tr + kord_tr => Atm%flagstruct%kord_tr + scale_z => Atm%flagstruct%scale_z + w_max => Atm%flagstruct%w_max + z_min => Atm%flagstruct%z_min + lim_fac => Atm%flagstruct%lim_fac + nord => Atm%flagstruct%nord + nord_tr => Atm%flagstruct%nord_tr + dddmp => Atm%flagstruct%dddmp + d2_bg => Atm%flagstruct%d2_bg + d4_bg => Atm%flagstruct%d4_bg + vtdm4 => Atm%flagstruct%vtdm4 + trdm2 => Atm%flagstruct%trdm2 + d2_bg_k1 => Atm%flagstruct%d2_bg_k1 + d2_bg_k2 => Atm%flagstruct%d2_bg_k2 + d2_divg_max_k1 => Atm%flagstruct%d2_divg_max_k1 + d2_divg_max_k2 => Atm%flagstruct%d2_divg_max_k2 + damp_k_k1 => Atm%flagstruct%damp_k_k1 + damp_k_k2 => Atm%flagstruct%damp_k_k2 + n_zs_filter => Atm%flagstruct%n_zs_filter + nord_zs_filter => Atm%flagstruct%nord_zs_filter + full_zs_filter => Atm%flagstruct%full_zs_filter + RF_fast => Atm%flagstruct%RF_fast + consv_am => Atm%flagstruct%consv_am + do_sat_adj => Atm%flagstruct%do_sat_adj + do_f3d => Atm%flagstruct%do_f3d + no_dycore => Atm%flagstruct%no_dycore + convert_ke => Atm%flagstruct%convert_ke + do_vort_damp => Atm%flagstruct%do_vort_damp + use_old_omega => Atm%flagstruct%use_old_omega + beta => Atm%flagstruct%beta + n_sponge => Atm%flagstruct%n_sponge + d_ext => Atm%flagstruct%d_ext + nwat => Atm%flagstruct%nwat + 
use_logp => Atm%flagstruct%use_logp + warm_start => Atm%flagstruct%warm_start + inline_q => Atm%flagstruct%inline_q + shift_fac => Atm%flagstruct%shift_fac + do_schmidt => Atm%flagstruct%do_schmidt + stretch_fac => Atm%flagstruct%stretch_fac + target_lat => Atm%flagstruct%target_lat + target_lon => Atm%flagstruct%target_lon + regional => Atm%flagstruct%regional + bc_update_interval => Atm%flagstruct%bc_update_interval + reset_eta => Atm%flagstruct%reset_eta + p_fac => Atm%flagstruct%p_fac + a_imp => Atm%flagstruct%a_imp + n_split => Atm%flagstruct%n_split + fac_n_spl => Atm%flagstruct%fac_n_spl + fhouri => Atm%flagstruct%fhouri + m_split => Atm%flagstruct%m_split + k_split => Atm%flagstruct%k_split + use_logp => Atm%flagstruct%use_logp + q_split => Atm%flagstruct%q_split + print_freq => Atm%flagstruct%print_freq + write_3d_diags => Atm%flagstruct%write_3d_diags + npx => Atm%flagstruct%npx + npy => Atm%flagstruct%npy + npz => Atm%flagstruct%npz + npz_rst => Atm%flagstruct%npz_rst + ncnst => Atm%flagstruct%ncnst + pnats => Atm%flagstruct%pnats + dnats => Atm%flagstruct%dnats + ntiles => Atm%flagstruct%ntiles + nf_omega => Atm%flagstruct%nf_omega + fv_sg_adj => Atm%flagstruct%fv_sg_adj + na_init => Atm%flagstruct%na_init + nudge_dz => Atm%flagstruct%nudge_dz + p_ref => Atm%flagstruct%p_ref + dry_mass => Atm%flagstruct%dry_mass + nt_prog => Atm%flagstruct%nt_prog + nt_phys => Atm%flagstruct%nt_phys + tau_h2o => Atm%flagstruct%tau_h2o + delt_max => Atm%flagstruct%delt_max + d_con => Atm%flagstruct%d_con + ke_bg => Atm%flagstruct%ke_bg + consv_te => Atm%flagstruct%consv_te + tau => Atm%flagstruct%tau + rf_cutoff => Atm%flagstruct%rf_cutoff + filter_phys => Atm%flagstruct%filter_phys + dwind_2d => Atm%flagstruct%dwind_2d + breed_vortex_inline => Atm%flagstruct%breed_vortex_inline + range_warn => Atm%flagstruct%range_warn + fill => Atm%flagstruct%fill + fill_dp => Atm%flagstruct%fill_dp + fill_wz => Atm%flagstruct%fill_wz + check_negative => Atm%flagstruct%check_negative + non_ortho => Atm%flagstruct%non_ortho + adiabatic => Atm%flagstruct%adiabatic + moist_phys => Atm%flagstruct%moist_phys + do_Held_Suarez => Atm%flagstruct%do_Held_Suarez + do_reed_physics => Atm%flagstruct%do_reed_physics + reed_cond_only => Atm%flagstruct%reed_cond_only + reproduce_sum => Atm%flagstruct%reproduce_sum + adjust_dry_mass => Atm%flagstruct%adjust_dry_mass + fv_debug => Atm%flagstruct%fv_debug + srf_init => Atm%flagstruct%srf_init + mountain => Atm%flagstruct%mountain + remap_t => Atm%flagstruct%remap_t + z_tracer => Atm%flagstruct%z_tracer + old_divg_damp => Atm%flagstruct%old_divg_damp + fv_land => Atm%flagstruct%fv_land + nudge => Atm%flagstruct%nudge + nudge_ic => Atm%flagstruct%nudge_ic + ncep_ic => Atm%flagstruct%ncep_ic + nggps_ic => Atm%flagstruct%nggps_ic + ecmwf_ic => Atm%flagstruct%ecmwf_ic + gfs_phil => Atm%flagstruct%gfs_phil + agrid_vel_rst => Atm%flagstruct%agrid_vel_rst + use_new_ncep => Atm%flagstruct%use_new_ncep + use_ncep_phy => Atm%flagstruct%use_ncep_phy + fv_diag_ic => Atm%flagstruct%fv_diag_ic + external_ic => Atm%flagstruct%external_ic + external_eta => Atm%flagstruct%external_eta + read_increment => Atm%flagstruct%read_increment + + hydrostatic => Atm%flagstruct%hydrostatic + phys_hydrostatic => Atm%flagstruct%phys_hydrostatic + use_hydro_pressure => Atm%flagstruct%use_hydro_pressure + do_uni_zfull => Atm%flagstruct%do_uni_zfull !miz + adj_mass_vmr => Atm%flagstruct%adj_mass_vmr !f1p + hybrid_z => Atm%flagstruct%hybrid_z + Make_NH => Atm%flagstruct%Make_NH + make_hybrid_z => 
Atm%flagstruct%make_hybrid_z + nudge_qv => Atm%flagstruct%nudge_qv + add_noise => Atm%flagstruct%add_noise + a2b_ord => Atm%flagstruct%a2b_ord + c2l_ord => Atm%flagstruct%c2l_ord + ndims => Atm%flagstruct%ndims + + dx_const => Atm%flagstruct%dx_const + dy_const => Atm%flagstruct%dy_const + deglon_start => Atm%flagstruct%deglon_start + deglon_stop => Atm%flagstruct%deglon_stop + deglat_start => Atm%flagstruct%deglat_start + deglat_stop => Atm%flagstruct%deglat_stop + + deglat => Atm%flagstruct%deglat + + nested => Atm%neststruct%nested + twowaynest => Atm%neststruct%twowaynest + parent_tile => Atm%neststruct%parent_tile + refinement => Atm%neststruct%refinement + nestbctype => Atm%neststruct%nestbctype + nestupdate => Atm%neststruct%nestupdate + nsponge => Atm%neststruct%nsponge + s_weight => Atm%neststruct%s_weight + ioffset => Atm%neststruct%ioffset + joffset => Atm%neststruct%joffset + + layout => Atm%layout + io_layout => Atm%io_layout + end subroutine setup_pointers + + +end module fv_control_stub_mod diff --git a/get_stochy_pattern.F90 b/get_stochy_pattern.F90 index d8c080f..f682ee0 100644 --- a/get_stochy_pattern.F90 +++ b/get_stochy_pattern.F90 @@ -14,7 +14,11 @@ module get_stochy_pattern_mod patterngenerator_advance use stochy_internal_state_mod, only: stochy_internal_state use fv_mp_mod, only : mp_reduce_sum,is_master - use GFS_typedefs, only: GFS_control_type, GFS_grid_type +#ifdef STOCHY_UNIT_TEST +use standalone_stochy_module, only: GFS_control_type, GFS_grid_type +# else +use GFS_typedefs, only: GFS_control_type, GFS_grid_type +#endif use mersenne_twister, only: random_seed use dezouv_stochy_mod, only: dezouv_stochy use dozeuv_stochy_mod, only: dozeuv_stochy @@ -77,9 +81,9 @@ subroutine get_random_pattern_fv3(rpattern,npatterns,& enddo call mp_reduce_sum(workg,lonf,latg) + ! 
interpolate to cube grid - allocate(rslmsk(lonf,latg)) do blk=1,nblks len=size(Grid(blk)%xlat,1) diff --git a/standalone_ca.F90 b/standalone_ca.F90 new file mode 100644 index 0000000..8c84b7b --- /dev/null +++ b/standalone_ca.F90 @@ -0,0 +1,265 @@ +program standalone_stochy_new + +use standalone_stochy_module + +use fv_mp_mod, only: mp_start, domain_decomp +use atmosphere_stub_mod, only: Atm,atmosphere_init_stub +use fv_arrays_mod, only: fv_atmos_type +use fv_control_mod, only: setup_pointers +!use mpp_domains +use mpp_mod, only: mpp_set_current_pelist,mpp_get_current_pelist,mpp_init,mpp_pe,mpp_npes ,mpp_declare_pelist +use mpp_domains_mod, only: mpp_broadcast_domain,MPP_DOMAIN_TIME,mpp_domains_init ,mpp_domains_set_stack_size +use fms_mod, only: fms_init +!use time_manager_mod, only: time_type +use xgrid_mod, only: grid_box_type +use netcdf + + +implicit none +type(GFS_control_type) :: Model +integer, parameter :: nlevs=64 +integer :: ntasks,fid,ct +integer :: nthreads,omp_get_num_threads +integer :: ncid,xt_dim_id,yt_dim_id,time_dim_id,xt_var_id,yt_var_id,time_var_id,ca_out_id +integer :: ca1_id,ca2_id,ca3_id +character*1 :: strid +type(GFS_grid_type),allocatable :: Grid(:) +type(GFS_diag_type),allocatable :: Diag(:) +type(GFS_statein_type),allocatable :: Statein(:) +type(GFS_coupling_type),allocatable :: Coupling(:) +include 'mpif.h' +include 'netcdf.inc' +real(kind=4) :: ts,undef + +integer :: cres,blksz,nblks,ierr,my_id,i,j,nx2,ny2,nx,ny,id +integer,target :: npx,npy +integer :: ng,layout(2),io_layout(2),commID,grid_type,ntiles +integer :: halo_update_type = 1 +logical,target :: nested +integer :: pe,npes,stackmax=4000000 + +real(kind=4),allocatable,dimension(:,:) :: workg +real(kind=4),allocatable,dimension(:) :: grid_xt,grid_yt +real(kind=8),pointer ,dimension(:,:) :: area +type(grid_box_type) :: grid_box +!type(time_type) :: Time ! current time +!type(time_type) :: Time_step ! atmospheric time step. +!type(time_type) :: Time_init ! reference time. +!---cellular automata control parameters +integer :: nca !< number of independent cellular automata +integer :: nlives !< cellular automata lifetime +integer :: ncells !< cellular automata finer grid +real :: nfracseed !< cellular automata seed probability +integer :: nseed !< cellular automata seed frequency +logical :: do_ca !< cellular automata main switch +logical :: ca_sgs !< switch for sgs ca +logical :: ca_global !< switch for global ca +logical :: ca_smooth !< switch for gaussian spatial filter +logical :: isppt_deep !< switch for combination with isppt_deep. OBS! Switches off SPPT on other tendencies! +logical :: isppt_pbl +logical :: isppt_shal +logical :: pert_flux +logical :: pert_trigger +integer :: iseed_ca !< seed for random number generation in ca scheme +integer :: nspinup !< number of iterations to spin up the ca +integer :: ca_amplitude +real :: nthresh !< threshold used for perturbed vertical velocity + +NAMELIST /gfs_physics_nml/ nca, ncells, nlives, nfracseed,nseed, nthresh, & + do_ca,ca_sgs, ca_global,iseed_ca,ca_smooth,isppt_pbl,isppt_shal,isppt_deep,nspinup,& + pert_trigger,pert_flux,ca_amplitude + +! default values +nca = 1 +ncells = 5 +nlives = 10 +nfracseed = 0.5 +nseed = 100000 +iseed_ca = 0 +nspinup = 1 +do_ca = .false. +ca_sgs = .false. +ca_global = .false. +ca_smooth = .false. +isppt_deep = .false. +isppt_shal = .false. +isppt_pbl = .false. +pert_trigger = .false. +pert_flux = .false. +ca_amplitude = 500. +nthresh = 0.0 + +! 
open namelist file +open (unit=565, file='input.nml', READONLY, status='OLD', iostat=ierr) +read(565,gfs_physics_nml) +close(565) +! define stuff +ng=3 ! ghost region +undef=9.99e+20 + +! initialize fms +call fms_init() +call mpp_init() +call fms_init +my_id=mpp_pe() + +call atmosphere_init_stub (grid_box, area) +!define domain +isd=Atm(1)%bd%isd +ied=Atm(1)%bd%ied +jsd=Atm(1)%bd%jsd +jed=Atm(1)%bd%jed +isc=Atm(1)%bd%isc +iec=Atm(1)%bd%iec +jsc=Atm(1)%bd%jsc +jec=Atm(1)%bd%jec +nx=Atm(1)%npx-1 +ny=Atm(1)%npy-1 +allocate(workg(nx,ny)) + +! for this simple test, nblocks = ny, blksz=ny +nblks=ny +blksz=nx +nthreads = omp_get_num_threads() +Model%me=my_id + +Model%nca = nca +Model%ncells = ncells +Model%nlives = nlives +Model%nfracseed = nfracseed +Model%nseed = nseed +Model%iseed_ca = iseed_ca +Model%nspinup = nspinup +Model%do_ca = do_ca +Model%ca_sgs = ca_sgs +Model%ca_global = ca_global +Model%ca_smooth = ca_smooth +Model%isppt_deep = isppt_deep +Model%isppt_pbl = isppt_pbl +Model%isppt_shal = isppt_shal +Model%pert_flux = pert_flux +Model%pert_trigger = pert_trigger +Model%nthresh = nthresh + +! setup GFS_init parameters + +!define model grid + +allocate(grid_xt(nx),grid_yt(ny)) +do i=1,nx + grid_xt(i)=i +enddo +do i=1,ny + grid_yt(i)=i +enddo + +!setup GFS_coupling +allocate(Diag(nblks)) +allocate(Coupling(nblks)) +allocate(Statein(nblks)) +write(strid,'(I1.1)') my_id+1 +fid=30+my_id +ierr=nf90_create('ca_out.tile'//strid//'.nc',cmode=NF90_CLOBBER,ncid=ncid) +ierr=NF90_DEF_DIM(ncid,"grid_xt",nx,xt_dim_id) +ierr=NF90_DEF_DIM(ncid,"grid_yt",ny,yt_dim_id) +ierr=NF90_DEF_DIM(ncid,"time",NF90_UNLIMITED,time_dim_id) + !> - Define the dimension variables. +ierr=NF90_DEF_VAR(ncid,"grid_xt",NF90_FLOAT,(/ xt_dim_id /), xt_var_id) +ierr=NF90_PUT_ATT(ncid,xt_var_id,"long_name","T-cell longitude") +ierr=NF90_PUT_ATT(ncid,xt_var_id,"cartesian_axis","X") +ierr=NF90_PUT_ATT(ncid,xt_var_id,"units","degrees_E") +ierr=NF90_DEF_VAR(ncid,"grid_yt",NF90_FLOAT,(/ yt_dim_id /), yt_var_id) +ierr=NF90_PUT_ATT(ncid,yt_var_id,"long_name","T-cell latitude") +ierr=NF90_PUT_ATT(ncid,yt_var_id,"cartesian_axis","Y") +ierr=NF90_PUT_ATT(ncid,yt_var_id,"units","degrees_N") +ierr=NF90_DEF_VAR(ncid,"time",NF90_FLOAT,(/ time_dim_id /), time_var_id) +ierr=NF90_PUT_ATT(ncid,time_var_id,"long_name","time") +ierr=NF90_PUT_ATT(ncid,time_var_id,"units","hours since 2014-08-01 00:00:00") +ierr=NF90_PUT_ATT(ncid,time_var_id,"cartesian_axis","T") +ierr=NF90_PUT_ATT(ncid,time_var_id,"calendar_type","JULIAN") +ierr=NF90_PUT_ATT(ncid,time_var_id,"calendar","JULIAN") +!ierr=NF90_DEF_VAR(ncid,"ca_out",NF90_FLOAT,(/xt_dim_id, yt_dim_id ,time_dim_id/), ca_out_id) +!ierr=NF90_PUT_ATT(ncid,ca_out_id,"long_name","random pattern") +!ierr=NF90_PUT_ATT(ncid,ca_out_id,"units","None") +!ierr=NF90_PUT_ATT(ncid,ca_out_id,"missing_value",undef) +!ierr=NF90_PUT_ATT(ncid,ca_out_id,"_FillValue",undef) +!ierr=NF90_PUT_ATT(ncid,ca_out_id,"cell_methods","time: point") +ierr=NF90_DEF_VAR(ncid,"ca1",NF90_FLOAT,(/xt_dim_id, yt_dim_id ,time_dim_id/), ca1_id) +ierr=NF90_PUT_ATT(ncid,ca1_id,"long_name","random pattern") +ierr=NF90_PUT_ATT(ncid,ca1_id,"units","None") +ierr=NF90_PUT_ATT(ncid,ca1_id,"missing_value",undef) +ierr=NF90_PUT_ATT(ncid,ca1_id,"_FillValue",undef) +ierr=NF90_PUT_ATT(ncid,ca1_id,"cell_methods","time: point") +ierr=NF90_DEF_VAR(ncid,"ca2",NF90_FLOAT,(/xt_dim_id, yt_dim_id ,time_dim_id/), ca2_id) +ierr=NF90_PUT_ATT(ncid,ca2_id,"long_name","random pattern") +ierr=NF90_PUT_ATT(ncid,ca2_id,"units","None") 
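For reference, a minimal &gfs_physics_nml block that this standalone CA driver could pick up from input.nml is sketched below. The variable names are taken from the NAMELIST declaration earlier in this file, the values mirror the in-code defaults, and do_ca/ca_global are shown switched on as one plausible setting for a global-CA test; treat the numbers as placeholders rather than tuned choices.

 &gfs_physics_nml
    do_ca      = .true.      ! cellular automata main switch
    ca_global  = .true.      ! switch for the global CA
    ca_sgs     = .false.
    ca_smooth  = .false.
    nca        = 1           ! number of independent automata
    ncells     = 5           ! CA finer-grid refinement
    nlives     = 10          ! CA lifetime
    nfracseed  = 0.5         ! seed probability
    nseed      = 100000      ! seed frequency
    nspinup    = 1           ! spin-up iterations
    nthresh    = 0.0         ! perturbed vertical-velocity threshold
    iseed_ca   = 0           ! random-number seed
 /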
+ierr=NF90_PUT_ATT(ncid,ca2_id,"missing_value",undef) +ierr=NF90_PUT_ATT(ncid,ca2_id,"_FillValue",undef) +ierr=NF90_PUT_ATT(ncid,ca2_id,"cell_methods","time: point") +ierr=NF90_DEF_VAR(ncid,"ca3",NF90_FLOAT,(/xt_dim_id, yt_dim_id ,time_dim_id/), ca3_id) +ierr=NF90_PUT_ATT(ncid,ca3_id,"long_name","random pattern") +ierr=NF90_PUT_ATT(ncid,ca3_id,"units","None") +ierr=NF90_PUT_ATT(ncid,ca3_id,"missing_value",undef) +ierr=NF90_PUT_ATT(ncid,ca3_id,"_FillValue",undef) +ierr=NF90_PUT_ATT(ncid,ca3_id,"cell_methods","time: point") +ierr=NF90_ENDDEF(ncid) +ierr=NF90_PUT_VAR(ncid,xt_var_id,grid_xt) +ierr=NF90_PUT_VAR(ncid,yt_var_id,grid_yt) +! allocate diagnostics +DO i =1,nblks + allocate(Diag(i)%ca_out(blksz)) + allocate(Diag(i)%ca_deep(blksz)) + allocate(Diag(i)%ca_turb(blksz)) + allocate(Diag(i)%ca_shal(blksz)) + allocate(Diag(i)%ca_rad(blksz)) + allocate(Diag(i)%ca_micro(blksz)) + allocate(Diag(i)%ca1(blksz)) + allocate(Diag(i)%ca2(blksz)) + allocate(Diag(i)%ca3(blksz)) +! allocate coupling + allocate(Coupling(i)%cape(blksz)) + allocate(Coupling(i)%ca_out(blksz)) + allocate(Coupling(i)%ca_deep(blksz)) + allocate(Coupling(i)%ca_turb(blksz)) + allocate(Coupling(i)%ca_shal(blksz)) + allocate(Coupling(i)%ca_rad(blksz)) + allocate(Coupling(i)%ca_micro(blksz)) + allocate(Coupling(i)%ca1(blksz)) + allocate(Coupling(i)%ca2(blksz)) + allocate(Coupling(i)%ca3(blksz)) +! allocate coupling + allocate(Statein(i)%pgr(blksz)) + allocate(Statein(i)%qgrs(blksz,nlevs,1)) + allocate(Statein(i)%vvl(blksz,nlevs)) + allocate(Statein(i)%prsl(blksz,nlevs)) +ENDDO +ct=1 +do i=1,600 + ts=i/8.0 ! hard coded to write out hourly based on a 450 second time-step + call cellular_automata_global(i-1, Statein, Coupling, Diag, & + nblks, Model%levs, Model%nca, Model%ncells, & + Model%nlives, Model%nfracseed, Model%nseed, & + Model%nthresh, Model%ca_global, Model%ca_sgs, & + Model%iseed_ca, Model%ca_smooth, Model%nspinup, & + blksz) + if (mod(i,8).EQ.0) then + do j=1,ny + workg(:,j)=Diag(j)%ca1(:) + enddo + ierr=NF90_PUT_VAR(ncid,ca1_id,workg,(/1,1,ct/)) + do j=1,ny + workg(:,j)=Diag(j)%ca2(:) + enddo + ierr=NF90_PUT_VAR(ncid,ca2_id,workg,(/1,1,ct/)) + do j=1,ny + workg(:,j)=Diag(j)%ca3(:) + enddo + ierr=NF90_PUT_VAR(ncid,ca3_id,workg,(/1,1,ct/)) + ierr=NF90_PUT_VAR(ncid,time_var_id,ts,(/ct/)) + ct=ct+1 + if (my_id.EQ.0) write(6,fmt='(a,i5,4f6.3)') 'ca=',i,Diag(1)%ca1(1:4) + endif +enddo +!close(fid) +ierr=NF90_CLOSE(ncid) +end diff --git a/standalone_stochy.F90 b/standalone_stochy.F90 new file mode 100644 index 0000000..75e9ce7 --- /dev/null +++ b/standalone_stochy.F90 @@ -0,0 +1,318 @@ +program standalone_stochy + +use standalone_stochy_module +use stochastic_physics, only : init_stochastic_physics,run_stochastic_physics + +use fv_mp_mod, only: mp_start, domain_decomp +use atmosphere_stub_mod, only: Atm,atmosphere_init_stub +use fv_arrays_mod, only: fv_atmos_type +use fv_control_stub_mod, only: setup_pointers +!use mpp_domains +use mpp_mod, only: mpp_set_current_pelist,mpp_get_current_pelist,mpp_init,mpp_pe,mpp_npes ,mpp_declare_pelist +use mpp_domains_mod, only: mpp_broadcast_domain,MPP_DOMAIN_TIME,mpp_domains_init ,mpp_domains_set_stack_size +use fms_mod, only: fms_init +use xgrid_mod, only: grid_box_type +use netcdf + +implicit none +type(GFS_control_type) :: Model +type(GFS_init_type) :: Init_parm +integer, parameter :: nlevs=64 +integer :: ntasks,fid +integer :: nthreads,omp_get_num_threads +integer :: ncid,xt_dim_id,yt_dim_id,time_dim_id,xt_var_id,yt_var_id,time_var_id +integer :: varid1,varid2,varid3,varid4 +integer :: 
zt_dim_id,zt_var_id +character*1 :: strid +type(GFS_grid_type),allocatable :: Grid(:) +type(GFS_coupling_type),allocatable :: Coupling(:) +! stochastic namelist fields +integer nssppt,nsshum,nsskeb,lon_s,lat_s,ntrunc +integer skeb_varspect_opt,skeb_npass +logical sppt_sfclimit + +real(kind=kind_dbl_prec) :: skeb_sigtop1,skeb_sigtop2, & + sppt_sigtop1,sppt_sigtop2,shum_sigefold, & + skeb_vdof +real(kind=kind_dbl_prec) fhstoch,skeb_diss_smooth,spptint,shumint,skebint,skebnorm +real(kind=kind_dbl_prec), dimension(5) :: skeb,skeb_lscale,skeb_tau +real(kind=kind_dbl_prec), dimension(5) :: sppt,sppt_lscale,sppt_tau +real(kind=kind_dbl_prec), dimension(5) :: shum,shum_lscale,shum_tau +integer,dimension(5) ::skeb_vfilt +integer(8),dimension(5) ::iseed_sppt,iseed_shum,iseed_skeb +logical stochini,sppt_logit,new_lscale +logical use_zmtnblck +logical sppt_land +include 'mpif.h' +include 'netcdf.inc' +real :: ak(nlevs+1),bk(nlevs+1) +real(kind=4) :: ts,undef + +data ak(:) /0.000, 0.000, 0.575, 5.741, 21.516, 55.712, 116.899, 214.015, 356.223, 552.720, 812.489, & + 1143.988, 1554.789, 2051.150, 2637.553, 3316.217, 4086.614, 4945.029, 5884.206, 6893.117, & + 7956.908, 9057.051, 10171.712, 11276.348, 12344.490, 13348.671, 14261.435, 15056.342, & + 15708.893, 16197.315, 16503.145, 16611.604, 16511.736, 16197.967, 15683.489, 14993.074, & + 14154.316, 13197.065, 12152.937, 11054.853, 9936.614, 8832.537, 7777.150, 6804.874, 5937.050,& + 5167.146, 4485.493, 3883.052, 3351.460, 2883.038, 2470.788, 2108.366, 1790.051, 1510.711, & + 1265.752, 1051.080, 863.058, 698.457, 554.424, 428.434, 318.266, 221.958, 137.790, 64.247,0.0 / +data bk(:) /1.00000000, 0.99467117, 0.98862660, 0.98174226, 0.97386760, 0.96482760, 0.95443410, 0.94249105, & + 0.92879730, 0.91315103, 0.89535499, 0.87522358, 0.85259068, 0.82731885, 0.79930973, 0.76851469, & + 0.73494524, 0.69868290, 0.65988702, 0.61879963, 0.57574666, 0.53113484, 0.48544332, 0.43921080, & + 0.39301825, 0.34746850, 0.30316412, 0.26068544, 0.22057019, 0.18329623, 0.14926878, 0.11881219, & + 0.09216691, 0.06947458, 0.05064684, 0.03544162, 0.02355588, 0.01463712, 0.00829402, 0.00410671, & + 0.00163591, 0.00043106, 0.00003697, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, & + 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, & + 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, & + 0.00000000 / +integer :: cres,blksz,nblks,ierr,my_id,i,j,k,nx2,ny2,nx,ny,id +integer,target :: npx,npy +integer :: ng,layout(2),io_layout(2),commID,grid_type,ntiles +integer :: halo_update_type = 1 +real :: dx,dy,pi,rd,cp +logical,target :: nested +logical :: write_this_tile +integer :: nargs,ntile_out,nlunit,pe,npes,stackmax=4000000 +character*80 :: fname +character*1 :: ntile_out_str + +real(kind=4),allocatable,dimension(:,:) :: workg +real(kind=4),allocatable,dimension(:,:,:) :: workg3d +real(kind=4),allocatable,dimension(:) :: grid_xt,grid_yt +real(kind=8),pointer ,dimension(:,:) :: area +real(kind=8) :: ex3d(nlevs+1),pressi(nlevs+1),pressl(nlevs),p1000,exn + +type(grid_box_type) :: grid_box + + namelist /nam_stochy/ntrunc,lon_s,lat_s,sppt,sppt_tau,sppt_lscale,sppt_logit, & + iseed_shum,iseed_sppt,shum,shum_tau,& + shum_lscale,fhstoch,stochini,skeb_varspect_opt,sppt_sfclimit, & + skeb,skeb_tau,skeb_vdof,skeb_lscale,iseed_skeb,skeb_vfilt,skeb_diss_smooth, & + skeb_sigtop1,skeb_sigtop2,skebnorm,sppt_sigtop1,sppt_sigtop2,& + 
shum_sigefold,spptint,shumint,skebint,skeb_npass,use_zmtnblck,new_lscale +write_this_tile=.false. +ntile_out_str='0' +nargs=iargc() +if (nargs.EQ.1) then + call getarg(1,ntile_out_str) +endif +read(ntile_out_str,'(I1.1)') ntile_out +open (unit=nlunit, file='input.nml', READONLY, status='OLD') +read(nlunit,nam_stochy) +close(nlunit) +Model%do_sppt=.false. +Model%do_shum=.false. +Model%do_skeb=.false. +if (sppt(1).GT.0) Model%do_sppt=.true. +if (shum(1).GT.0) Model%do_shum=.true. +if (skeb(1).GT.0) Model%do_skeb=.true. +! define stuff +ng=3 +pi=3.14159265359 +undef=9.99e+20 +p1000=100000.0 +!define mid-layer pressure +rd=287.0 +cp=1004.0 +DO k=1,nlevs + pressi(k)=ak(k)+p1000*bk(k) +ENDDO +ex3d=cp*(pressi/p1000)**(rd/cp) +DO k=1,nlevs + exn = (ex3d(k)*pressi(k)-ex3d(k+1)*pressi(k+1))/((cp+rd)*(pressi(k)-pressi(k+1))) + pressl(k)=p1000*exn**(cp/rd) +ENDDO + +call fms_init() +call mpp_init() +call fms_init +my_id=mpp_pe() +ntasks=mpp_npes() + +call atmosphere_init_stub (grid_box, area) +isd=Atm(1)%bd%isd +ied=Atm(1)%bd%ied +jsd=Atm(1)%bd%jsd +jed=Atm(1)%bd%jed +isc=Atm(1)%bd%isc +iec=Atm(1)%bd%iec +jsc=Atm(1)%bd%jsc +jec=Atm(1)%bd%jec +nx=Atm(1)%npx-1 +ny=Atm(1)%npy-1 +allocate(workg(nx,ny)) +allocate(workg3d(nx,ny,nlevs)) +nblks=ny +blksz=nx +Allocate(Model%blksz(nblks)) +Model%blksz(:)=blksz +nthreads = omp_get_num_threads() +Model%me=my_id +Model%phour=0 +Model%kdt=1 +Model%dtp=900 +Model%fn_nml='input.nml' +Model%levs=nlevs +allocate(Init_parm%blksz(nblks)) +Init_parm%blksz(:)=blksz +! setup GFS_init parameters +allocate(Init_parm%ak(nlevs+1)) +allocate(Init_parm%bk(nlevs+1)) +Init_parm%ak=ak +Init_parm%bk=bk +Init_parm%nlunit=21 + +!define model grid +Model%nx=nx +Model%ny=ny +dx=360.0/Model%nx +dy=180.0/Model%ny +allocate(Init_parm%xlon(Model%nx,Model%ny)) +allocate(Init_parm%xlat(Model%nx,Model%ny)) +Init_parm%xlon(:,:)=Atm(1)%gridstruct%agrid(:,:,1) +Init_parm%xlat(:,:)=Atm(1)%gridstruct%agrid(:,:,2) + +allocate(Grid(nblks)) +do i=1,nblks + allocate(Grid(i)%xlat(blksz)) + allocate(Grid(i)%xlon(blksz)) +enddo +do j=1,nblks + Grid(j)%xlat(:)=Init_parm%xlat(:,j) + Grid(j)%xlon(:)=Init_parm%xlon(:,j) +enddo +allocate(grid_xt(nx),grid_yt(ny)) +do i=1,nx + grid_xt(i)=i +enddo +do i=1,ny + grid_yt(i)=i +enddo +!setup GFS_coupling +allocate(Coupling(nblks)) +call init_stochastic_physics(Model, Init_parm, ntasks, nthreads) +call get_outfile(fname) +write(strid,'(I1.1)') my_id+1 +if (ntile_out.EQ.0) write_this_tile=.true. +if ((my_id+1).EQ.ntile_out) write_this_tile=.true. +print*,trim(fname)//'.tile'//strid//'.nc',write_this_tile +if (write_this_tile) then +fid=30+my_id +!ierr=nf90_create(trim(fname)//'.tile'//strid//'.nc',cmode=NF90_CLOBBER,ncid=ncid) +ierr=nf90_create(trim(fname)//'.tile'//strid//'.nc',cmode=NF90_CLOBBER,ncid=ncid) +ierr=NF90_DEF_DIM(ncid,"grid_xt",nx,xt_dim_id) +ierr=NF90_DEF_DIM(ncid,"grid_yt",ny,yt_dim_id) +if (Model%do_skeb)ierr=NF90_DEF_DIM(ncid,"p_ref",nlevs,zt_dim_id) +ierr=NF90_DEF_DIM(ncid,"time",NF90_UNLIMITED,time_dim_id) + !> - Define the dimension variables. 
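For orientation, a minimal &nam_stochy block that this standalone driver could read from input.nml is sketched here. Only names from the nam_stochy declaration above are used, and the numbers are placeholders rather than tuned settings; in this driver a scheme is switched on simply by giving its amplitude (sppt, shum or skeb) a positive value, so amplitudes left unset or non-positive keep that scheme off.

 &nam_stochy
    ntrunc      = 382        ! spectral truncation (placeholder)
    lon_s       = 768        ! transform grid dimensions (placeholders)
    lat_s       = 384
    sppt        = 0.8        ! positive value enables SPPT in this driver
    sppt_tau    = 21600.     ! decorrelation time scale (s)
    sppt_lscale = 500000.    ! horizontal length scale (m)
    sppt_logit  = .true.
    iseed_sppt  = 12345      ! placeholder seed
 /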
+ierr=NF90_DEF_VAR(ncid,"grid_xt",NF90_FLOAT,(/ xt_dim_id /), xt_var_id) +ierr=NF90_PUT_ATT(ncid,xt_var_id,"long_name","T-cell longitude") +ierr=NF90_PUT_ATT(ncid,xt_var_id,"cartesian_axis","X") +ierr=NF90_PUT_ATT(ncid,xt_var_id,"units","degrees_E") +ierr=NF90_DEF_VAR(ncid,"grid_yt",NF90_FLOAT,(/ yt_dim_id /), yt_var_id) +ierr=NF90_PUT_ATT(ncid,yt_var_id,"long_name","T-cell latitude") +ierr=NF90_PUT_ATT(ncid,yt_var_id,"cartesian_axis","Y") +ierr=NF90_PUT_ATT(ncid,yt_var_id,"units","degrees_N") +if (Model%do_skeb)then + ierr=NF90_DEF_VAR(ncid,"p_ref",NF90_FLOAT,(/ zt_dim_id /), zt_var_id) + ierr=NF90_PUT_ATT(ncid,zt_var_id,"long_name","reference pressure") + ierr=NF90_PUT_ATT(ncid,zt_var_id,"cartesian_axis","Z") + ierr=NF90_PUT_ATT(ncid,zt_var_id,"units","Pa") +endif +ierr=NF90_DEF_VAR(ncid,"time",NF90_FLOAT,(/ time_dim_id /), time_var_id) +ierr=NF90_DEF_VAR(ncid,"time",NF90_FLOAT,(/ time_dim_id /), time_var_id) +ierr=NF90_PUT_ATT(ncid,time_var_id,"long_name","time") +ierr=NF90_PUT_ATT(ncid,time_var_id,"units","hours since 2014-08-01 00:00:00") +ierr=NF90_PUT_ATT(ncid,time_var_id,"cartesian_axis","T") +ierr=NF90_PUT_ATT(ncid,time_var_id,"calendar_type","JULIAN") +ierr=NF90_PUT_ATT(ncid,time_var_id,"calendar","JULIAN") +if (Model%do_sppt)then + ierr=NF90_DEF_VAR(ncid,"sppt_wts",NF90_FLOAT,(/xt_dim_id, yt_dim_id ,time_dim_id/), varid1) + ierr=NF90_PUT_ATT(ncid,varid1,"long_name","sppt pattern") + ierr=NF90_PUT_ATT(ncid,varid1,"units","None") + ierr=NF90_PUT_ATT(ncid,varid1,"missing_value",undef) + ierr=NF90_PUT_ATT(ncid,varid1,"_FillValue",undef) + ierr=NF90_PUT_ATT(ncid,varid1,"cell_methods","time: point") +endif +if (Model%do_shum)then + ierr=NF90_DEF_VAR(ncid,"shum_wts",NF90_FLOAT,(/xt_dim_id, yt_dim_id ,time_dim_id/), varid2) + ierr=NF90_PUT_ATT(ncid,varid2,"long_name","shum pattern") + ierr=NF90_PUT_ATT(ncid,varid2,"units","None") + ierr=NF90_PUT_ATT(ncid,varid2,"missing_value",undef) + ierr=NF90_PUT_ATT(ncid,varid2,"_FillValue",undef) + ierr=NF90_PUT_ATT(ncid,varid2,"cell_methods","time: point") +endif +if (Model%do_skeb)then + ierr=NF90_DEF_VAR(ncid,"skebu_wts",NF90_FLOAT,(/xt_dim_id, yt_dim_id ,zt_dim_id,time_dim_id/), varid3) + ierr=NF90_DEF_VAR(ncid,"skebv_wts",NF90_FLOAT,(/xt_dim_id, yt_dim_id ,zt_dim_id,time_dim_id/), varid4) + ierr=NF90_PUT_ATT(ncid,varid3,"long_name","skeb u pattern") + ierr=NF90_PUT_ATT(ncid,varid3,"units","None") + ierr=NF90_PUT_ATT(ncid,varid3,"missing_value",undef) + ierr=NF90_PUT_ATT(ncid,varid3,"_FillValue",undef) + ierr=NF90_PUT_ATT(ncid,varid3,"cell_methods","time: point") + ierr=NF90_PUT_ATT(ncid,varid4,"long_name","skeb v pattern") + ierr=NF90_PUT_ATT(ncid,varid4,"units","None") + ierr=NF90_PUT_ATT(ncid,varid4,"missing_value",undef) + ierr=NF90_PUT_ATT(ncid,varid4,"_FillValue",undef) + ierr=NF90_PUT_ATT(ncid,varid4,"cell_methods","time: point") +endif +ierr=NF90_ENDDEF(ncid) +ierr=NF90_PUT_VAR(ncid,xt_var_id,grid_xt) +ierr=NF90_PUT_VAR(ncid,yt_var_id,grid_yt) +if (Model%do_skeb)then + ierr=NF90_PUT_VAR(ncid,zt_var_id,pressl) +endif +endif + +do i=1,nblks + if (Model%do_sppt)allocate(Coupling(i)%sppt_wts(blksz,nlevs)) + if (Model%do_shum)allocate(Coupling(i)%shum_wts(blksz,nlevs)) + if (Model%do_skeb)allocate(Coupling(i)%skebu_wts(blksz,nlevs)) + if (Model%do_skeb)allocate(Coupling(i)%skebv_wts(blksz,nlevs)) +enddo +do i=1,200 + Model%kdt=i + ts=i/4.0 + call run_stochastic_physics(Model, Grid, Coupling, nthreads) + if (Model%me.EQ.0) print*,'sppt_wts=',i,Coupling(1)%sppt_wts(1,20) + if (write_this_tile) then + if (Model%do_sppt)then + do j=1,ny + 
workg(:,j)=Coupling(j)%sppt_wts(:,20) + enddo + ierr=NF90_PUT_VAR(ncid,varid1,workg,(/1,1,i/)) + endif + if (Model%do_shum)then + do j=1,ny + workg(:,j)=Coupling(j)%shum_wts(:,1) + enddo + ierr=NF90_PUT_VAR(ncid,varid2,workg,(/1,1,i/)) + endif + if (Model%do_skeb)then + do k=1,nlevs + do j=1,ny + workg3d(:,j,k)=Coupling(j)%skebu_wts(:,k) + enddo + enddo + ierr=NF90_PUT_VAR(ncid,varid3,workg3d,(/1,1,1,i/)) + do k=1,nlevs + do j=1,ny + workg3d(:,j,k)=Coupling(j)%skebv_wts(:,k) + enddo + enddo + ierr=NF90_PUT_VAR(ncid,varid4,workg3d,(/1,1,1,i/)) + endif + ierr=NF90_PUT_VAR(ncid,time_var_id,ts,(/i/)) + endif +enddo +if (write_this_tile) ierr=NF90_CLOSE(ncid) +end +subroutine get_outfile(fname) +use stochy_namelist_def +character*80,intent(out) :: fname +character*4 :: s_ntrunc,s_lat,s_lon + write(s_ntrunc,'(I4)') ntrunc + write(s_lat,'(I4)') lat_s + write(s_lon,'(I4)') lon_s + fname=trim('workg_T'//trim(adjustl(s_ntrunc))//'_'//trim(adjustl(s_lon))//'x'//trim(adjustl(s_lat))) + return +end diff --git a/standalone_stochy_module.F90 b/standalone_stochy_module.F90 new file mode 100644 index 0000000..c33c71d --- /dev/null +++ b/standalone_stochy_module.F90 @@ -0,0 +1,91 @@ +module standalone_stochy_module + +use machine +implicit none +public +integer :: isc,jsc,iec,jec,isd,ied,jsd,jed + +type GFS_diag_type + real (kind=kind_phys), allocatable :: ca_out (:) !< cellular automata fraction + real (kind=kind_phys), allocatable :: ca_deep (:) !< cellular automata fraction + real (kind=kind_phys), allocatable :: ca_turb (:) !< cellular automata fraction + real (kind=kind_phys), allocatable :: ca_shal (:) !< cellular automata fraction + real (kind=kind_phys), allocatable :: ca_rad (:) !< cellular automata fraction + real (kind=kind_phys), allocatable :: ca_micro (:) !< cellular automata fraction + real (kind=kind_phys), allocatable :: ca1 (:) + real (kind=kind_phys), allocatable :: ca2 (:) + real (kind=kind_phys), allocatable :: ca3 (:) +end type GFS_diag_type +type GFS_control_type + integer :: levs,me,nx,ny + integer,allocatable :: blksz(:) !< for explicit data blocking: block sizes of all blocks + real(kind=kind_phys) :: dtp !< physics timestep in seconds + real(kind=kind_phys) :: phour !< previous forecast hour + real(kind=kind_phys) :: sppt_amp !< amplitude of sppt (to go to cld scheme) + integer :: kdt !< current forecast iteration + logical :: do_sppt,do_shum,do_skeb,do_sfcperts,use_zmtnblck,do_rnda + integer :: skeb_npass,nsfcpert + character(len=65) :: fn_nml !< namelist filename + character(len=256),allocatable :: input_nml_file(:) !< character string containing full namelist + real(kind=kind_phys) :: pertz0(5),pertzt(5),pertshc(5),pertlai(5),pertvegf(5),pertalb(5) + !---cellular automata control parameters + integer :: nca !< number of independent cellular automata + integer :: nlives !< cellular automata lifetime + integer :: ncells !< cellular automata finer grid + real(kind=kind_phys) :: nfracseed !< cellular automata seed probability + integer :: nseed !< cellular automata seed frequency + logical :: do_ca !< cellular automata main switch + logical :: ca_sgs !< switch for sgs ca + logical :: ca_global !< switch for global ca + logical :: ca_smooth !< switch for gaussian spatial filter + logical :: isppt_deep !< switch for combination with isppt_deep. OBS! Switches off SPPT on other tendencies! + logical :: isppt_pbl + logical :: isppt_shal ! + logical :: pert_flux ! + logical :: pert_trigger ! 
+ integer :: iseed_ca !< seed for random number generation in ca scheme + integer :: ca_amplitude !< seed for random number generation in ca scheme + integer :: nspinup !< number of iterations to spin up the ca + real(kind=kind_phys) :: nthresh !< threshold used for perturbed vertical velocity +end type GFS_control_type + + type GFS_statein_type + real (kind=kind_phys), allocatable :: pgr (:) !< surface pressure (Pa) real + real (kind=kind_phys), allocatable :: qgrs (:,:,:) !< layer mean tracer concentration + real (kind=kind_phys), allocatable :: vvl (:,:) !< layer mean vertical velocity in pa/sec + real (kind=kind_phys), allocatable :: prsl (:,:) !< model layer mean pressure Pa +end type GFS_statein_type + + +type GFS_init_type + integer :: nlunit + real(kind=kind_phys),allocatable :: ak(:),bk(:),xlon(:,:),xlat(:,:) + integer,allocatable :: blksz(:) !< for explicit data blocking: block sizes of all blocks +end type GFS_init_type + +type GFS_grid_type + real (kind=kind_phys),allocatable :: xlat (:) !< grid latitude in radians, default to pi/2 -> + real (kind=kind_phys),allocatable :: xlon (:) !< grid longitude in radians, default to pi/2 -> +end type GFS_grid_type + +type GFS_coupling_type + real (kind=kind_phys),allocatable :: shum_wts (:,:) + real (kind=kind_phys),allocatable :: sppt_wts (:,:) + real (kind=kind_phys),allocatable :: sppt_pattern(:) + real (kind=kind_phys),allocatable :: skebu_wts (:,:) + real (kind=kind_phys),allocatable :: skebv_wts (:,:) + real (kind=kind_phys),allocatable :: sfc_wts (:,:) + real (kind=kind_phys), allocatable :: cape (:) !< cellular automata fraction + real (kind=kind_phys), allocatable :: ca_out (:) !< cellular automata fraction + real (kind=kind_phys), allocatable :: ca_deep (:) !< cellular automata fraction + real (kind=kind_phys), allocatable :: ca_turb (:) !< cellular automata fraction + real (kind=kind_phys), allocatable :: ca_shal (:) !< cellular automata fraction + real (kind=kind_phys), allocatable :: ca_rad (:) !< cellular automata fraction + real (kind=kind_phys), allocatable :: ca_micro (:) !< cellular automata fraction + real (kind=kind_phys), allocatable :: ca1 (:) + real (kind=kind_phys), allocatable :: ca2 (:) + real (kind=kind_phys), allocatable :: ca3 (:) + integer :: nsfcpert=6 !< number of sfc perturbations +end type GFS_coupling_type +end module standalone_stochy_module + diff --git a/stochastic_physics.F90 b/stochastic_physics.F90 index bd0b627..9b89902 100644 --- a/stochastic_physics.F90 +++ b/stochastic_physics.F90 @@ -1,13 +1,20 @@ +!>@brief The module 'stochastic_physics' is for initialization and running of +!! 
the stochastic physics random pattern generators module stochastic_physics implicit none private -public :: init_stochastic_physics, run_stochastic_physics +public :: init_stochastic_physics +public :: run_stochastic_physics contains +!>@brief The subroutine 'init_stochastic_physics' initializes the stochastic +!!pattern genertors +!>@details It reads the stochastic physics namelist (nam_stoch and nam_sfcperts) +!allocates and polulates the necessary arrays subroutine init_stochastic_physics(Model, Init_parm, ntasks, nthreads) use fv_mp_mod, only : is_master use stochy_internal_state_mod @@ -17,9 +24,13 @@ subroutine init_stochastic_physics(Model, Init_parm, ntasks, nthreads) use stochy_gg_def,only : colrad_a use stochy_namelist_def use physcons, only: con_pi -use spectral_layout_mod,only:me,ompthreads +use spectral_layout_mod,only:me,ompthreads,nodes use mpp_mod -use GFS_typedefs, only: GFS_control_type, GFS_init_type +#ifdef STOCHY_UNIT_TEST + use standalone_stochy_module, only: GFS_control_type, GFS_init_type +# else + use GFS_typedefs, only: GFS_control_type, GFS_init_type +#endif implicit none type(GFS_control_type), intent(inout) :: Model @@ -31,7 +42,7 @@ subroutine init_stochastic_physics(Model, Init_parm, ntasks, nthreads) integer :: iret real*8 :: PRSI(Model%levs),PRSL(Model%levs),dx real, allocatable :: skeb_vloc(:) -integer :: k,kflip,latghf,nodes,blk,k2 +integer :: k,kflip,latghf,blk,k2 character*2::proc ! Set/update shared variables in spectral_layout_mod @@ -87,7 +98,7 @@ subroutine init_stochastic_physics(Model, Init_parm, ntasks, nthreads) sl(k)= 0.5*(Init_parm%ak(k)/101300.+Init_parm%bk(k)+Init_parm%ak(k+1)/101300.0+Init_parm%bk(k+1)) ! si are now sigmas ! if(is_master())print*,'sl(k)',k,sl(k),Init_parm%ak(k),Init_parm%bk(k) enddo -if (do_sppt) then +if (Model%do_sppt) then allocate(vfact_sppt(Model%levs)) do k=1,Model%levs if (sl(k) .lt. sppt_sigtop1 .and. sl(k) .gt. sppt_sigtop2) then @@ -108,7 +119,7 @@ subroutine init_stochastic_physics(Model, Init_parm, ntasks, nthreads) enddo endif endif -if (do_skeb) then +if (Model%do_skeb) then !print*,'allocating skeb stuff',skeblevs allocate(vfact_skeb(Model%levs)) allocate(skeb_vloc(skeblevs)) ! local @@ -153,7 +164,7 @@ subroutine init_stochastic_physics(Model, Init_parm, ntasks, nthreads) skeb_vpts(:,2)=skeb_vpts(:,1)+1.0 endif -if (do_shum) then +if (Model%do_shum) then allocate(vfact_shum(Model%levs)) do k=1,Model%levs vfact_shum(k) = exp((sl(k)-1.)/shum_sigefold) @@ -187,7 +198,8 @@ subroutine init_stochastic_physics(Model, Init_parm, ntasks, nthreads) !print *,'done with init_stochastic_physics' end subroutine init_stochastic_physics - +!>@brief The subroutine 'run_stochastic_physics' updates the random patterns if +!!necessary subroutine run_stochastic_physics(Model, Grid, Coupling, nthreads) use fv_mp_mod, only : is_master use stochy_internal_state_mod @@ -198,7 +210,11 @@ subroutine run_stochastic_physics(Model, Grid, Coupling, nthreads) use stochy_namelist_def use spectral_layout_mod,only:me,ompthreads use mpp_mod +#ifdef STOCHY_UNIT_TEST +use standalone_stochy_module, only: GFS_control_type, GFS_grid_type, GFS_Coupling_type +#else use GFS_typedefs, only: GFS_control_type, GFS_grid_type, GFS_Coupling_type +#endif implicit none type(GFS_control_type), intent(in) :: Model type(GFS_grid_type), intent(in) :: Grid(:) @@ -213,7 +229,7 @@ subroutine run_stochastic_physics(Model, Grid, Coupling, nthreads) character*120 :: sfile character*6 :: STRFH -if ( (.NOT. do_sppt) .AND. (.NOT. do_shum) .AND. (.NOT. do_skeb) .AND. 
(.NOT. do_sfcperts) ) return +if ( (.NOT. Model%do_sppt) .AND. (.NOT. Model%do_shum) .AND. (.NOT. Model%do_skeb) ) return ! Update number of threads in shared variables in spectral_layout_mod and set block-related variables ompthreads = nthreads @@ -221,7 +237,7 @@ subroutine run_stochastic_physics(Model, Grid, Coupling, nthreads) maxlen = maxval(Model%blksz(:)) ! check to see if it is time to write out random patterns -if (Model%phour .EQ. fhstoch) then +if (fhstoch.GE. 0 .AND. MOD(Model%phour,fhstoch) .EQ. 0) then write(STRFH,FMT='(I6.6)') nint(Model%phour) sfile='stoch_out.F'//trim(STRFH) call dump_patterns(sfile) @@ -229,35 +245,41 @@ subroutine run_stochastic_physics(Model, Grid, Coupling, nthreads) allocate(tmp_wts(nblks,maxlen)) allocate(tmpu_wts(nblks,maxlen,Model%levs)) allocate(tmpv_wts(nblks,maxlen,Model%levs)) -if (do_sppt) then - call get_random_pattern_fv3(rpattern_sppt,nsppt,gis_stochy,Model,Grid,nblks,maxlen,tmp_wts) - DO blk=1,nblks - len=size(Grid(blk)%xlat,1) - DO k=1,Model%levs - Coupling(blk)%sppt_wts(:,k)=tmp_wts(blk,1:len)*vfact_sppt(k) +if (Model%do_sppt) then + if (mod(Model%kdt,nssppt) == 1 .or. nssppt == 1) then + call get_random_pattern_fv3(rpattern_sppt,nsppt,gis_stochy,Model,Grid,nblks,maxlen,tmp_wts) + DO blk=1,nblks + len=size(Grid(blk)%xlat,1) + DO k=1,Model%levs + Coupling(blk)%sppt_wts(:,k)=tmp_wts(blk,1:len)*vfact_sppt(k) + ENDDO + if (sppt_logit) Coupling(blk)%sppt_wts(:,:) = (2./(1.+exp(Coupling(blk)%sppt_wts(:,:))))-1. + Coupling(blk)%sppt_wts(:,:)= Coupling(blk)%sppt_wts(:,:)+1.0 ENDDO - if (sppt_logit) Coupling(blk)%sppt_wts(:,:) = (2./(1.+exp(Coupling(blk)%sppt_wts(:,:))))-1. - Coupling(blk)%sppt_wts(:,:)= Coupling(blk)%sppt_wts(:,:)+1.0 - ENDDO + endif endif -if (do_shum) then - call get_random_pattern_fv3(rpattern_shum,nshum,gis_stochy,Model,Grid,nblks,maxlen,tmp_wts) - DO blk=1,nblks - len=size(Grid(blk)%xlat,1) - DO k=1,Model%levs - Coupling(blk)%shum_wts(:,k)=tmp_wts(blk,1:len)*vfact_shum(k) +if (Model%do_shum) then + if (mod(Model%kdt,nsshum) == 1 .or. nsshum == 1) then + call get_random_pattern_fv3(rpattern_shum,nshum,gis_stochy,Model,Grid,nblks,maxlen,tmp_wts) + DO blk=1,nblks + len=size(Grid(blk)%xlat,1) + DO k=1,Model%levs + Coupling(blk)%shum_wts(:,k)=tmp_wts(blk,1:len)*vfact_shum(k) + ENDDO ENDDO - ENDDO + endif endif -if (do_skeb) then - call get_random_pattern_fv3_vect(rpattern_skeb,nskeb,gis_stochy,Model,Grid,nblks,maxlen,tmpu_wts,tmpv_wts) - DO blk=1,nblks - len=size(Grid(blk)%xlat,1) - DO k=1,Model%levs - Coupling(blk)%skebu_wts(:,k)=tmpu_wts(blk,1:len,k)*vfact_skeb(k) - Coupling(blk)%skebv_wts(:,k)=tmpv_wts(blk,1:len,k)*vfact_skeb(k) +if (Model%do_skeb) then + if (mod(Model%kdt,nsskeb) == 1 .or. 
@@ -229,35 +245,41 @@ subroutine run_stochastic_physics(Model, Grid, Coupling, nthreads)
 allocate(tmp_wts(nblks,maxlen))
 allocate(tmpu_wts(nblks,maxlen,Model%levs))
 allocate(tmpv_wts(nblks,maxlen,Model%levs))
-if (do_sppt) then
-   call get_random_pattern_fv3(rpattern_sppt,nsppt,gis_stochy,Model,Grid,nblks,maxlen,tmp_wts)
-   DO blk=1,nblks
-      len=size(Grid(blk)%xlat,1)
-      DO k=1,Model%levs
-         Coupling(blk)%sppt_wts(:,k)=tmp_wts(blk,1:len)*vfact_sppt(k)
+if (Model%do_sppt) then
+   if (mod(Model%kdt,nssppt) == 1 .or. nssppt == 1) then
+      call get_random_pattern_fv3(rpattern_sppt,nsppt,gis_stochy,Model,Grid,nblks,maxlen,tmp_wts)
+      DO blk=1,nblks
+         len=size(Grid(blk)%xlat,1)
+         DO k=1,Model%levs
+            Coupling(blk)%sppt_wts(:,k)=tmp_wts(blk,1:len)*vfact_sppt(k)
+         ENDDO
+         if (sppt_logit) Coupling(blk)%sppt_wts(:,:) = (2./(1.+exp(Coupling(blk)%sppt_wts(:,:))))-1.
+         Coupling(blk)%sppt_wts(:,:)= Coupling(blk)%sppt_wts(:,:)+1.0
       ENDDO
-      if (sppt_logit) Coupling(blk)%sppt_wts(:,:) = (2./(1.+exp(Coupling(blk)%sppt_wts(:,:))))-1.
-      Coupling(blk)%sppt_wts(:,:)= Coupling(blk)%sppt_wts(:,:)+1.0
-   ENDDO
+   endif
 endif
-if (do_shum) then
-   call get_random_pattern_fv3(rpattern_shum,nshum,gis_stochy,Model,Grid,nblks,maxlen,tmp_wts)
-   DO blk=1,nblks
-      len=size(Grid(blk)%xlat,1)
-      DO k=1,Model%levs
-         Coupling(blk)%shum_wts(:,k)=tmp_wts(blk,1:len)*vfact_shum(k)
+if (Model%do_shum) then
+   if (mod(Model%kdt,nsshum) == 1 .or. nsshum == 1) then
+      call get_random_pattern_fv3(rpattern_shum,nshum,gis_stochy,Model,Grid,nblks,maxlen,tmp_wts)
+      DO blk=1,nblks
+         len=size(Grid(blk)%xlat,1)
+         DO k=1,Model%levs
+            Coupling(blk)%shum_wts(:,k)=tmp_wts(blk,1:len)*vfact_shum(k)
+         ENDDO
       ENDDO
-   ENDDO
+   endif
 endif
-if (do_skeb) then
-   call get_random_pattern_fv3_vect(rpattern_skeb,nskeb,gis_stochy,Model,Grid,nblks,maxlen,tmpu_wts,tmpv_wts)
-   DO blk=1,nblks
-      len=size(Grid(blk)%xlat,1)
-      DO k=1,Model%levs
-         Coupling(blk)%skebu_wts(:,k)=tmpu_wts(blk,1:len,k)*vfact_skeb(k)
-         Coupling(blk)%skebv_wts(:,k)=tmpv_wts(blk,1:len,k)*vfact_skeb(k)
+if (Model%do_skeb) then
+   if (mod(Model%kdt,nsskeb) == 1 .or. nsskeb == 1) then
+      call get_random_pattern_fv3_vect(rpattern_skeb,nskeb,gis_stochy,Model,Grid,nblks,maxlen,tmpu_wts,tmpv_wts)
+      DO blk=1,nblks
+         len=size(Grid(blk)%xlat,1)
+         DO k=1,Model%levs
+            Coupling(blk)%skebu_wts(:,k)=tmpu_wts(blk,1:len,k)*vfact_skeb(k)
+            Coupling(blk)%skebv_wts(:,k)=tmpv_wts(blk,1:len,k)*vfact_skeb(k)
+         ENDDO
       ENDDO
-   ENDDO
+   endif
 endif
 deallocate(tmp_wts)
 deallocate(tmpu_wts)
@@ -286,7 +308,11 @@ subroutine run_stochastic_physics_sfc(Model, Grid, Coupling)
 use stochy_resol_def , only : latg,lonf
 use stochy_namelist_def
 !use mpp_mod
+#ifdef STOCHY_UNIT_TEST
+ use standalone_stochy_module, only: GFS_control_type, GFS_grid_type, GFS_Coupling_type
+#else
 use GFS_typedefs, only: GFS_control_type, GFS_grid_type, GFS_Coupling_type
+#endif
 implicit none
 type(GFS_control_type), intent(in) :: Model
 type(GFS_grid_type), intent(in) :: Grid(:)
@@ -299,7 +325,7 @@ subroutine run_stochastic_physics_sfc(Model, Grid, Coupling)
 integer :: nblks, blk, len, maxlen
 character*120 :: sfile
 character*6 :: STRFH
-if (.NOT. do_sfcperts) return
+if (.NOT. Model%do_sfcperts) return
 ! Set block-related variables
 nblks = size(Model%blksz)
@@ -307,7 +333,7 @@ subroutine run_stochastic_physics_sfc(Model, Grid, Coupling)
 allocate(tmpsfc_wts(nblks,maxlen,Model%nsfcpert)) ! mg, sfc-perts
 if (is_master()) then
-   print*,'In init_stochastic_physics: do_sfcperts ',do_sfcperts
+   print*,'In run_stochastic_physics_sfc'
 endif
 call get_random_pattern_sfc_fv3(rpattern_sfc,npsfc,gis_stochy,Model,Grid,nblks,maxlen,tmpsfc_wts)
 DO blk=1,nblks
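One detail worth calling out from the run_stochastic_physics changes above: when sppt_logit is set, the two added lines transform the raw pattern before it is used as a multiplicative weight. A standalone sketch, not part of the patch, evaluates that transform for a few illustrative raw values and shows it maps everything into (0,2), centred on 1:

! Standalone sketch, not part of the patch: applies the sppt_logit transform added above,
!   wts = (2./(1.+exp(wts))) - 1.   followed by   wts = wts + 1.0
! Any raw pattern value ends up strictly between 0 and 2, and a raw value of zero maps
! to exactly 1, so the SPPT weight stays positive.  The raw values are illustrative.
program sppt_logit_check
  implicit none
  real :: raw(5) = (/ -4.0, -1.0, 0.0, 1.0, 4.0 /)
  real :: wts(5)
  wts = (2./(1.+exp(raw))) - 1.
  wts = wts + 1.0
  print '(5f8.4)', wts
end program sppt_logit_check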
diff --git a/stochy_data_mod.F90 b/stochy_data_mod.F90
index f1cdf87..6291f8d 100644
--- a/stochy_data_mod.F90
+++ b/stochy_data_mod.F90
@@ -59,15 +59,15 @@ subroutine init_stochdata(nlevs,delt,input_nml_file,fn_nml,nlunit,iret)
 levs=nlevs
 iret=0
- if(is_master()) print*,'in init stochdata'
 call compns_stochy (me,size(input_nml_file,1),input_nml_file(:),fn_nml,nlunit,delt,iret)
+ if(is_master()) print*,'in init stochdata',nodes,lat_s
 if ( (.NOT. do_sppt) .AND. (.NOT. do_shum) .AND. (.NOT. do_skeb) .AND. (.NOT. do_sfcperts) ) return
- if (nodes.GE.lat_s/2) then
-    lat_s=(int(nodes/12)+1)*24
-    lon_s=lat_s*2
-    ntrunc=lat_s-2
-    if (is_master()) print*,'WARNING: spectral resolution is too low for number of mpi_tasks, resetting lon_s,lat_s,and ntrunc to',lon_s,lat_s,ntrunc
- endif
+! if (nodes.GE.lat_s/2) then
+!    lat_s=(int(nodes/12)+1)*24
+!    lon_s=lat_s*2
+!    ntrunc=lat_s-2
+!    if (is_master()) print*,'WARNING: spectral resolution is too low for number of mpi_tasks, resetting lon_s,lat_s,and ntrunc to',lon_s,lat_s,ntrunc
+! endif
 call initialize_spectral(gis_stochy, iret)
 if (iret/=0) return
 allocate(noise_e(len_trie_ls,2),noise_o(len_trio_ls,2))
@@ -139,10 +139,10 @@ subroutine init_stochdata(nlevs,delt,input_nml_file,fn_nml,nlunit,iret)
 spinup_efolds = 0
 if (nsppt > 0) then
    if (is_master()) print *, 'Initialize random pattern for SPPT'
-   call patterngenerator_init(sppt_lscale,delt,sppt_tau,sppt,iseed_sppt,rpattern_sppt, &
-        lonf,latg,jcap,gis_stochy%ls_node,nsppt,1,0)
+   call patterngenerator_init(sppt_lscale,spptint,sppt_tau,sppt,iseed_sppt,rpattern_sppt, &
+        lonf,latg,jcap,gis_stochy%ls_node,nsppt,1,0,new_lscale)
    do n=1,nsppt
-      nspinup = spinup_efolds*sppt_tau(n)/delt
+      nspinup = spinup_efolds*sppt_tau(n)/spptint
       if (stochini) then
          call read_pattern(rpattern_sppt(n),1,stochlun)
       else
@@ -171,10 +171,10 @@ subroutine init_stochdata(nlevs,delt,input_nml_file,fn_nml,nlunit,iret)
 endif
 if (nshum > 0) then
    if (is_master()) print *, 'Initialize random pattern for SHUM'
-   call patterngenerator_init(shum_lscale,delt,shum_tau,shum,iseed_shum,rpattern_shum, &
-        lonf,latg,jcap,gis_stochy%ls_node,nshum,1,0)
+   call patterngenerator_init(shum_lscale,shumint,shum_tau,shum,iseed_shum,rpattern_shum, &
+        lonf,latg,jcap,gis_stochy%ls_node,nshum,1,0,new_lscale)
    do n=1,nshum
-      nspinup = spinup_efolds*shum_tau(n)/delt
+      nspinup = spinup_efolds*shum_tau(n)/shumint
       if (stochini) then
         call read_pattern(rpattern_shum(n),1,stochlun)
      else
@@ -204,14 +204,14 @@ subroutine init_stochdata(nlevs,delt,input_nml_file,fn_nml,nlunit,iret)
 if (nskeb > 0) then
 ! determine number of skeb levels to deal with temporal/vertical correlations
-   skeblevs=nint(skeb_tau(1)/delt*skeb_vdof)
+   skeblevs=nint(skeb_tau(1)/skebint*skeb_vdof)
 ! backscatter noise.
    if (is_master()) print *, 'Initialize random pattern for SKEB',skeblevs
-   call patterngenerator_init(skeb_lscale,delt,skeb_tau,skeb,iseed_skeb,rpattern_skeb, &
-        lonf,latg,jcap,gis_stochy%ls_node,nskeb,skeblevs,skeb_varspect_opt)
+   call patterngenerator_init(skeb_lscale,skebint,skeb_tau,skeb,iseed_skeb,rpattern_skeb, &
+        lonf,latg,jcap,gis_stochy%ls_node,nskeb,skeblevs,skeb_varspect_opt,new_lscale)
    do n=1,nskeb
    do k=1,skeblevs
-      nspinup = spinup_efolds*skeb_tau(n)/delt
+      nspinup = spinup_efolds*skeb_tau(n)/skebint
       if (stochini) then
          call read_pattern(rpattern_skeb(n),k,stochlun)
          if (is_master()) print *, 'skeb read',k,rpattern_skeb(n)%spec_o(5,1,k)
@@ -300,7 +300,7 @@ subroutine init_stochdata(nlevs,delt,input_nml_file,fn_nml,nlunit,iret)
 if (npsfc > 0) then
    pertsfc(1) = 1.
    call patterngenerator_init(sfc_lscale,delt,sfc_tau,pertsfc,iseed_sfc,rpattern_sfc, &
-        lonf,latg,jcap,gis_stochy%ls_node,npsfc,nsfcpert,0)
+        lonf,latg,jcap,gis_stochy%ls_node,npsfc,nsfcpert,0,new_lscale)
    do n=1,npsfc
       if (is_master()) print *, 'Initialize random pattern for SFC-PERTS',n
       do k=1,nsfcpert
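With the resolution check in init_stochdata commented out above, lon_s, lat_s and ntrunc are now used exactly as configured, so (per the warning text in the disabled block) they have to be chosen consistently with the MPI task count up front. The standalone sketch below, not part of the patch and using illustrative task counts, simply replays the arithmetic of the disabled block to show what it used to reset them to:

! Standalone sketch, not part of the patch: replays the formulas from the block that is
! now commented out in init_stochdata, for a few hypothetical MPI task counts.
program old_resolution_reset_check
  implicit none
  integer :: tasks(3) = (/ 24, 96, 384 /)   ! hypothetical task counts
  integer :: nodes, lat_s, lon_s, ntrunc, i
  do i = 1, size(tasks)
     nodes  = tasks(i)
     lat_s  = (int(nodes/12)+1)*24          ! copied from the disabled block
     lon_s  = lat_s*2
     ntrunc = lat_s-2
     print '(a,i4,3(a,i5))', 'tasks=', nodes, '  lat_s=', lat_s, '  lon_s=', lon_s, '  ntrunc=', ntrunc
  end do
end program old_resolution_reset_check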
diff --git a/stochy_namelist_def.F90 b/stochy_namelist_def.F90
index 230145f..1a18d42 100644
--- a/stochy_namelist_def.F90
+++ b/stochy_namelist_def.F90
@@ -7,7 +7,7 @@ module stochy_namelist_def
 implicit none
 public
-      integer nsskeb,lon_s,lat_s,ntrunc
+      integer nssppt,nsshum,nsskeb,lon_s,lat_s,ntrunc
 ! pjp stochastic physics
       integer skeb_varspect_opt,skeb_npass
@@ -16,14 +16,15 @@ module stochy_namelist_def
       real(kind=kind_dbl_prec) :: skeb_sigtop1,skeb_sigtop2, &
       sppt_sigtop1,sppt_sigtop2,shum_sigefold, &
       skeb_vdof
-      real(kind=kind_dbl_prec) fhstoch,skeb_diss_smooth,skebint,skebnorm
+      real(kind=kind_dbl_prec) fhstoch,skeb_diss_smooth,spptint,shumint,skebint,skebnorm
       real(kind=kind_dbl_prec), dimension(5) :: skeb,skeb_lscale,skeb_tau
       real(kind=kind_dbl_prec), dimension(5) :: sppt,sppt_lscale,sppt_tau
       real(kind=kind_dbl_prec), dimension(5) :: shum,shum_lscale,shum_tau
       integer,dimension(5) ::skeb_vfilt
       integer(8),dimension(5) ::iseed_sppt,iseed_shum,iseed_skeb
-      logical stochini,sppt_logit
-      logical do_shum,do_sppt,do_skeb,use_zmtnblck
+      logical stochini,sppt_logit,new_lscale
+      logical use_zmtnblck
+      logical do_shum,do_sppt,do_skeb
 ! mg surface perturbations
       real(kind=kind_dbl_prec), dimension(5) :: sfc_lscale,sfc_tau
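The new module variables above (nssppt, nsshum, spptint, shumint, new_lscale) suggest that the pattern-update intervals and the lengthscale fix are meant to be user-configurable. As a hedged illustration only: the patch does not show compns_stochy, so both the namelist group name and the entry names below are assumptions, and every value is made up.

! Hedged illustration, not part of the patch: a possible stochastic-physics namelist,
! assuming spptint/shumint/skebint, fhstoch and new_lscale are read by compns_stochy
! from the group the header comment calls nam_stoch (spelled nam_stochy here).
&nam_stochy
  sppt        = 0.5,          ! illustrative values only
  sppt_tau    = 21600,
  sppt_lscale = 500000,
  spptint     = 600,
  shum        = 0.005,
  shumint     = 600,
  skeb        = 0.3,
  skebint     = 600,
  fhstoch     = 6.0,
  new_lscale  = .true.,
/

nssppt and nsshum themselves do not appear in the init calls above, so they are presumably derived inside compns_stochy from these intervals and the model time step rather than read directly.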
diff --git a/stochy_patterngenerator.F90 b/stochy_patterngenerator.F90
index fff4157..dc36276 100644
--- a/stochy_patterngenerator.F90
+++ b/stochy_patterngenerator.F90
@@ -36,11 +36,12 @@ module stochy_patterngenerator_mod
 subroutine patterngenerator_init(lscale, delt, tscale, stdev, iseed, rpattern,&
            nlon, nlat, jcap, ls_node, npatterns,&
-           nlevs, varspect_opt)
+           nlevs, varspect_opt,new_lscale)
 real(kind_dbl_prec), intent(in),dimension(npatterns) :: lscale,tscale,stdev
 real, intent(in) :: delt
 integer, intent(in) :: nlon,nlat,jcap,npatterns,varspect_opt
 integer, intent(in) :: ls_node(ls_dim,3),nlevs
+ logical, intent(in) :: new_lscale
 type(random_pattern), intent(out), dimension(npatterns) :: rpattern
 integer(8), intent(inout) :: iseed(npatterns)
 integer m,j,l,n,nm,nn,np,indev1,indev2,indod1,indod2
@@ -162,9 +163,9 @@ subroutine patterngenerator_init(lscale, delt, tscale, stdev, iseed, rpattern,&
       if (is_master()) then
          print *,'WARNING: illegal value for varspect_opt (should be 0 or 1), using 0 (gaussian spectrum)...'
       endif
-      call setvarspect(rpattern(np),0)
+      call setvarspect(rpattern(np),0,new_lscale)
    else
-      call setvarspect(rpattern(np),varspect_opt)
+      call setvarspect(rpattern(np),varspect_opt,new_lscale)
    endif
 enddo ! n=1,npatterns
 end subroutine patterngenerator_init
@@ -281,15 +282,17 @@ subroutine patterngenerator_advance(rpattern,k,skeb_first_call)
    enddo
 end subroutine patterngenerator_advance
-subroutine setvarspect(rpattern,varspect_opt)
+subroutine setvarspect(rpattern,varspect_opt,new_lscale)
 ! define variance spectrum (isotropic covariance)
 ! normalized to unit global variance
 type(random_pattern), intent(inout) :: rpattern
 integer, intent(in) :: varspect_opt
+ logical, intent(in) :: new_lscale
 integer :: n
 complex(kind_evod) noise(ndimspec)
-real(kind_evod) var,rerth
+real(kind_evod) var,rerth,inv_rerth_sq
 rerth =6.3712e+6 ! radius of earth (m)
+ inv_rerth_sq=1.0/(rerth**2)
 ! 1d variance spectrum (as a function of total wavenumber)
 if (varspect_opt == 0) then ! gaussian
 ! rpattern%lengthscale is interpreted as an efolding length
@@ -298,7 +301,12 @@ subroutine setvarspect(rpattern,varspect_opt)
    rpattern%varspectrum1d(n) = exp(-rpattern%lengthscale**2*(float(n)*(float(n)+1.))/(4.*rerth**2))
 enddo
 ! scaling factors for spectral coeffs of white noise pattern with unit variance
- rpattern%varspectrum = sqrt(ntrunc*exp(rpattern%lengthscale**2*rpattern%lap/(4.*rerth**2)))
+ if (new_lscale) then
+    !fix for proper lengthscale
+    rpattern%varspectrum = ntrunc*exp((rpattern%lengthscale*0.25)**2*rpattern%lap*inv_rerth_sq)
+ else
+    rpattern%varspectrum = sqrt(ntrunc*exp(rpattern%lengthscale**2*rpattern%lap/(4.*rerth**2)))
+ endif
 else if (varspect_opt == 1) then ! power law
 ! rpattern%lengthscale is interpreted as a power, not a length.
 do n=0,ntrunc
diff --git a/sumfln_stochy.f b/sumfln_stochy.f
index 91a751f..cbbfd23 100644
--- a/sumfln_stochy.f
+++ b/sumfln_stochy.f
@@ -18,7 +18,8 @@ subroutine sumfln_stochy(flnev,flnod,lat1s,plnev,plnod,
       use machine
       use spectral_layout_mod, only : num_parthds_stochy => ompthreads
 !or  : use fv_mp_mod ?
-      use mpp_mod, only: mpp_npes, mpp_alltoall, mpp_get_current_pelist
+      use mpp_mod, only: mpp_pe,mpp_npes, mpp_alltoall,
+     &                   mpp_get_current_pelist
       implicit none
 !
@@ -70,11 +71,11 @@ subroutine sumfln_stochy(flnev,flnod,lat1s,plnev,plnod,
 !     real(kind=4),dimension(2*nvars*ls_dim*workdim*nodes)::
 !    &  work1dr,work1ds
       real(kind=4),pointer:: work1dr(:),work1ds(:)
-      integer, dimension(jcap+1) :: kpts, kptr, sendcounts, recvcounts,
+      integer, dimension(nodes) :: kpts, kptr, sendcounts, recvcounts,
     &         sdispls
 !
       integer              ierr,ilat,ipt_ls,  lmax,lval,i,jj,lonl,nv
-      integer              node,nvar,arrsz
+      integer              node,nvar,arrsz,my_pe
       integer ilat_list(nodes) ! for OMP buffer copy
 !
 ! statement functions
@@ -93,6 +94,7 @@ subroutine sumfln_stochy(flnev,flnod,lat1s,plnev,plnod,
       num_threads = min(num_parthds_stochy,nvars)
      nvar_thread_max = (nvars+num_threads-1)/num_threads
      npes = mpp_npes()
+     my_pe=mpp_pe()
      allocate(pelist(0:npes-1))
      call mpp_get_current_pelist(pelist)
      kpts = 0
@@ -187,7 +189,6 @@ subroutine sumfln_stochy(flnev,flnod,lat1s,plnev,plnod,
      do node = 1, nodes - 1
        ilat_list(node+1) = ilat_list(node) + lats_nodes(node)
      end do
-
 !$omp parallel do private(node,jj,ilat,lat,ipt_ls,nvar,kn,n2)
      do node=1,nodes
        do jj=1,lats_nodes(node)
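Circling back to the setvarspect change earlier in the patch: the new_lscale branch replaces the old sqrt-based scaling with one that, per the added comment, is meant to make lengthscale behave as a proper length scale. A standalone sketch, not part of the patch, compares the two factors; it assumes rpattern%lap holds the negative Laplacian eigenvalues -n*(n+1) (not shown in this patch), and the truncation and length scale below are illustrative values only.

! Standalone sketch, not part of the patch: old vs. new gaussian-spectrum scaling
! from setvarspect, for a hypothetical truncation and length scale.
program varspectrum_scaling_check
  implicit none
  real, parameter :: rerth = 6.3712e+6          ! radius of earth (m), as in setvarspect
  real, parameter :: inv_rerth_sq = 1.0/(rerth**2)
  real    :: lengthscale = 500.e3               ! hypothetical length scale (m)
  integer :: ntrunc = 96                        ! hypothetical truncation
  integer :: n
  real    :: lap, old_fac, new_fac
  do n = 8, 96, 44
     lap = -real(n)*(real(n)+1.)                ! assumed meaning of rpattern%lap
     old_fac = sqrt(ntrunc*exp(lengthscale**2*lap/(4.*rerth**2)))
     new_fac = ntrunc*exp((lengthscale*0.25)**2*lap*inv_rerth_sq)
     print '(a,i4,2(a,es12.4))', 'n=', n, '  old=', old_fac, '  new=', new_fac
  end do
end program varspectrum_scaling_check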