From b7a44c5de96da3ae036af0a5695769c1380df55f Mon Sep 17 00:00:00 2001 From: Jeffrey Whitaker Date: Tue, 4 Feb 2020 11:45:46 -0700 Subject: [PATCH 1/4] parallel netcdf IO (#41) * netcdf parallel writing and lon/lat in netcdf file * some changes to get it to (almost) run on hera * lat should be in degrees - remove conversion to radians. * import updates from jswhit/fv3atm * more bug fixes - now works on hera * specify collective access if compression turned on. * use classic model * bug fixes for parallel IO with compression * fix calculation of max compression error * turn off shuffle filter * code simplification * remove debug print * don't use parallel IO for 2d file (since it seems to increase run time) * allow multiple values of output_file, as long as they all start with 'netcdf' * use default chunksize for 2d vars * delete commented out macro ESMF_ERR_ABORT * delete rad2dg * add module_write_netcdf_parallel.F90 * add option to build without parallel netcdf (-DNO_PARALLEL_NETCDF) * fix typo * stub file for building without parallel netcdf lib * allow chunksizes for 2d arrays to be set in model_configure (ichunk2d,jchunk2d) Default is size of array on each write task. * add ichunk3d,jchunk3d,kchunk3d to specify 3d chunksizes. Default is now ichunk3d,jchunk3d same as array size on each PE, kchunk3d=nlevs This results in the fastest writes on hera. * fix typo * put ifdefs in module_write_netcdf_parallel.F90 so no stub file needed * don't need this file anymore * remove module_write_netcdf_parallel_stub.o target * use specified chunksizes for serial IO. If chunksize parameter negative, let netcdf library choose defaults. * update comments * get output_file without esmf call error * syntax fix * fix stub interface in module_write_netcdf_parallel.F90 for cmake build Co-authored-by: junwang-noaa <37633869+junwang-noaa@users.noreply.github.com> --- fv3_cap.F90 | 45 +- io/CMakeLists.txt | 5 + io/makefile | 5 +- io/module_fv3_io_def.F90 | 3 +- io/module_write_netcdf.F90 | 106 ++--- io/module_write_netcdf_parallel.F90 | 617 ++++++++++++++++++++++++++++ io/module_wrt_grid_comp.F90 | 96 ++++- 7 files changed, 805 insertions(+), 72 deletions(-) create mode 100644 io/module_write_netcdf_parallel.F90 diff --git a/fv3_cap.F90 b/fv3_cap.F90 index a099ae32e..c211a9044 100644 --- a/fv3_cap.F90 +++ b/fv3_cap.F90 @@ -37,7 +37,8 @@ module fv3gfs_cap_mod wrttasks_per_group, n_group, & lead_wrttask, last_wrttask, & output_grid, output_file, & - imo, jmo, write_nemsioflip, & + imo,jmo,ichunk2d,jchunk2d,write_nemsioflip,& + ichunk3d,jchunk3d,kchunk3d, & write_fsyncflag, nsout_io, & cen_lon, cen_lat, ideflate, & lon1, lat1, lon2, lat2, dlon, dlat, & @@ -235,8 +236,9 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) integer,dimension(6) :: date, date_init integer :: mpi_comm_atm - integer :: i, j, k, io_unit, urc + integer :: i, j, k, io_unit, urc, ierr integer :: petcount, mype + integer :: num_output_file logical :: isPetLocal logical :: OPENED character(ESMF_MAXSTR) :: name @@ -307,6 +309,14 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) call ESMF_ConfigGetAttribute(config=CF,value=iau_offset,default=0,label ='iau_offset:',rc=rc) if (iau_offset < 0) iau_offset=0 + ! 
chunksizes for netcdf_parallel + call ESMF_ConfigGetAttribute(config=CF,value=ichunk2d,default=0,label ='ichunk2d:',rc=rc) + call ESMF_ConfigGetAttribute(config=CF,value=jchunk2d,default=0,label ='jchunk2d:',rc=rc) + call ESMF_ConfigGetAttribute(config=CF,value=ichunk3d,default=0,label ='ichunk3d:',rc=rc) + call ESMF_ConfigGetAttribute(config=CF,value=jchunk3d,default=0,label ='jchunk3d:',rc=rc) + call ESMF_ConfigGetAttribute(config=CF,value=kchunk3d,default=0,label ='kchunk3d:',rc=rc) + + ! zlib compression flag call ESMF_ConfigGetAttribute(config=CF,value=ideflate,default=0,label ='ideflate:',rc=rc) if (ideflate < 0) ideflate=0 @@ -346,8 +356,33 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) CALL ESMF_ConfigGetAttribute(config=CF,value=filename_base(i), rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return enddo - if(mype == 0) print *,'af nems config,num_files=',num_files, & - 'filename_base=',filename_base + + allocate(output_file(num_files)) + num_output_file = ESMF_ConfigGetLen(config=CF, label ='output_file:',rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + if (num_files == num_output_file) then + CALL ESMF_ConfigGetAttribute(CF,valueList=output_file,label='output_file:', & + count=num_files, rc=RC) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + do i = 1, num_files + if(output_file(i) /= "netcdf" .and. output_file(i) /= "netcdf_parallel") then + write(0,*)"fv3_cap.F90: only netcdf and netcdf_parallel are allowed for multiple values of output_file" + call ESMF_Finalize(endflag=ESMF_END_ABORT) + endif + enddo + else if ( num_output_file == 1) then + CALL ESMF_ConfigGetAttribute(CF,valuelist=output_file,label='output_file:', count=1, rc=RC) + output_file(1:num_files) = output_file(1) + else + output_file(1:num_files) = 'netcdf' + endif + if(mype == 0) then + print *,'af nems config,num_files=',num_files + do i=1,num_files + print *,'num_file=',i,'filename_base= ',trim(filename_base(i)),& + ' output_file= ',trim(output_file(i)) + enddo + endif ! ! variables for alarms call ESMF_ConfigGetAttribute(config=CF, value=nfhout, label ='nfhout:', rc=rc) @@ -359,10 +394,8 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) ! variables for I/O options call ESMF_ConfigGetAttribute(config=CF, value=output_grid, label ='output_grid:',rc=rc) - call ESMF_ConfigGetAttribute(config=CF, value=output_file, label ='output_file:',rc=rc) if (mype == 0) then print *,'output_grid=',trim(output_grid) - print *,'output_file=',trim(output_file) end if write_nemsioflip =.false. write_fsyncflag =.false. 
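[Editor's note] The fv3_cap.F90 changes above read the new I/O options from model_configure via ESMF_ConfigGetAttribute. The sketch below is illustrative only: the option labels ('output_file:', 'ideflate:', 'ichunk2d:', 'jchunk2d:', 'ichunk3d:', 'jchunk3d:', 'kchunk3d:') come from the diff, but the values and the two-entry output_file list are assumptions. Per the commit messages and the write-component code, a chunk size of 0 (the default) falls back to the array size on each write task (kchunk3d to the number of levels), a negative value lets the netCDF library choose its own chunking, and when output_file is given one entry per history bundle every entry must be either 'netcdf' or 'netcdf_parallel'.

  output_grid:  'gaussian_grid'
  output_file:  'netcdf_parallel' 'netcdf'   # one entry per history bundle, or a single entry for all
  ideflate:     1
  ichunk2d:     0          # 0 = per-task array size (default); negative = let the netCDF library choose
  jchunk2d:     0
  ichunk3d:     0
  jchunk3d:     0
  kchunk3d:     64         # illustrative; default (0) resolves to the number of model levels
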
diff --git a/io/CMakeLists.txt b/io/CMakeLists.txt index 5eecdd9ea..e2cedc43d 100644 --- a/io/CMakeLists.txt +++ b/io/CMakeLists.txt @@ -5,6 +5,10 @@ else() add_definitions(-DNO_INLINE_POST) endif() +if(NOT PARALLEL_NETCDF) + add_definitions(-DNO_PARALLEL_NETCDF) +endif() + add_library( io @@ -12,6 +16,7 @@ add_library( FV3GFS_io.F90 module_write_nemsio.F90 module_write_netcdf.F90 + module_write_netcdf_parallel.F90 module_fv3_io_def.F90 module_write_internal_state.F90 module_wrt_grid_comp.F90 diff --git a/io/makefile b/io/makefile index 82e61f68f..b6a3946b0 100644 --- a/io/makefile +++ b/io/makefile @@ -41,6 +41,7 @@ SRCS_F90 = \ $(POST_SRC) \ ./module_write_nemsio.F90 \ ./module_write_netcdf.F90 \ + ./module_write_netcdf_parallel.F90 \ ./module_fv3_io_def.F90 \ ./module_write_internal_state.F90 \ ./module_wrt_grid_comp.F90 @@ -69,7 +70,9 @@ post_nems_routines.o: post_nems_routines.F90 module_write_nemsio.o: module_write_nemsio.F90 $(FC) $(CPPDEFS) $(CPPFLAGS) $(FPPFLAGS) $(FFLAGS) $(OTHERFLAGS) $(OTHER_FFLAGS) $(ESMF_INC) $(NEMSIOINC) -c module_write_nemsio.F90 module_write_netcdf.o: module_write_netcdf.F90 - $(FC) $(CPPDEFS) $(CPPFLAGS) $(FPPFLAGS) $(FFLAGS) $(OTHERFLAGS) $(OTHER_FFLAGS) $(ESMF_INC) $(NEMSIOINC) -c module_write_netcdf.F90 + $(FC) $(CPPDEFS) $(CPPFLAGS) $(FPPFLAGS) $(FFLAGS) $(OTHERFLAGS) $(OTHER_FFLAGS) $(ESMF_INC) -c module_write_netcdf.F90 +module_write_netcdf_parallel.o: module_write_netcdf_parallel.F90 + $(FC) $(CPPDEFS) $(CPPFLAGS) $(FPPFLAGS) $(FFLAGS) $(OTHERFLAGS) $(OTHER_FFLAGS) $(ESMF_INC) -c module_write_netcdf_parallel.F90 module_write_internal_state.o: module_write_internal_state.F90 $(FC) $(CPPDEFS) $(CPPFLAGS) $(FPPFLAGS) $(FFLAGS) $(OTHERFLAGS) $(OTHER_FFLAGS) $(ESMF_INC) -c module_write_internal_state.F90 module_wrt_grid_comp.o: module_wrt_grid_comp.F90 diff --git a/io/module_fv3_io_def.F90 b/io/module_fv3_io_def.F90 index 17e9f308d..b31ab666b 100644 --- a/io/module_fv3_io_def.F90 +++ b/io/module_fv3_io_def.F90 @@ -17,13 +17,14 @@ module module_fv3_io_def integer :: num_files character(255) :: app_domain character(255) :: output_grid - character(255) :: output_file integer :: imo,jmo + integer :: ichunk2d,jchunk2d,ichunk3d,jchunk3d,kchunk3d integer :: nbdlphys integer :: nsout_io, iau_offset, ideflate, nbits real :: cen_lon, cen_lat, lon1, lat1, lon2, lat2, dlon, dlat real :: stdlat1, stdlat2, dx, dy character(255),dimension(:),allocatable :: filename_base + character(255),dimension(:),allocatable :: output_file ! integer,dimension(:),allocatable :: lead_wrttask, last_wrttask ! diff --git a/io/module_write_netcdf.F90 b/io/module_write_netcdf.F90 index fc59c75c9..270e36ec6 100644 --- a/io/module_write_netcdf.F90 +++ b/io/module_write_netcdf.F90 @@ -1,7 +1,3 @@ -!#define ESMF_ERR_ABORT(rc) \ -!if (rc /= ESMF_SUCCESS) write(0,*) 'rc=',rc,__FILE__,__LINE__; \ -! if (ESMF_LogFoundError(rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT) - #define ESMF_ERR_RETURN(rc) if (ESMF_LogFoundError(rc, msg="Breaking out of subroutine", line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT) #define NC_ERR_STOP(status) \ @@ -22,7 +18,7 @@ module module_write_netcdf contains !---------------------------------------------------------------------------------------- - subroutine write_netcdf(fieldbundle, wrtfb, filename, mpi_comm, mype, im, jm, rc) + subroutine write_netcdf(fieldbundle, wrtfb, filename, mpi_comm, mype, im, jm, ichunk2d,jchunk2d,ichunk3d,jchunk3d,kchunk3d, rc) ! 
type(ESMF_FieldBundle), intent(in) :: fieldbundle type(ESMF_FieldBundle), intent(in) :: wrtfb @@ -30,6 +26,7 @@ subroutine write_netcdf(fieldbundle, wrtfb, filename, mpi_comm, mype, im, jm, rc integer, intent(in) :: mpi_comm integer, intent(in) :: mype integer, intent(in) :: im, jm + integer, intent(in) :: ichunk2d,jchunk2d,ichunk3d,jchunk3d,kchunk3d integer, optional,intent(out) :: rc ! !** local vars @@ -69,9 +66,7 @@ subroutine write_netcdf(fieldbundle, wrtfb, filename, mpi_comm, mype, im, jm, rc integer :: im_dimid, jm_dimid, pfull_dimid, phalf_dimid, time_dimid integer :: im_varid, jm_varid, lm_varid, time_varid, lon_varid, lat_varid integer, dimension(:), allocatable :: varids -! -!! -! + logical shuffle call ESMF_FieldBundleGet(fieldbundle, fieldCount=fieldCount, rc=rc); ESMF_ERR_RETURN(rc) @@ -117,15 +112,10 @@ subroutine write_netcdf(fieldbundle, wrtfb, filename, mpi_comm, mype, im, jm, rc ! create netcdf file and enter define mode if (mype==0) then - if (ideflate == 0) then - ncerr = nf90_create(trim(filename), cmode=IOR(IOR(NF90_CLOBBER,NF90_64BIT_OFFSET),NF90_SHARE), & - ncid=ncid); NC_ERR_STOP(ncerr) - ncerr = nf90_set_fill(ncid, NF90_NOFILL, oldMode); NC_ERR_STOP(ncerr) - else - ncerr = nf90_create(trim(filename), cmode=IOR(IOR(NF90_CLOBBER,NF90_NETCDF4),NF90_CLASSIC_MODEL), & - ncid=ncid); NC_ERR_STOP(ncerr) - ncerr = nf90_set_fill(ncid, NF90_NOFILL, oldMode); NC_ERR_STOP(ncerr) - endif + ncerr = nf90_create(trim(filename),& + cmode=IOR(IOR(NF90_CLOBBER,NF90_NETCDF4),NF90_CLASSIC_MODEL),& + ncid=ncid); NC_ERR_STOP(ncerr) + ncerr = nf90_set_fill(ncid, NF90_NOFILL, oldMode); NC_ERR_STOP(ncerr) ! define dimensions ncerr = nf90_def_dim(ncid, "grid_xt", im, im_dimid); NC_ERR_STOP(ncerr) @@ -158,32 +148,53 @@ subroutine write_netcdf(fieldbundle, wrtfb, filename, mpi_comm, mype, im, jm, rc if (fldlev(i) == 1) then if (typekind == ESMF_TYPEKIND_R4) then if (ideflate > 0) then - ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, & - (/im_dimid,jm_dimid,time_dimid/), varids(i), & - shuffle=.true.,deflate_level=ideflate, & - chunksizes=(/im,jm,1/)); NC_ERR_STOP(ncerr) + if (ichunk2d < 0 .or. jchunk2d < 0) then + ! let netcdf lib choose chunksize + ! 
shuffle filter on for 2d fields (lossless compression) + ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, & + (/im_dimid,jm_dimid,time_dimid/), varids(i), & + shuffle=.true.,deflate_level=ideflate); NC_ERR_STOP(ncerr) + else + ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, & + (/im_dimid,jm_dimid,time_dimid/), varids(i), & + shuffle=.true.,deflate_level=ideflate,& + chunksizes=(/ichunk2d,jchunk2d,1/),cache_size=40*im*jm); NC_ERR_STOP(ncerr) + endif else - ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, & - (/im_dimid,jm_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr) + ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, & + (/im_dimid,jm_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr) endif else if (typekind == ESMF_TYPEKIND_R8) then - ncerr = nf90_def_var(ncid, trim(fldName), NF90_DOUBLE, & - (/im_dimid,jm_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr) + ncerr = nf90_def_var(ncid, trim(fldName), NF90_DOUBLE, & + (/im_dimid,jm_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr) else - write(0,*)'Unsupported typekind ', typekind - stop + write(0,*)'Unsupported typekind ', typekind + stop end if else if (fldlev(i) > 1) then if (typekind == ESMF_TYPEKIND_R4) then if (ideflate > 0) then - ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, & - (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i), & - shuffle=.false.,deflate_level=ideflate, & - chunksizes=(/im,jm,1,1/)); NC_ERR_STOP(ncerr) + ! shuffle filter off for 3d fields using lossy compression + if (nbits > 0) then + shuffle=.false. + else + shuffle=.true. + endif + if (ichunk3d < 0 .or. jchunk3d < 0 .or. kchunk3d < 0) then + ! let netcdf lib choose chunksize + ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, & + (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i), & + shuffle=shuffle,deflate_level=ideflate); NC_ERR_STOP(ncerr) + else + ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, & + (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i), & + shuffle=shuffle,deflate_level=ideflate,& + chunksizes=(/ichunk3d,jchunk3d,kchunk3d,1/)); NC_ERR_STOP(ncerr) + endif else ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, & - (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr) - endif + (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr) + endif else if (typekind == ESMF_TYPEKIND_R8) then ncerr = nf90_def_var(ncid, trim(fldName), NF90_DOUBLE, & (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr) @@ -224,7 +235,7 @@ subroutine write_netcdf(fieldbundle, wrtfb, filename, mpi_comm, mype, im, jm, rc call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", & name=trim(attName), value=varr8val, & rc=rc); ESMF_ERR_RETURN(rc) - if (trim(attName) /= '_FillValue' ) then + if (trim(attName) /= '_FillValue') then ! FIXME: _FillValue must be cast to var type for recent versions of netcdf ncerr = nf90_put_att(ncid, varids(i), trim(attName), varr8val); NC_ERR_STOP(ncerr) endif @@ -241,23 +252,23 @@ subroutine write_netcdf(fieldbundle, wrtfb, filename, mpi_comm, mype, im, jm, rc end do ! i=1,fieldCount -! write grid_xt, grid_yt attributes + ! write grid_xt, grid_yt attributes if (trim(output_grid) == 'gaussian_grid' .or. 
& trim(output_grid) == 'regional_latlon') then - ncerr = nf90_put_att(ncid, im_varid, "long_name", "T-cell longitude"); NC_ERR_STOP(ncerr) - ncerr = nf90_put_att(ncid, im_varid, "units", "degrees_E"); NC_ERR_STOP(ncerr) - ncerr = nf90_put_att(ncid, jm_varid, "long_name", "T-cell latiitude"); NC_ERR_STOP(ncerr) - ncerr = nf90_put_att(ncid, jm_varid, "units", "degrees_N"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, im_varid, "long_name", "T-cell longitude"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, im_varid, "units", "degrees_E"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, jm_varid, "long_name", "T-cell latiitude"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, jm_varid, "units", "degrees_N"); NC_ERR_STOP(ncerr) else if (trim(output_grid) == 'rotated_latlon') then - ncerr = nf90_put_att(ncid, im_varid, "long_name", "rotated T-cell longiitude"); NC_ERR_STOP(ncerr) - ncerr = nf90_put_att(ncid, im_varid, "units", "degrees"); NC_ERR_STOP(ncerr) - ncerr = nf90_put_att(ncid, jm_varid, "long_name", "rotated T-cell latiitude"); NC_ERR_STOP(ncerr) - ncerr = nf90_put_att(ncid, jm_varid, "units", "degrees"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, im_varid, "long_name", "rotated T-cell longiitude"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, im_varid, "units", "degrees"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, jm_varid, "long_name", "rotated T-cell latiitude"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, jm_varid, "units", "degrees"); NC_ERR_STOP(ncerr) else if (trim(output_grid) == 'lambert_conformal') then - ncerr = nf90_put_att(ncid, im_varid, "long_name", "x-coordinate of projection"); NC_ERR_STOP(ncerr) - ncerr = nf90_put_att(ncid, im_varid, "units", "meters"); NC_ERR_STOP(ncerr) - ncerr = nf90_put_att(ncid, jm_varid, "long_name", "y-coordinate of projection"); NC_ERR_STOP(ncerr) - ncerr = nf90_put_att(ncid, jm_varid, "units", "meters"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, im_varid, "long_name", "x-coordinate of projection"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, im_varid, "units", "meters"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, jm_varid, "long_name", "y-coordinate of projection"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, jm_varid, "units", "meters"); NC_ERR_STOP(ncerr) endif ncerr = nf90_enddef(ncid); NC_ERR_STOP(ncerr) @@ -377,6 +388,7 @@ subroutine write_netcdf(fieldbundle, wrtfb, filename, mpi_comm, mype, im, jm, rc deallocate(fcstField) deallocate(varids) + deallocate(compress_err) if (mype==0) then ncerr = nf90_close(ncid=ncid); NC_ERR_STOP(ncerr) diff --git a/io/module_write_netcdf_parallel.F90 b/io/module_write_netcdf_parallel.F90 new file mode 100644 index 000000000..7d7dbb0f7 --- /dev/null +++ b/io/module_write_netcdf_parallel.F90 @@ -0,0 +1,617 @@ +#define ESMF_ERR_RETURN(rc) if (ESMF_LogFoundError(rc, msg="Breaking out of subroutine", line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT) + +#define NC_ERR_STOP(status) \ + if (status /= nf90_noerr) write(0,*) "line ", __LINE__, trim(nf90_strerror(status)); \ + if (status /= nf90_noerr) call ESMF_Finalize(endflag=ESMF_END_ABORT) + +module module_write_netcdf_parallel + + use esmf + use netcdf + use module_fv3_io_def,only : ideflate, nbits, & + output_grid,dx,dy,lon1,lat1,lon2,lat2 + use mpi + + implicit none + private + public write_netcdf_parallel + + contains + +#ifdef NO_PARALLEL_NETCDF +!---------------------------------------------------------------------------------------- + subroutine write_netcdf_parallel(fieldbundle, wrtfb, 
filename, mpi_comm, mype, im, jm, ichunk2d, jchunk2d, ichunk3d, jchunk3d, kchunk3d, rc) + type(ESMF_FieldBundle), intent(in) :: fieldbundle + type(ESMF_FieldBundle), intent(in) :: wrtfb + character(*), intent(in) :: filename + integer, intent(in) :: mpi_comm + integer, intent(in) :: mype + integer, intent(in) :: im, jm, ichunk2d, jchunk2d, & + ichunk3d, jchunk3d, kchunk3d + integer, optional,intent(out) :: rc + print *,'in stub write_netcdf_parallel - model not built with parallel netcdf support, return' + end subroutine write_netcdf_parallel +#else +!---------------------------------------------------------------------------------------- + subroutine write_netcdf_parallel(fieldbundle, wrtfb, filename, mpi_comm, mype, im, jm, ichunk2d, jchunk2d, ichunk3d, jchunk3d, kchunk3d, rc) +! + type(ESMF_FieldBundle), intent(in) :: fieldbundle + type(ESMF_FieldBundle), intent(in) :: wrtfb + character(*), intent(in) :: filename + integer, intent(in) :: mpi_comm + integer, intent(in) :: mype + integer, intent(in) :: im, jm, ichunk2d, jchunk2d, & + ichunk3d, jchunk3d, kchunk3d + integer, optional,intent(out) :: rc +! +!** local vars + integer :: i,j,m,n,k,istart,iend,jstart,jend,i1,i2,j1,j2,k1,k2 + integer :: lm + + integer, dimension(:), allocatable :: fldlev + real(ESMF_KIND_R4), dimension(:,:), pointer :: arrayr4 + real(ESMF_KIND_R8), dimension(:,:), pointer :: arrayr8 + real(ESMF_KIND_R4), dimension(:,:,:), pointer :: arrayr4_3d,arrayr4_3d_save + real(ESMF_KIND_R8), dimension(:,:,:), pointer :: arrayr8_3d + + real(8) x(im),y(jm) + integer :: fieldCount, fieldDimCount, gridDimCount + integer, dimension(:), allocatable :: ungriddedLBound, ungriddedUBound + + type(ESMF_Field), allocatable :: fcstField(:) + type(ESMF_TypeKind_Flag) :: typekind + type(ESMF_TypeKind_Flag) :: attTypeKind + type(ESMF_Grid) :: wrtgrid + type(ESMF_Array) :: array + + integer :: attcount + character(len=ESMF_MAXSTR) :: attName, fldName + integer :: totalLBound2d(2),totalUBound2d(2),totalLBound3d(3),totalUBound3d(3) + + integer :: varival + real(4) :: varr4val, scale_fact, offset, dataMin, dataMax + real(4), allocatable, dimension(:) :: compress_err + real(8) :: varr8val + character(len=ESMF_MAXSTR) :: varcval + + character(128) :: time_units + + integer :: ncerr,ierr + integer :: ncid + integer :: oldMode + integer :: im_dimid, jm_dimid, pfull_dimid, phalf_dimid, time_dimid + integer :: im_varid, jm_varid, lm_varid, time_varid, lon_varid, lat_varid + integer, dimension(:), allocatable :: varids + logical shuffle +! + call ESMF_FieldBundleGet(fieldbundle, fieldCount=fieldCount, rc=rc); ESMF_ERR_RETURN(rc) + + allocate(compress_err(fieldCount)); compress_err=-999. + allocate(fldlev(fieldCount)) ; fldlev = 0 + allocate(fcstField(fieldCount)) + allocate(varids(fieldCount)) + + call ESMF_FieldBundleGet(fieldbundle, fieldList=fcstField, grid=wrtGrid, & +! itemorderflag=ESMF_ITEMORDER_ADDORDER, & + rc=rc); ESMF_ERR_RETURN(rc) + + call ESMF_GridGet(wrtgrid, dimCount=gridDimCount, rc=rc); ESMF_ERR_RETURN(rc) + + do i=1,fieldCount + call ESMF_FieldGet(fcstField(i), dimCount=fieldDimCount, rc=rc); ESMF_ERR_RETURN(rc) + if (fieldDimCount > 3) then + write(0,*)"write_netcdf: Only 2D and 3D fields are supported!" 
+ stop + end if + if (fieldDimCount > gridDimCount) then + allocate(ungriddedLBound(fieldDimCount-gridDimCount)) + allocate(ungriddedUBound(fieldDimCount-gridDimCount)) + call ESMF_FieldGet(fcstField(i), & + ungriddedLBound=ungriddedLBound, & + ungriddedUBound=ungriddedUBound, rc=rc); ESMF_ERR_RETURN(rc) + fldlev(i) = ungriddedUBound(fieldDimCount-gridDimCount) - & + ungriddedLBound(fieldDimCount-gridDimCount) + 1 + deallocate(ungriddedLBound) + deallocate(ungriddedUBound) + else if (fieldDimCount == 2) then + fldlev(i) = 1 + end if + end do + + lm = maxval(fldlev(:)) + +! create netcdf file for parallel access + + ncerr = nf90_create(trim(filename),& + cmode=IOR(IOR(NF90_CLOBBER,NF90_NETCDF4),NF90_CLASSIC_MODEL),& + comm=mpi_comm, info = MPI_INFO_NULL, ncid=ncid); NC_ERR_STOP(ncerr) +! disable auto filling. + ncerr = nf90_set_fill(ncid, NF90_NOFILL, oldMode); NC_ERR_STOP(ncerr) + + ! define dimensions + ncerr = nf90_def_dim(ncid, "grid_xt", im, im_dimid); NC_ERR_STOP(ncerr) + ncerr = nf90_def_dim(ncid, "grid_yt", jm, jm_dimid); NC_ERR_STOP(ncerr) + ! define coordinate variables + ncerr = nf90_def_var(ncid, "grid_xt", NF90_DOUBLE, im_dimid, im_varid); NC_ERR_STOP(ncerr) + ncerr = nf90_var_par_access(ncid, im_varid, NF90_INDEPENDENT) + ncerr = nf90_def_var(ncid, "lon", NF90_DOUBLE, (/im_dimid,jm_dimid/), lon_varid); NC_ERR_STOP(ncerr) + !ncerr = nf90_var_par_access(ncid, lon_varid, NF90_INDEPENDENT) + ncerr = nf90_put_att(ncid, lon_varid, "long_name", "T-cell longitude"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, lon_varid, "units", "degrees_E"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, im_varid, "cartesian_axis", "X"); NC_ERR_STOP(ncerr) + ncerr = nf90_def_var(ncid, "grid_yt", NF90_DOUBLE, jm_dimid, jm_varid); NC_ERR_STOP(ncerr) + ncerr = nf90_var_par_access(ncid, jm_varid, NF90_INDEPENDENT) + ncerr = nf90_def_var(ncid, "lat", NF90_DOUBLE, (/im_dimid,jm_dimid/), lat_varid); NC_ERR_STOP(ncerr) + ncerr = nf90_var_par_access(ncid, lat_varid, NF90_INDEPENDENT) + ncerr = nf90_put_att(ncid, lat_varid, "long_name", "T-cell latitude"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, lat_varid, "units", "degrees_N"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, jm_varid, "cartesian_axis", "Y"); NC_ERR_STOP(ncerr) + + if (lm > 1) then + call add_dim(ncid, "pfull", pfull_dimid, wrtgrid, rc) + call add_dim(ncid, "phalf", phalf_dimid, wrtgrid, rc) + end if + + call add_dim(ncid, "time", time_dimid, wrtgrid, rc) + + call get_global_attr(wrtfb, ncid, rc) + + do i=1, fieldCount + call ESMF_FieldGet(fcstField(i), name=fldName, typekind=typekind, rc=rc); ESMF_ERR_RETURN(rc) + + ! define variables + if (fldlev(i) == 1) then + if (typekind == ESMF_TYPEKIND_R4) then + if (ideflate > 0) then + if (ichunk2d < 0 .or. jchunk2d < 0) then + ! let netcdf lib choose chunksize + ! shuffle filter on for 2d fields (lossless compression) + ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, & + (/im_dimid,jm_dimid,time_dimid/), varids(i), & + shuffle=.true.,deflate_level=ideflate); NC_ERR_STOP(ncerr) + else + ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, & + (/im_dimid,jm_dimid,time_dimid/), varids(i), & + shuffle=.true.,deflate_level=ideflate,& + chunksizes=(/ichunk2d,jchunk2d,1/)); NC_ERR_STOP(ncerr) + endif + ! compression filters require collective access. 
+ ncerr = nf90_var_par_access(ncid, varids(i), NF90_COLLECTIVE) + else + ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, & + (/im_dimid,jm_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr) + ncerr = nf90_var_par_access(ncid, varids(i), NF90_INDEPENDENT) + endif + else if (typekind == ESMF_TYPEKIND_R8) then + ncerr = nf90_def_var(ncid, trim(fldName), NF90_DOUBLE, & + (/im_dimid,jm_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr) + ncerr = nf90_var_par_access(ncid, varids(i), NF90_INDEPENDENT) + else + write(0,*)'Unsupported typekind ', typekind + stop + end if + else if (fldlev(i) > 1) then + if (typekind == ESMF_TYPEKIND_R4) then + if (ideflate > 0) then + ! shuffle filter off for 3d fields using lossy compression + if (nbits > 0) then + shuffle=.false. + else + shuffle=.true. + endif + if (ichunk3d < 0 .or. jchunk3d < 0 .or. kchunk3d < 0) then + ! let netcdf lib choose chunksize + ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, & + (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i), & + shuffle=shuffle,deflate_level=ideflate); NC_ERR_STOP(ncerr) + else + ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, & + (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i), & + shuffle=shuffle,deflate_level=ideflate,& + chunksizes=(/ichunk3d,jchunk3d,kchunk3d,1/)); NC_ERR_STOP(ncerr) + endif + ! compression filters require collective access. + ncerr = nf90_var_par_access(ncid, varids(i), NF90_COLLECTIVE) + else + ncerr = nf90_def_var(ncid, trim(fldName), NF90_FLOAT, & + (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr) + ncerr = nf90_var_par_access(ncid, varids(i), NF90_INDEPENDENT) + endif + else if (typekind == ESMF_TYPEKIND_R8) then + ncerr = nf90_def_var(ncid, trim(fldName), NF90_DOUBLE, & + (/im_dimid,jm_dimid,pfull_dimid,time_dimid/), varids(i)); NC_ERR_STOP(ncerr) + ncerr = nf90_var_par_access(ncid, varids(i), NF90_INDEPENDENT) + else + write(0,*)'Unsupported typekind ', typekind + stop + end if + end if + + ! define variable attributes + call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", & + attnestflag=ESMF_ATTNEST_OFF, Count=attcount, & + rc=rc); ESMF_ERR_RETURN(rc) + + do j=1,attCount + call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", & + attnestflag=ESMF_ATTNEST_OFF, attributeIndex=j, & + name=attName, typekind=attTypeKind, itemCount=n, & + rc=rc); ESMF_ERR_RETURN(rc) + + if ( index(trim(attName),"ESMF") /= 0 ) then + cycle + endif + + if (attTypeKind==ESMF_TYPEKIND_I4) then + call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", & + name=trim(attName), value=varival, & + rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_put_att(ncid, varids(i), trim(attName), varival); NC_ERR_STOP(ncerr) + + else if (attTypeKind==ESMF_TYPEKIND_R4) then + call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", & + name=trim(attName), value=varr4val, & + rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_put_att(ncid, varids(i), trim(attName), varr4val); NC_ERR_STOP(ncerr) + + else if (attTypeKind==ESMF_TYPEKIND_R8) then + call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", & + name=trim(attName), value=varr8val, & + rc=rc); ESMF_ERR_RETURN(rc) + if (trim(attName) /= '_FillValue') then + ! 
FIXME: _FillValue must be cast to var type when using NF90_NETCDF4 + ncerr = nf90_put_att(ncid, varids(i), trim(attName), varr8val); NC_ERR_STOP(ncerr) + endif + + else if (attTypeKind==ESMF_TYPEKIND_CHARACTER) then + call ESMF_AttributeGet(fcstField(i), convention="NetCDF", purpose="FV3", & + name=trim(attName), value=varcval, & + rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_put_att(ncid, varids(i), trim(attName), trim(varcval)); NC_ERR_STOP(ncerr) + + end if + + end do ! j=1,attCount + + end do ! i=1,fieldCount + + ! write grid_xt, grid_yt attributes + if (trim(output_grid) == 'gaussian_grid' .or. & + trim(output_grid) == 'regional_latlon') then + ncerr = nf90_put_att(ncid, im_varid, "long_name", "T-cell longitude"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, im_varid, "units", "degrees_E"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, jm_varid, "long_name", "T-cell latiitude"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, jm_varid, "units", "degrees_N"); NC_ERR_STOP(ncerr) + else if (trim(output_grid) == 'rotated_latlon') then + ncerr = nf90_put_att(ncid, im_varid, "long_name", "rotated T-cell longiitude"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, im_varid, "units", "degrees"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, jm_varid, "long_name", "rotated T-cell latiitude"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, jm_varid, "units", "degrees"); NC_ERR_STOP(ncerr) + else if (trim(output_grid) == 'lambert_conformal') then + ncerr = nf90_put_att(ncid, im_varid, "long_name", "x-coordinate of projection"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, im_varid, "units", "meters"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, jm_varid, "long_name", "y-coordinate of projection"); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, jm_varid, "units", "meters"); NC_ERR_STOP(ncerr) + endif + + ncerr = nf90_enddef(ncid); NC_ERR_STOP(ncerr) + +! end of define mode + + ! write grid_xt, grid_yt values + call ESMF_GridGetCoord(wrtGrid, coordDim=1, farrayPtr=arrayr8, rc=rc); ESMF_ERR_RETURN(rc) + istart = lbound(arrayr8,1); iend = ubound(arrayr8,1) + jstart = lbound(arrayr8,2); jend = ubound(arrayr8,2) + !print *,'in write netcdf mpi dim 1',istart,iend,jstart,jend,shape(arrayr8),minval(arrayr8(:,jstart)),maxval(arrayr8(:,jstart)) + + if (trim(output_grid) == 'gaussian_grid' .or. trim(output_grid) == 'regional_latlon') then + ncerr = nf90_put_var(ncid, im_varid, values=arrayr8(:,jstart),start=(/istart/), count=(/iend-istart+1/)); NC_ERR_STOP(ncerr) + else if (trim(output_grid) == 'rotated_latlon') then + do i=1,im + x(i) = lon1 + (lon2-lon1)/(im-1) * (i-1) + enddo + ncerr = nf90_put_var(ncid, im_varid, values=x ); NC_ERR_STOP(ncerr) + else if (trim(output_grid) == 'lambert_conformal') then + do i=1,im + x(i) = dx * (i-1) + enddo + ncerr = nf90_put_var(ncid, im_varid, values=x ); NC_ERR_STOP(ncerr) + endif + ncerr = nf90_put_var(ncid, lon_varid, values=arrayr8, start=(/istart,jstart/)); NC_ERR_STOP(ncerr) + + call ESMF_GridGetCoord(wrtGrid, coordDim=2, farrayPtr=arrayr8, rc=rc); ESMF_ERR_RETURN(rc) + !print *,'in write netcdf mpi dim 2',istart,iend,jstart,jend,shape(arrayr8),minval(arrayr8(istart,:)),maxval(arrayr8(istart,:)) + if (trim(output_grid) == 'gaussian_grid' .or. 
trim(output_grid) == 'regional_latlon') then + ncerr = nf90_put_var(ncid, jm_varid, values=arrayr8(istart,:),start=(/jstart/),count=(/jend-jstart+1/)); NC_ERR_STOP(ncerr) + else if (trim(output_grid) == 'rotated_latlon') then + do j=1,jm + y(j) = lat1 + (lat2-lat1)/(jm-1) * (j-1) + enddo + ncerr = nf90_put_var(ncid, jm_varid, values=y ); NC_ERR_STOP(ncerr) + else if (trim(output_grid) == 'lambert_conformal') then + do j=1,jm + y(j) = dy * (j-1) + enddo + ncerr = nf90_put_var(ncid, jm_varid, values=y ); NC_ERR_STOP(ncerr) + endif + ncerr = nf90_put_var(ncid, lat_varid, values=arrayr8, start=(/istart,jstart/)); NC_ERR_STOP(ncerr) + + do i=1, fieldCount + + call ESMF_FieldGet(fcstField(i),name=fldName,typekind=typekind, rc=rc); ESMF_ERR_RETURN(rc) + + if (fldlev(i) == 1) then + if (typekind == ESMF_TYPEKIND_R4) then + call ESMF_FieldGet(fcstField(i), localDe=0, farrayPtr=arrayr4, totalLBound=totalLBound2d, totalUBound=totalUBound2d,rc=rc); ESMF_ERR_RETURN(rc) + !print *,'field name=',trim(fldName),'bound=',totalLBound2d,'ubound=',totalUBound2d + ncerr = nf90_put_var(ncid, varids(i), values=arrayr4, start=(/totalLBound2d(1),totalLBound2d(2),1/)); NC_ERR_STOP(ncerr) + else if (typekind == ESMF_TYPEKIND_R8) then + call ESMF_FieldGet(fcstField(i), localDe=0, farrayPtr=arrayr8, totalLBound=totalLBound2d, totalUBound=totalUBound2d,rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_put_var(ncid, varids(i), values=arrayr8, start=(/totalLBound2d(1),totalLBound2d(2),1/)); NC_ERR_STOP(ncerr) + end if + else if (fldlev(i) > 1) then + if (typekind == ESMF_TYPEKIND_R4) then + call ESMF_FieldGet(fcstField(i), localDe=0, farrayPtr=arrayr4_3d, totalLBound=totalLBound3d, totalUBound=totalUBound3d,rc=rc); ESMF_ERR_RETURN(rc) + if (ideflate > 0 .and. nbits > 0) then + i1=totalLBound3d(1);i2=totalUBound3d(1) + j1=totalLBound3d(2);j2=totalUBound3d(2) + k1=totalLBound3d(3);k2=totalUBound3d(3) + dataMax = maxval(arrayr4_3d(i1:i2,j1:j2,k1:k2)) + dataMin = minval(arrayr4_3d(i1:i2,j1:j2,k1:k2)) + call mpi_allreduce(mpi_in_place,dataMax,1,mpi_real4,mpi_max,mpi_comm,ierr) + call mpi_allreduce(mpi_in_place,dataMin,1,mpi_real4,mpi_min,mpi_comm,ierr) + ! Lossy compression if nbits>0. + ! The floating point data is quantized to improve compression + ! See doi:10.5194/gmd-10-413-2017. The method employed + ! here is identical to the 'scaled linear packing' method in + ! that paper, except that the data are scaling into an arbitrary + ! range (2**nbits-1 not just 2**16-1) and are stored as + ! re-scaled floats instead of short integers. + ! The zlib algorithm does almost as + ! well packing the re-scaled floats as it does the scaled + ! integers, and this avoids the need for the client to apply the + ! rescaling (plus it allows the ability to adjust the packing + ! range) + scale_fact = (dataMax - dataMin) / (2**nbits-1); offset = dataMin + allocate(arrayr4_3d_save(i1:i2,j1:j2,k1:k2)) + arrayr4_3d_save(i1:i2,j1:j2,k1:k2)=arrayr4_3d(i1:i2,j1:j2,k1:k2) + arrayr4_3d = scale_fact*(nint((arrayr4_3d_save - offset) / scale_fact)) + offset + ! compute max abs compression error. 
+ compress_err(i) = & + maxval(abs(arrayr4_3d_save(i1:i2,j1:j2,k1:k2)-arrayr4_3d(i1:i2,j1:j2,k1:k2))) + deallocate(arrayr4_3d_save) + call mpi_allreduce(mpi_in_place,compress_err(i),1,mpi_real4,mpi_max,mpi_comm,ierr) + !print *,'field name=',trim(fldName),dataMin,dataMax,compress_err(i) + endif + ncerr = nf90_put_var(ncid, varids(i), values=arrayr4_3d, start=(/totalLBound3d(1),totalLBound3d(2),totalLBound3d(3),1/)); NC_ERR_STOP(ncerr) + else if (typekind == ESMF_TYPEKIND_R8) then + call ESMF_FieldGet(fcstField(i), localDe=0, farrayPtr=arrayr8_3d, totalLBound=totalLBound3d, totalUBound=totalUBound3d,rc=rc); ESMF_ERR_RETURN(rc) + !print *,'field name=',trim(fldName),'bound=',totalLBound3d,'ubound=',totalUBound3d + ncerr = nf90_put_var(ncid, varids(i), values=arrayr8_3d, start=(/totalLBound3d(1),totalLBound3d(2),totalLBound3d(3),1/)); NC_ERR_STOP(ncerr) + end if + + end if !end fldlev(i) + + end do ! end fieldCount + + if (ideflate > 0 .and. nbits > 0) then + ncerr = nf90_redef(ncid=ncid); NC_ERR_STOP(ncerr) + do i=1, fieldCount + if (compress_err(i) > 0) then + ncerr = nf90_put_att(ncid, varids(i), 'max_abs_compression_error', compress_err(i)); NC_ERR_STOP(ncerr) + ncerr = nf90_put_att(ncid, varids(i), 'nbits', nbits); NC_ERR_STOP(ncerr) + endif + enddo + ncerr = nf90_enddef(ncid=ncid); NC_ERR_STOP(ncerr) + endif + + deallocate(fcstField) + deallocate(varids) + deallocate(compress_err) + + ncerr = nf90_close(ncid=ncid); NC_ERR_STOP(ncerr) + !call mpi_barrier(mpi_comm,ierr) + !print *,'netcdf parallel close, finished write_netcdf_parallel' + + end subroutine write_netcdf_parallel +#endif + +!---------------------------------------------------------------------------------------- + subroutine get_global_attr(fldbundle, ncid, rc) + type(ESMF_FieldBundle), intent(in) :: fldbundle + integer, intent(in) :: ncid + integer, intent(out) :: rc + +! local variable + integer :: i, attcount + integer :: ncerr + character(len=ESMF_MAXSTR) :: attName + type(ESMF_TypeKind_Flag) :: typekind + + integer :: varival + real(ESMF_KIND_R4) :: varr4val + real(ESMF_KIND_R4), dimension(:), allocatable :: varr4list + real(ESMF_KIND_R8) :: varr8val + real(ESMF_KIND_R8), dimension(:), allocatable :: varr8list + integer :: itemCount + character(len=ESMF_MAXSTR) :: varcval +! 
+ call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", & + attnestflag=ESMF_ATTNEST_OFF, Count=attcount, & + rc=rc); ESMF_ERR_RETURN(rc) + + do i=1,attCount + + call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", & + attnestflag=ESMF_ATTNEST_OFF, attributeIndex=i, name=attName, & + typekind=typekind, itemCount=itemCount, rc=rc); ESMF_ERR_RETURN(rc) + + if (typekind==ESMF_TYPEKIND_I4) then + call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", & + name=trim(attName), value=varival, rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_put_att(ncid, NF90_GLOBAL, trim(attName), varival); NC_ERR_STOP(ncerr) + + else if (typekind==ESMF_TYPEKIND_R4) then + allocate (varr4list(itemCount)) + call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", & + name=trim(attName), valueList=varr4list, rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_put_att(ncid, NF90_GLOBAL, trim(attName), varr4list); NC_ERR_STOP(ncerr) + deallocate(varr4list) + + else if (typekind==ESMF_TYPEKIND_R8) then + allocate (varr8list(itemCount)) + call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", & + name=trim(attName), valueList=varr8list, rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_put_att(ncid, NF90_GLOBAL, trim(attName), varr8list); NC_ERR_STOP(ncerr) + deallocate(varr8list) + + else if (typekind==ESMF_TYPEKIND_CHARACTER) then + call ESMF_AttributeGet(fldbundle, convention="NetCDF", purpose="FV3", & + name=trim(attName), value=varcval, rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_put_att(ncid, NF90_GLOBAL, trim(attName), trim(varcval)); NC_ERR_STOP(ncerr) + + end if + + end do + + end subroutine get_global_attr +! +!---------------------------------------------------------------------------------------- + subroutine get_grid_attr(grid, prefix, ncid, varid, rc) + type(ESMF_Grid), intent(in) :: grid + character(len=*), intent(in) :: prefix + integer, intent(in) :: ncid + integer, intent(in) :: varid + integer, intent(out) :: rc + +! local variable + integer :: i, attcount, n, ind + integer :: ncerr + character(len=ESMF_MAXSTR) :: attName + type(ESMF_TypeKind_Flag) :: typekind + + integer :: varival + real(ESMF_KIND_R4) :: varr4val + real(ESMF_KIND_R8) :: varr8val + character(len=ESMF_MAXSTR) :: varcval +! 
+ call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", & + attnestflag=ESMF_ATTNEST_OFF, Count=attcount, & + rc=rc); ESMF_ERR_RETURN(rc) + + !write(0,*)'grid attcount = ', attcount + do i=1,attCount + + call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", & + attnestflag=ESMF_ATTNEST_OFF, attributeIndex=i, name=attName, & + typekind=typekind, itemCount=n, rc=rc); ESMF_ERR_RETURN(rc) + !write(0,*)'grid att = ',i,trim(attName), ' itemCount = ' , n + + if (index(trim(attName), trim(prefix)//":")==1) then + ind = len(trim(prefix)//":") + + if (typekind==ESMF_TYPEKIND_I4) then + call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", & + name=trim(attName), value=varival, rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_put_att(ncid, varid, trim(attName(ind+1:len(attName))), varival); NC_ERR_STOP(ncerr) + + else if (typekind==ESMF_TYPEKIND_R4) then + call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", & + name=trim(attName), value=varr4val, rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_put_att(ncid, varid, trim(attName(ind+1:len(attName))), varr4val); NC_ERR_STOP(ncerr) + + else if (typekind==ESMF_TYPEKIND_R8) then + call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", & + name=trim(attName), value=varr8val, rc=rc); ESMF_ERR_RETURN(rc) + if (trim(attName) /= '_FillValue') then + ! FIXME: _FillValue must be cast to var type when using + ! NF90_NETCDF4. Until this is fixed, using netCDF default _FillValue. + ncerr = nf90_put_att(ncid, varid, trim(attName(ind+1:len(attName))), varr8val); NC_ERR_STOP(ncerr) + endif + + else if (typekind==ESMF_TYPEKIND_CHARACTER) then + call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", & + name=trim(attName), value=varcval, rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_put_att(ncid, varid, trim(attName(ind+1:len(attName))), trim(varcval)); NC_ERR_STOP(ncerr) + + end if + + end if + + end do + + end subroutine get_grid_attr + + subroutine add_dim(ncid, dim_name, dimid, grid, rc) + integer, intent(in) :: ncid + character(len=*), intent(in) :: dim_name + integer, intent(inout) :: dimid + type(ESMF_Grid), intent(in) :: grid + integer, intent(out) :: rc + +! local variable + integer :: i, attcount, n, dim_varid + integer :: ncerr + character(len=ESMF_MAXSTR) :: attName + type(ESMF_TypeKind_Flag) :: typekind + + integer, allocatable :: valueListI(:) + real(ESMF_KIND_R4), allocatable :: valueListR4(:) + real(ESMF_KIND_R8), allocatable :: valueListR8(:) + character(len=ESMF_MAXSTR), allocatable :: valueListC(:) +! + call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", & + attnestflag=ESMF_ATTNEST_OFF, name=dim_name, & + typekind=typekind, itemCount=n, rc=rc); ESMF_ERR_RETURN(rc) + + if ( trim(dim_name) == "time" ) then + ! using an unlimited dim requires collective mode (NF90_COLLECTIVE) + ! for parallel writes, which seems to slow things down on hera. 
+ !ncerr = nf90_def_dim(ncid, trim(dim_name), NF90_UNLIMITED, dimid); NC_ERR_STOP(ncerr) + ncerr = nf90_def_dim(ncid, trim(dim_name), 1, dimid); NC_ERR_STOP(ncerr) + else + ncerr = nf90_def_dim(ncid, trim(dim_name), n, dimid); NC_ERR_STOP(ncerr) + end if + + if (typekind==ESMF_TYPEKIND_R8) then + ncerr = nf90_def_var(ncid, dim_name, NF90_REAL8, dimids=(/dimid/), varid=dim_varid); NC_ERR_STOP(ncerr) + ncerr = nf90_var_par_access(ncid, dim_varid, NF90_INDEPENDENT) + allocate(valueListR8(n)) + call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", & + name=trim(dim_name), valueList=valueListR8, rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_enddef(ncid=ncid); NC_ERR_STOP(ncerr) + ncerr = nf90_put_var(ncid, dim_varid, values=valueListR8 ); NC_ERR_STOP(ncerr) + ncerr = nf90_redef(ncid=ncid); NC_ERR_STOP(ncerr) + deallocate(valueListR8) + else if (typekind==ESMF_TYPEKIND_R4) then + ncerr = nf90_def_var(ncid, dim_name, NF90_REAL4, dimids=(/dimid/), varid=dim_varid); NC_ERR_STOP(ncerr) + ncerr = nf90_var_par_access(ncid, dim_varid, NF90_INDEPENDENT) + allocate(valueListR4(n)) + call ESMF_AttributeGet(grid, convention="NetCDF", purpose="FV3", & + name=trim(dim_name), valueList=valueListR4, rc=rc); ESMF_ERR_RETURN(rc) + ncerr = nf90_enddef(ncid=ncid); NC_ERR_STOP(ncerr) + ncerr = nf90_put_var(ncid, dim_varid, values=valueListR4 ); NC_ERR_STOP(ncerr) + ncerr = nf90_redef(ncid=ncid); NC_ERR_STOP(ncerr) + deallocate(valueListR4) + else + write(0,*)'Error in module_write_netcdf.F90(add_dim) unknown typekind for ',trim(dim_name) + call ESMF_Finalize(endflag=ESMF_END_ABORT) + end if + + call get_grid_attr(grid, dim_name, ncid, dim_varid, rc) + + end subroutine add_dim +! +!---------------------------------------------------------------------------------------- + subroutine nccheck(status) + use netcdf + implicit none + integer, intent (in) :: status + + if (status /= nf90_noerr) then + write(0,*) status, trim(nf90_strerror(status)) + stop "stopped" + end if + end subroutine nccheck + +end module module_write_netcdf_parallel diff --git a/io/module_wrt_grid_comp.F90 b/io/module_wrt_grid_comp.F90 index 84769eaea..a7177fe22 100644 --- a/io/module_wrt_grid_comp.F90 +++ b/io/module_wrt_grid_comp.F90 @@ -34,7 +34,8 @@ module module_wrt_grid_comp use module_fv3_io_def, only : num_pes_fcst,lead_wrttask, last_wrttask, & n_group, num_files, app_domain, & filename_base, output_grid, output_file, & - imo, jmo, write_nemsioflip, & + imo,jmo,ichunk2d,jchunk2d,write_nemsioflip,& + ichunk3d,jchunk3d,kchunk3d, & nsout => nsout_io, & cen_lon, cen_lat, & lon1, lat1, lon2, lat2, dlon, dlat, & @@ -43,6 +44,7 @@ module module_wrt_grid_comp use module_write_netcdf, only : write_netcdf use physcons, only : pi => con_pi use post_gfs, only : post_run_gfs, post_getattr_gfs + use module_write_netcdf_parallel, only : write_netcdf_parallel ! !----------------------------------------------------------------------- ! @@ -1122,14 +1124,14 @@ subroutine wrt_initialize(wrt_comp, imp_state_write, exp_state_write, clock, rc) !----------------------------------------------------------------------- ! call ESMF_LogWrite("before initialize for nemsio file", ESMF_LOGMSG_INFO, rc=rc) - if (trim(output_grid) == 'gaussian_grid' .and. trim(output_file) == 'nemsio') then -! 
if (lprnt) write(0,*) 'in wrt initial, befnemsio_first_call wrt_int_state%FBcount=',wrt_int_state%FBcount - do i= 1, wrt_int_state%FBcount - call nemsio_first_call(wrt_int_state%wrtFB(i), imo, jmo, & - wrt_int_state%mype, ntasks, wrt_mpi_comm, & - wrt_int_state%FBcount, i, idate, lat, lon, rc) - enddo - endif + do i= 1, wrt_int_state%FBcount + if (trim(output_grid) == 'gaussian_grid' .and. trim(output_file(i)) == 'nemsio') then +! if (lprnt) write(0,*) 'in wrt initial, befnemsio_first_call wrt_int_state%FBcount=',wrt_int_state%FBcount + call nemsio_first_call(wrt_int_state%wrtFB(i), imo, jmo, & + wrt_int_state%mype, ntasks, wrt_mpi_comm, & + wrt_int_state%FBcount, i, idate, lat, lon, rc) + endif + enddo call ESMF_LogWrite("after initialize for nemsio file", ESMF_LOGMSG_INFO, rc=rc) ! !----------------------------------------------------------------------- @@ -1170,7 +1172,7 @@ subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc) type(ESMF_Time) :: currtime type(ESMF_TypeKind_Flag) :: datatype type(ESMF_Field) :: field_work - type(ESMF_Grid) :: grid_work, fbgrid + type(ESMF_Grid) :: grid_work, fbgrid, wrtgrid type(ESMF_Array) :: array_work type(ESMF_State),save :: stateGridFB type(optimizeT), save :: optimize(4) @@ -1363,7 +1365,47 @@ subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc) file_bundle = wrt_int_state%wrtFB(nbdl) endif - if ( trim(output_file) == 'nemsio' ) then + ! set default chunksizes for netcdf output + ! (use MPI decomposition size). + ! if chunksize parameter set to negative value, + ! netcdf library default is used. + if (output_file(nbdl)(1:6) == 'netcdf') then + if (ichunk2d == 0) then + if( wrt_int_state%mype == 0 ) & + ichunk2d = wrt_int_state%lon_end-wrt_int_state%lon_start+1 + call mpi_bcast(ichunk2d,1,mpi_integer,0,wrt_mpi_comm,rc) + endif + if (jchunk2d == 0) then + if( wrt_int_state%mype == 0 ) & + jchunk2d = wrt_int_state%lat_end-wrt_int_state%lat_start+1 + call mpi_bcast(jchunk2d,1,mpi_integer,0,wrt_mpi_comm,rc) + endif + if (ichunk3d == 0) then + if( wrt_int_state%mype == 0 ) & + ichunk3d = wrt_int_state%lon_end-wrt_int_state%lon_start+1 + call mpi_bcast(ichunk3d,1,mpi_integer,0,wrt_mpi_comm,rc) + endif + if (jchunk3d == 0) then + if( wrt_int_state%mype == 0 ) & + jchunk3d = wrt_int_state%lat_end-wrt_int_state%lat_start+1 + call mpi_bcast(jchunk3d,1,mpi_integer,0,wrt_mpi_comm,rc) + endif + if (kchunk3d == 0 .and. 
nbdl == 1) then + if( wrt_int_state%mype == 0 ) then + call ESMF_FieldBundleGet(wrt_int_state%wrtFB(nbdl), grid=wrtgrid) + call ESMF_AttributeGet(wrtgrid, convention="NetCDF", purpose="FV3", & + attnestflag=ESMF_ATTNEST_OFF, name='pfull', & + itemCount=kchunk3d, rc=rc) + endif + call mpi_bcast(kchunk3d,1,mpi_integer,0,wrt_mpi_comm,rc) + endif + if (wrt_int_state%mype == 0) then + print *,'ichunk2d,jchunk2d',ichunk2d,jchunk2d + print *,'ichunk3d,jchunk3d,kchunk3d',ichunk3d,jchunk3d,kchunk3d + endif + endif + + if ( trim(output_file(nbdl)) == 'nemsio' ) then filename = trim(wrt_int_state%wrtFB_names(nbdl))//'f'//trim(cfhour)//'.nemsio' else filename = trim(wrt_int_state%wrtFB_names(nbdl))//'f'//trim(cfhour)//'.nc' @@ -1413,7 +1455,7 @@ subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc) else if (trim(output_grid) == 'gaussian_grid') then - if (trim(output_file) == 'nemsio') then + if (trim(output_file(nbdl)) == 'nemsio') then wbeg = MPI_Wtime() call write_nemsio(file_bundle,trim(filename),nf_hours, nf_minutes, & @@ -1424,18 +1466,37 @@ subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc) ,' at Fcst ',NF_HOURS,':',NF_MINUTES endif - else if (trim(output_file) == 'netcdf') then + else if (trim(output_file(nbdl)) == 'netcdf') then wbeg = MPI_Wtime() call write_netcdf(file_bundle,wrt_int_state%wrtFB(nbdl),trim(filename), & - wrt_mpi_comm,wrt_int_state%mype,imo,jmo,rc) + wrt_mpi_comm,wrt_int_state%mype,imo,jmo,& + ichunk2d,jchunk2d,ichunk3d,jchunk3d,kchunk3d,rc) wend = MPI_Wtime() if (lprnt) then write(*,'(A,F10.5,A,I4.2,A,I2.2)')' netcdf Write Time is ',wend-wbeg & ,' at Fcst ',NF_HOURS,':',NF_MINUTES endif - else if (trim(output_file) == 'netcdf_esmf') then + else if (trim(output_file(nbdl)) == 'netcdf_parallel') then + +#ifdef NO_PARALLEL_NETCDF + rc = ESMF_RC_NOT_IMPL + print *,'netcdf_parallel not available on this machine' + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & + line=__LINE__, file=__FILE__)) return +#endif + wbeg = MPI_Wtime() + call write_netcdf_parallel(file_bundle,wrt_int_state%wrtFB(nbdl), & + trim(filename), wrt_mpi_comm,wrt_int_state%mype,imo,jmo,& + ichunk2d,jchunk2d,ichunk3d,jchunk3d,kchunk3d,rc) + wend = MPI_Wtime() + if (lprnt) then + write(*,'(A,F10.5,A,I4.2,A,I2.2)')' parallel netcdf Write Time is ',wend-wbeg & + ,' at Fcst ',NF_HOURS,':',NF_MINUTES + endif + + else if (trim(output_file(nbdl)) == 'netcdf_esmf') then wbeg = MPI_Wtime() call ESMFproto_FieldBundleWrite(gridFB, filename=trim(filename), & @@ -1482,11 +1543,12 @@ subroutine wrt_run(wrt_comp, imp_state_write, exp_state_write,clock,rc) write(*,'(A,F10.5,A,I4.2,A,I2.2)')' mask_fields time is ',wend-wbeg endif - if (trim(output_file) == 'netcdf') then + if (trim(output_file(nbdl)) == 'netcdf') then wbeg = MPI_Wtime() call write_netcdf(file_bundle,wrt_int_state%wrtFB(nbdl),trim(filename), & - wrt_mpi_comm,wrt_int_state%mype,imo,jmo,rc) + wrt_mpi_comm,wrt_int_state%mype,imo,jmo,& + ichunk2d,jchunk2d,ichunk3d,jchunk3d,kchunk3d,rc) wend = MPI_Wtime() if (mype == lead_write_task) then write(*,'(A,F10.5,A,I4.2,A,I2.2)')' netcdf Write Time is ',wend-wbeg & From 8683a693b8d798b0aed45de8aee8b4293af55c86 Mon Sep 17 00:00:00 2001 From: junwang-noaa <37633869+junwang-noaa@users.noreply.github.com> Date: Tue, 18 Feb 2020 12:05:52 -0500 Subject: [PATCH 2/4] Iaudrymass coupledfield ww3 (#61) * add namelist variable iau_drymassfixer and use consistent coupled fields * dump out coupled fields in native grid for fv3 * add default(none) for openmp in post_gfs.F90 * update 
fv3 dycore with reproduce option in g_sum * change fv3 dycore to point to the NOAA-EMC dev/emc branch --- atmos_cubed_sphere | 2 +- atmos_model.F90 | 10 +- cpl/module_cap_cpl.F90 | 108 ++++++++- cpl/module_cplfields.F90 | 10 +- fv3_cap.F90 | 7 +- gfsphysics/GFS_layer/GFS_typedefs.F90 | 5 +- io/post_gfs.F90 | 326 +++++++++++++------------- 7 files changed, 293 insertions(+), 175 deletions(-) diff --git a/atmos_cubed_sphere b/atmos_cubed_sphere index a56907a44..db3acfbec 160000 --- a/atmos_cubed_sphere +++ b/atmos_cubed_sphere @@ -1 +1 @@ -Subproject commit a56907a44461c7151e0ba266e160c8f1a1685882 +Subproject commit db3acfbec2ca00d1795b72b7ebf0b1e308506ced diff --git a/atmos_model.F90 b/atmos_model.F90 index 9a73e0151..ea336a100 100644 --- a/atmos_model.F90 +++ b/atmos_model.F90 @@ -1740,7 +1740,7 @@ subroutine assign_importdata(rc) ! get upward LW flux: for sea ice covered area !---------------------------------------------- - fldname = 'mean_up_lw_flx' + fldname = 'mean_up_lw_flx_ice' if (trim(impfield_name) == trim(fldname)) then findex = QueryFieldList(ImportFieldsList,fldname) if (importFieldsValid(findex)) then @@ -1767,7 +1767,7 @@ subroutine assign_importdata(rc) ! get latent heat flux: for sea ice covered area !------------------------------------------------ - fldname = 'mean_laten_heat_flx' + fldname = 'mean_laten_heat_flx_atm_into_ice' if (trim(impfield_name) == trim(fldname)) then findex = QueryFieldList(ImportFieldsList,fldname) if (importFieldsValid(findex)) then @@ -1787,7 +1787,7 @@ subroutine assign_importdata(rc) ! get sensible heat flux: for sea ice covered area !-------------------------------------------------- - fldname = 'mean_sensi_heat_flx' + fldname = 'mean_sensi_heat_flx_atm_into_ice' if (trim(impfield_name) == trim(fldname)) then findex = QueryFieldList(ImportFieldsList,fldname) if (importFieldsValid(findex)) then @@ -1807,7 +1807,7 @@ subroutine assign_importdata(rc) ! get zonal compt of momentum flux: for sea ice covered area !------------------------------------------------------------ - fldname = 'mean_zonal_moment_flx' + fldname = 'stress_on_air_ice_zonal' if (trim(impfield_name) == trim(fldname)) then findex = QueryFieldList(ImportFieldsList,fldname) if (importFieldsValid(findex)) then @@ -1827,7 +1827,7 @@ subroutine assign_importdata(rc) ! get meridional compt of momentum flux: for sea ice covered area !----------------------------------------------------------------- - fldname = 'mean_merid_moment_flx' + fldname = 'stress_on_air_ice_merid' if (trim(impfield_name) == trim(fldname)) then findex = QueryFieldList(ImportFieldsList,fldname) if (importFieldsValid(findex)) then diff --git a/cpl/module_cap_cpl.F90 b/cpl/module_cap_cpl.F90 index 3e858c0e0..073f7defd 100644 --- a/cpl/module_cap_cpl.F90 +++ b/cpl/module_cap_cpl.F90 @@ -102,7 +102,8 @@ subroutine realizeConnectedCplFields(state, grid, numLevels, numSoilLayers, numTracers, & num_diag_sfc_emis_flux, num_diag_down_flux, & num_diag_type_down_flux, num_diag_burn_emis_flux, & - num_diag_cmass, fieldNames, fieldTypes, fieldList, rc) + num_diag_cmass, fieldNames, fieldTypes, state_tag,& + fieldList, rc) type(ESMF_State), intent(inout) :: state type(ESMF_Grid), intent(in) :: grid @@ -116,6 +117,7 @@ subroutine realizeConnectedCplFields(state, grid, integer, intent(in) :: num_diag_cmass character(len=*), dimension(:), intent(in) :: fieldNames character(len=*), dimension(:), intent(in) :: fieldTypes + character(len=*), intent(in) :: state_tag !< Import or export. 
type(ESMF_Field), dimension(:), intent(out) :: fieldList integer, intent(out) :: rc @@ -196,10 +198,14 @@ subroutine realizeConnectedCplFields(state, grid, if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! -- save field fieldList(item) = field + call ESMF_LogWrite('realizeConnectedCplFields '//trim(state_tag)//' Field '//trim(fieldNames(item)) & + // ' is connected ', ESMF_LOGMSG_INFO, line=__LINE__, file=__FILE__, rc=rc) else ! remove a not connected Field from State call ESMF_StateRemove(state, (/trim(fieldNames(item))/), rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + call ESMF_LogWrite('realizeConnectedCplFields '//trim(state_tag)//' Field '//trim(fieldNames(item)) & + // ' is not connected ', ESMF_LOGMSG_INFO, line=__LINE__, file=__FILE__, rc=rc) end if end do @@ -217,6 +223,7 @@ subroutine Dump_cplFields(gcomp, importState, exportstate, clock_fv3, & integer :: timeslice ! character(len=160) :: nuopcMsg + character(len=160) :: filename integer :: rc ! call ESMF_ClockPrint(clock_fv3, options="currTime", & @@ -237,12 +244,18 @@ subroutine Dump_cplFields(gcomp, importState, exportstate, clock_fv3, & timeslice = timeslice + 1 call ESMF_GridCompGet(gcomp, importState=importState, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMFPP_RegridWriteState(importState, "fv3_cap_import_", timeslice, rc=rc) + ! replace with tiled field dumps + !call ESMFPP_RegridWriteState(importState, "fv3_cap_import_", timeslice, rc=rc) + write(filename,'(a,i6.6)') 'fv3_cap_import_',timeslice + call State_RWFields_tiles(importState,trim(filename), timeslice, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return call ESMF_GridCompGet(gcomp, exportState=exportState, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMFPP_RegridWriteState(exportState, "fv3_cap_export_", timeslice, rc=rc) + ! replace with tiled field dumps + !call ESMFPP_RegridWriteState(exportState, "fv3_cap_export_", timeslice, rc=rc) + write(filename,'(a,i6.6)') 'fv3_cap_export_',timeslice + call State_RWFields_tiles(exportState,trim(filename), timeslice, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return endif ! @@ -345,6 +358,95 @@ subroutine ESMFPP_RegridWrite(inField, outGrid, regridMethod, fileName, fieldNam end subroutine ESMFPP_RegridWrite + !----------------------------------------------------------------------------- + + ! This subroutine requires ESMFv8 - for coupled FV3 + subroutine State_RWFields_tiles(state,filename,timeslice,rc) + + type(ESMF_State), intent(in) :: state + character(len=*), intent(in) :: fileName + integer, intent(in) :: timeslice + integer, intent(out) :: rc + + ! local + type(ESMF_Field) :: firstESMFFLD + type(ESMF_Field),allocatable :: flds(:) + type(ESMF_GridComp) :: IOComp + type(ESMF_Grid) :: gridFv3 + + character(len=256) :: msgString + integer :: i, icount, ifld + integer :: fieldcount, firstfld + character(64), allocatable :: itemNameList(:), fldNameList(:) + type(ESMF_StateItem_Flag), allocatable :: typeList(:) + + character(len=*),parameter :: subname='(module_cap_cpl:State_RWFields_tiles)' + + ! 
local variables + + rc = ESMF_SUCCESS + !call ESMF_LogWrite(trim(subname)//trim(filename)//": called", + !ESMF_LOGMSG_INFO, rc=rc) + + call ESMF_StateGet(state, itemCount=icount, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + allocate(typeList(icount), itemNameList(icount)) + call ESMF_StateGet(state, itemTypeList=typeList, itemNameList=itemNameList, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + ! find first stateitem that is a field and the count of fields + firstfld = 0; fieldcount = 0 + do i = icount,1,-1 + if(typeList(i) == ESMF_STATEITEM_FIELD) firstfld = i + if(typeList(i) == ESMF_STATEITEM_FIELD) fieldcount = fieldcount + 1 + enddo + !write(msgString,*) trim(subname)//' icount = ',icount," fieldcount = + !",fieldcount," firstfld = ",firstfld + !call ESMF_LogWrite(trim(msgString), ESMF_LOGMSG_INFO, rc=rc) + + allocate(flds(fieldCount),fldNameList(fieldCount)) + ifld = 1 + do i = 1, icount + if(typeList(i) == ESMF_STATEITEM_FIELD) then + fldNameList(ifld) = itemNameList(i) + ifld = ifld + 1 + endif + enddo + + call ESMF_LogWrite(trim(subname)//": write "//trim(filename)//"tile1-tile6", ESMF_LOGMSG_INFO, rc=rc) + ! get first field + call ESMF_StateGet(state, itemName=itemNameList(firstfld), field=firstESMFFLD, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & + line=__LINE__, file=__FILE__)) return ! bail out + + call ESMF_FieldGet(firstESMFFLD, grid=gridFv3, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & + line=__LINE__, file=__FILE__)) return ! bail out + + IOComp = ESMFIO_Create(gridFv3, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & + line=__LINE__, file=__FILE__)) return ! bail out + call ESMF_LogWrite(trim(subname)//": write "//trim(filename), ESMF_LOGMSG_INFO, rc=rc) + + do ifld=1, fieldCount + call ESMF_StateGet(state, itemName=fldNameList(ifld), field=flds(ifld), rc=rc) + enddo + + call ESMFIO_Write(IOComp, filename, flds, filePath='./', rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & + line=__LINE__, file=__FILE__)) return ! bail out + +! -- Finalize ESMFIO + deallocate(flds) + deallocate(fldNameList) + call ESMFIO_Destroy(IOComp, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & + line=__LINE__, file=__FILE__)) call ESMF_Finalize() + + !call ESMF_LogWrite(trim(subname)//trim(filename)//": finished", + !ESMF_LOGMSG_INFO, rc=rc) + + end subroutine State_RWFields_tiles !----------------------------------------------------------------------------- diff --git a/cpl/module_cplfields.F90 b/cpl/module_cplfields.F90 index 48997ce4f..3b0b0be16 100644 --- a/cpl/module_cplfields.F90 +++ b/cpl/module_cplfields.F90 @@ -152,12 +152,12 @@ module module_cplfields ! "inst_ice_ir_dir_albedo ", & ! "inst_ice_vis_dif_albedo ", & ! "inst_ice_vis_dir_albedo ", & - "mean_up_lw_flx ", & - "mean_laten_heat_flx ", & - "mean_sensi_heat_flx ", & + "mean_up_lw_flx_ice ", & + "mean_laten_heat_flx_atm_into_ice ", & + "mean_sensi_heat_flx_atm_into_ice ", & ! 
"mean_evap_rate ", & - "mean_zonal_moment_flx ", & - "mean_merid_moment_flx ", & + "stress_on_air_ice_zonal ", & + "stress_on_air_ice_merid ", & "mean_ice_volume ", & "mean_snow_volume ", & "inst_tracer_up_surface_flx ", & diff --git a/fv3_cap.F90 b/fv3_cap.F90 index c211a9044..0a352846f 100644 --- a/fv3_cap.F90 +++ b/fv3_cap.F90 @@ -913,14 +913,17 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) call realizeConnectedCplFields(exportState, fcstGrid, & numLevels, numSoilLayers, numTracers, num_diag_sfc_emis_flux, & num_diag_down_flux, num_diag_type_down_flux, num_diag_burn_emis_flux, & - num_diag_cmass, exportFieldsList, exportFieldTypes, exportFields, rc) + num_diag_cmass, exportFieldsList, exportFieldTypes, 'FV3 Export', & + exportFields, rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return ! -- realize connected fields in importState call realizeConnectedCplFields(importState, fcstGrid, & numLevels, numSoilLayers, numTracers, num_diag_sfc_emis_flux, & num_diag_down_flux, num_diag_type_down_flux, num_diag_burn_emis_flux, & - num_diag_cmass, importFieldsList, importFieldTypes, importFields, rc) + num_diag_cmass, importFieldsList, importFieldTypes, 'FV3 Import', & + importFields, rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return end if endif diff --git a/gfsphysics/GFS_layer/GFS_typedefs.F90 b/gfsphysics/GFS_layer/GFS_typedefs.F90 index 09ab1da2d..8cef85314 100644 --- a/gfsphysics/GFS_layer/GFS_typedefs.F90 +++ b/gfsphysics/GFS_layer/GFS_typedefs.F90 @@ -1074,7 +1074,7 @@ module GFS_typedefs real(kind=kind_phys) :: iau_delthrs ! iau time interval (to scale increments) in hours character(len=240) :: iau_inc_files(7)! list of increment files real(kind=kind_phys) :: iaufhrs(7) ! forecast hours associated with increment files - logical :: iau_filter_increments + logical :: iau_filter_increments, iau_drymassfixer #ifdef CCPP ! From physcons.F90, updated/set in control_initialize @@ -3058,6 +3058,7 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & character(len=240) :: iau_inc_files(7) = '' !< list of increment files real(kind=kind_phys) :: iaufhrs(7) = -1 !< forecast hours associated with increment files logical :: iau_filter_increments = .false. !< filter IAU increments + logical :: iau_drymassfixer = .false. !< IAU dry mass fixer !--- debug flag logical :: debug = .false. 
@@ -3170,6 +3171,7 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & ca_sgs, ca_global,iseed_ca,ca_smooth,isppt_deep,nspinup, & !--- IAU iau_delthrs,iaufhrs,iau_inc_files,iau_filter_increments, & + iau_drymassfixer, & !--- debug options debug, pre_rad, & !--- parameter range for critical relative humidity @@ -3650,6 +3652,7 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & Model%iau_inc_files = iau_inc_files Model%iau_delthrs = iau_delthrs Model%iau_filter_increments = iau_filter_increments + Model%iau_drymassfixer = iau_drymassfixer if(Model%me==0) print *,' model init,iaufhrs=',Model%iaufhrs !--- tracer handling diff --git a/io/post_gfs.F90 b/io/post_gfs.F90 index daa6551bd..e9358ec91 100644 --- a/io/post_gfs.F90 +++ b/io/post_gfs.F90 @@ -183,7 +183,7 @@ subroutine post_run_gfs(wrt_int_state,mypei,mpicomp,lead_write, & call set_outflds(kth,th,kpv,pv) if(allocated(datapd))deallocate(datapd) allocate(datapd(wrt_int_state%im,jte-jts+1,nrecout+100)) -!$omp parallel do private(i,j,k) +!$omp parallel do default(none),private(i,j,k),shared(nrecout,jend,jsta,im,datapd) do k=1,nrecout+100 do j=1,jend+1-jsta do i=1,im @@ -433,7 +433,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! write(6,*) 'maptype and gridtype is ', maptype,gridtype ! -!$omp parallel do private(i,j,tem) +!$omp parallel do default(shared),private(i,j) do j=jsta,jend do i=1,im gdlat(i,j) = wrt_int_state%latPtr(i,j) @@ -448,7 +448,8 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! print*,'latstart,latlast B bcast= ',latstart,latlast ! print*,'lonstart,lonlast B bcast= ',lonstart,lonlast -!$omp parallel do private(i,j) +!$omp parallel do default(none),private(i,j,ip1), & +!$omp& shared(jsta,jend_m,im,dx,gdlat,gdlon,dy) do j = jsta, jend_m do i = 1, im ip1 = i + 1 @@ -459,13 +460,12 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & end do ! if(.not. allocated(ak5)) allocate(ak5(lm+1),bk5(lm+1)) -!$omp parallel do private(i) do i=1,lm+1 ak5(i) = wrt_int_state%ak(i) bk5(i) = wrt_int_state%bk(i) enddo -!$omp parallel do private(i,j) +!$omp parallel do default(none) private(i,j) shared(jsta,jend,im,f,gdlat) do j=jsta,jend do i=1,im f(I,J) = 1.454441e-4*sin(gdlat(i,j)*dtr) ! 2*omeg*sin(phi) @@ -478,7 +478,8 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! GFS may not have model derived radar ref. ! TKE ! cloud amount -!$omp parallel do private(i,j,l) +!$omp parallel do default(none),private(i,j,l), & +!$omp& shared(lm,jsta,jend,im,spval,ref_10cm,q2,cfr) do l=1,lm do j=jsta,jend do i=1,im @@ -492,7 +493,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! GFS does not have surface specific humidity ! inst sensible heat flux ! inst latent heat flux -!$omp parallel do private(i,j) +!$omp parallel do default(none),private(i,j),shared(jsta,jend,im,spval,qs,twbs,qwbs,ths) do j=jsta,jend do i=1,im qs(i,j) = SPVAL @@ -512,7 +513,8 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! 10 m theta ! 10 m humidity ! snow free albedo -!$omp parallel do private(i,j) +!$omp parallel do default(none), private(i,j), shared(jsta,jend,im,spval), & +!$omp& shared(cldefi,lspa,th10,q10,albase) do j=jsta,jend do i=1,im cldefi(i,j) = SPVAL @@ -524,7 +526,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & enddo ! 
GFS does not have convective precip -!$omp parallel do private(i,j) +!$omp parallel do default(none) private(i,j) shared(jsta,jend,im,cprate) do j=jsta,jend do i=1,im cprate(i,j) = 0. @@ -536,7 +538,8 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! inst cloud fraction for high, middle, and low cloud, ! cfrach ! inst ground heat flux, grnflx -!$omp parallel do private(i,j) +!$omp parallel do default(none) private(i,j) shared(jsta,jend,im,spval), & +!$omp& shared(czen,czmean,radot,cfrach,cfracl,cfracm,grnflx) do j=jsta,jend do i=1,im czen(i,j) = SPVAL @@ -566,7 +569,8 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! inst outgoing sfc shortwave, rswout ! snow phase change heat flux, snopcx ! GFS does not use total momentum flux,sfcuvx -!$omp parallel do private(i,j) +!$omp parallel do default(none),private(i,j),shared(jsta,jend,im,spval), & +!$omp& shared(acfrcv,ncfrcv,acfrst,ncfrst,bgroff,rlwin,rlwtoa,rswin,rswinc,rswout,snopcx,sfcuvx) do j=jsta,jend do i=1,im acfrcv(i,j) = spval @@ -596,6 +600,8 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! temperature tendency due to latent heating from convection ! temperature tendency due to latent heating from grid scale do l=1,lm +!$omp parallel do default(none),private(i,j),shared(jsta_2l,jend_2u,im,spval,l), & +!$omp& shared(rlwtt,rswtt,tcucn,tcucns,train) do j=jsta_2l,jend_2u do i=1,im rlwtt(i,j,l) = spval @@ -624,7 +630,8 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! v at roughness length, vz0 ! shelter rh max, maxrhshltr ! shelter rh min, minrhshltr -!$omp parallel do private(i,j) +!$omp parallel do default(none),private(i,j),shared(jsta_2l,jend_2u,im,spval), & +!$omp& shared(smstav,sfcevp,acsnow,acsnom,qz0,uz0,vz0,maxrhshltr,minrhshltr) do j=jsta_2l,jend_2u do i=1,im smstav(i,j) = spval @@ -642,7 +649,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! GFS does not have mixing length,el_pbl ! exchange coefficient, exch_h do l=1,lm -!$omp parallel do private(i,j) +!$omp parallel do default(none),private(i,j),shared(jsta_2l,jend_2u,im,l,spval,el_pbl,exch_h) do j=jsta_2l,jend_2u do i=1,im el_pbl(i,j,l) = spval @@ -652,7 +659,8 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & enddo ! GFS does not have deep convective cloud top and bottom fields -!$omp parallel do private(i,j) +!$omp parallel do default(none),private(i,j),shared(jsta_2l,jend_2u,im,spval), & +!$omp& shared(htopd,hbotd,htops,hbots,cuppt) do j=jsta_2l,jend_2u do i=1,im htopd(i,j) = SPVAL @@ -688,7 +696,8 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & tstart = 0. ! !** initialize cloud water and ice mixing ratio -!$omp parallel do private(i,j,l) +!$omp parallel do default(none),private(i,j,l),shared(lm,jsta,jend,im), & +!$omp& shared(qqw,qqr,qqs,qqi) do l = 1,lm do j = jsta, jend do i = 1,im @@ -723,7 +732,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & line=__LINE__, file=__FILE__)) return ! bail out ista = lbound(arrayr42d,1) iend = ubound(arrayr42d,1) -!$omp parallel do private(i,j) + !$omp parallel do default(none),private(i,j),shared(jsta,jend,ista,iend,spval,arrayr42d,sm) do j=jsta, jend do i=ista, iend if (arrayr42d(i,j) /= spval) sm(i,j) = 1.- arrayr42d(i,j) @@ -747,7 +756,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & line=__LINE__, file=__FILE__)) return ! 
bail out ista = lbound(arrayr42d,1) iend = ubound(arrayr42d,1) -!$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,sice,arrayr42d,sm) do j=jsta, jend do i=ista, iend sice(i,j) = arrayr42d(i,j) @@ -806,7 +815,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & line=__LINE__, file=__FILE__)) return ! bail out allocate( arrayr42d(ista:iend,jsta:jend)) - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,arrayr42d,arrayr82d) do j=jsta, jend do i=ista, iend arrayr42d(i,j) = arrayr82d(i,j) @@ -816,7 +825,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! Terrain height (*G later) if(trim(fieldname)=='hgtsfc') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,fis,arrayr42d) do j=jsta,jend do i=ista, iend fis(i,j)=arrayr42d(i,j) @@ -836,7 +845,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! PBL height using nemsio if(trim(fieldname)=='hpbl') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,pblh,arrayr42d) do j=jsta,jend do i=ista, iend pblh(i,j)=arrayr42d(i,j) @@ -846,7 +855,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! frictional velocity if(trim(fieldname)=='fricv') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,ustar,arrayr42d) do j=jsta,jend do i=ista, iend ustar(i,j)=arrayr42d(i,j) @@ -856,7 +865,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! roughness length if(trim(fieldname)=='sfcr') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,z0,arrayr42d) do j=jsta,jend do i=ista, iend z0(i,j)=arrayr42d(i,j) @@ -866,7 +875,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! sfc exchange coeff if(trim(fieldname)=='sfexc') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,sfcexc,arrayr42d) do j=jsta,jend do i=ista, iend sfcexc(i,j)=arrayr42d(i,j) @@ -876,7 +885,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! aerodynamic conductance if(trim(fieldname)=='acond') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,acond,arrayr42d) do j=jsta,jend do i=ista, iend acond(i,j)=arrayr42d(i,j) @@ -886,7 +895,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! surface potential T if(trim(fieldname)=='tmpsfc') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,arrayr42d,ths) do j=jsta,jend do i=ista, iend if (arrayr42d(i,j) /= spval) then @@ -898,7 +907,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! convective precip in m per physics time step if(trim(fieldname)=='cpratb_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,dtq2,arrayr42d,avgcprate) do j=jsta,jend do i=ista, iend if (arrayr42d(i,j) /= spval) & @@ -909,7 +918,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! 
continuous bucket convective precip in m per physics time step if(trim(fieldname)=='cprat_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,dtq2,arrayr42d,avgcprate_cont) do j=jsta,jend do i=ista, iend if (arrayr42d(i,j) /= spval) then @@ -921,7 +930,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged bucketed precip rate if(trim(fieldname)=='prateb_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,dtq2,arrayr42d,avgprec) do j=jsta,jend do i=ista, iend if (arrayr42d(i,j) /= spval) then @@ -933,7 +942,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged continuous precip rate in m per physics time step if(trim(fieldname)=='prate_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,dtq2,arrayr42d,avgprec_cont) do j=jsta,jend do i=ista, iend if (arrayr42d(i,j) /= spval) then @@ -945,7 +954,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! precip rate in m per physics time step if(trim(fieldname)=='tprcp') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,dtq2,dtp,arrayr42d,prec) do j=jsta,jend do i=ista, iend if (arrayr42d(i,j) /= spval) then @@ -957,7 +966,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! convective precip rate in m per physics time step if(trim(fieldname)=='cnvprcp') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,dtq2,dtp,arrayr42d,cprate) do j=jsta,jend do i=ista, iend if (arrayr42d(i,j) /= spval) then @@ -969,7 +978,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! inst snow water eqivalent if(trim(fieldname)=='weasd') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,sno,arrayr42d,sm,sice) do j=jsta,jend do i=ista, iend sno(i,j) = arrayr42d(i,j) @@ -980,7 +989,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! ave snow cover if(trim(fieldname)=='snowc_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,snoavg,arrayr42d,sm,sice) do j=jsta,jend do i=ista, iend snoavg(i,j) = arrayr42d(i,j) @@ -992,7 +1001,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! snow depth in mm if(trim(fieldname)=='snod') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,si,arrayr42d,sm,sice) do j=jsta,jend do i=ista, iend si(i,j) = arrayr42d(i,j) @@ -1004,7 +1013,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! 2m potential T (computed later) if(trim(fieldname)=='tmp2m') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,tshltr,arrayr42d) do j=jsta,jend do i=ista, iend tshltr(i,j) = arrayr42d(i,j) @@ -1014,7 +1023,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! 
surface potential T if(trim(fieldname)=='spfh2m') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,qshltr,arrayr42d) do j=jsta,jend do i=ista, iend qshltr(i,j) = arrayr42d(i,j) @@ -1024,7 +1033,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! mid day avg albedo in fraction if(trim(fieldname)=='albdo_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,avgalbedo,arrayr42d) do j=jsta,jend do i=ista, iend avgalbedo(i,j) = arrayr42d(i,j) @@ -1037,7 +1046,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged column cloud fraction if(trim(fieldname)=='tcdc_aveclm') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,avgtcdc,arrayr42d) do j=jsta,jend do i=ista, iend avgtcdc(i,j) = arrayr42d(i,j) @@ -1050,7 +1059,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! maximum snow albedo in fraction if(trim(fieldname)=='snoalb') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,mxsnal,arrayr42d) do j=jsta,jend do i=ista, iend mxsnal(i,j) = arrayr42d(i,j) @@ -1063,7 +1072,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! ave high cloud fraction if(trim(fieldname)=='tcdc_avehcl') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,avgcfrach,arrayr42d) do j=jsta,jend do i=ista, iend avgcfrach(i,j) = arrayr42d(i,j) @@ -1076,7 +1085,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! ave low cloud fraction if(trim(fieldname)=='tcdc_avelcl') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,avgcfracl,arrayr42d) do j=jsta,jend do i=ista, iend avgcfracl(i,j) = arrayr42d(i,j) @@ -1089,7 +1098,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! ave middle cloud fraction if(trim(fieldname)=='tcdc_avemcl') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,avgcfracm,arrayr42d) do j=jsta,jend do i=ista, iend avgcfracm(i,j) = arrayr42d(i,j) @@ -1102,7 +1111,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! inst convective cloud fraction if(trim(fieldname)=='tcdccnvcl') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,cnvcfr,arrayr42d) do j=jsta,jend do i=ista, iend cnvcfr(i,j) = arrayr42d(i,j) @@ -1115,7 +1124,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! slope type if(trim(fieldname)=='sltyp') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,arrayr42d,islope) do j=jsta,jend do i=ista, iend if (arrayr42d(i,j) < spval) then @@ -1129,7 +1138,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged column cloud fraction if(trim(fieldname)=='cnwat') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,cmc,arrayr42d,sm) do j=jsta,jend do i=ista, iend cmc(i,j) = arrayr42d(i,j) @@ -1141,7 +1150,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! 
frozen precip fraction if(trim(fieldname)=='cpofp') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,arrayr42d,sr) do j=jsta,jend do i=ista, iend if (arrayr42d(i,j) /= spval) then @@ -1156,9 +1165,9 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! sea ice skin temperature if(trim(fieldname)=='tisfc') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,sice,arrayr42d,ti) do j=jsta,jend - do i=1,im + do i=ista,iend if (arrayr42d(i,j) /= spval) then ti(i,j) = arrayr42d(i,j) if (sice(i,j) == spval .or. sice(i,j) == 0.) ti(i,j)=spval @@ -1172,7 +1181,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! vegetation fraction if(trim(fieldname)=='veg') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,vegfrc,arrayr42d,sm) do j=jsta,jend do i=ista, iend vegfrc(i,j) = arrayr42d(i,j) @@ -1188,7 +1197,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! liquid volumetric soil mpisture in fraction if(trim(fieldname)=='soill1') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,sh2o,arrayr42d,sm) do j=jsta,jend do i=ista, iend sh2o(i,j,1) = arrayr42d(i,j) @@ -1199,7 +1208,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! liquid volumetric soil mpisture in fraction if(trim(fieldname)=='soill2') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,sh2o,arrayr42d,sm) do j=jsta,jend do i=ista, iend sh2o(i,j,2) = arrayr42d(i,j) @@ -1210,7 +1219,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! liquid volumetric soil mpisture in fraction if(trim(fieldname)=='soill3') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,sh2o,arrayr42d,sm) do j=jsta,jend do i=ista, iend sh2o(i,j,3) = arrayr42d(i,j) @@ -1221,7 +1230,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! liquid volumetric soil mpisture in fraction if(trim(fieldname)=='soill4') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,sh2o,arrayr42d,sm) do j=jsta,jend do i=ista, iend sh2o(i,j,4) = arrayr42d(i,j) @@ -1232,7 +1241,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! volumetric soil moisture if(trim(fieldname)=='soilw1') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,smc,arrayr42d,sm) do j=jsta,jend do i=ista, iend smc(i,j,1) = arrayr42d(i,j) @@ -1243,7 +1252,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! volumetric soil moisture if(trim(fieldname)=='soilw2') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,smc,arrayr42d,sm) do j=jsta,jend do i=ista, iend smc(i,j,2) = arrayr42d(i,j) @@ -1254,7 +1263,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! 
volumetric soil moisture if(trim(fieldname)=='soilw3') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,smc,arrayr42d,sm) do j=jsta,jend do i=ista, iend smc(i,j,3) = arrayr42d(i,j) @@ -1265,7 +1274,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! volumetric soil moisture if(trim(fieldname)=='soilw4') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,smc,arrayr42d,sm) do j=jsta,jend do i=ista, iend smc(i,j,4) = arrayr42d(i,j) @@ -1276,7 +1285,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! soil temperature if(trim(fieldname)=='soilt1') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,stc,arrayr42d,sm,sice) do j=jsta,jend do i=ista, iend stc(i,j,1) = arrayr42d(i,j) @@ -1288,7 +1297,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! soil temperature if(trim(fieldname)=='soilt2') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,stc,arrayr42d,sm,sice) do j=jsta,jend do i=ista, iend stc(i,j,2) = arrayr42d(i,j) @@ -1300,7 +1309,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! soil temperature if(trim(fieldname)=='soilt3') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,stc,arrayr42d,sm,sice) do j=jsta,jend do i=ista, iend stc(i,j,3) = arrayr42d(i,j) @@ -1312,7 +1321,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! soil temperature if(trim(fieldname)=='soilt4') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,stc,arrayr42d,sm,sice) do j=jsta,jend do i=ista, iend stc(i,j,4) = arrayr42d(i,j) @@ -1324,7 +1333,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged incoming sfc longwave if(trim(fieldname)=='dlwrf_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,alwin,arrayr42d) do j=jsta,jend do i=ista, iend alwin(i,j) = arrayr42d(i,j) @@ -1334,7 +1343,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! inst incoming sfc longwave if(trim(fieldname)=='dlwrf') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,rlwin,arrayr42d) do j=jsta,jend do i=ista, iend rlwin(i,j) = arrayr42d(i,j) @@ -1344,7 +1353,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged outgoing sfc longwave, CLDRAD puts a minus sign if(trim(fieldname)=='ulwrf_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,alwout,arrayr42d) do j=jsta,jend do i=ista, iend alwout(i,j) = arrayr42d(i,j) @@ -1355,7 +1364,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! inst outgoing sfc longwave if(trim(fieldname)=='ulwrf') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,radot,arrayr42d) do j=jsta,jend do i=ista, iend radot(i,j) = arrayr42d(i,j) @@ -1365,7 +1374,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! 
time averaged outgoing model top longwave if(trim(fieldname)=='ulwrf_avetoa') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,alwtoa,arrayr42d) do j=jsta,jend do i=ista, iend alwtoa(i,j) = arrayr42d(i,j) @@ -1375,7 +1384,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged incoming sfc shortwave if(trim(fieldname)=='dswrf_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,aswin,arrayr42d) do j=jsta,jend do i=ista, iend aswin(i,j) = arrayr42d(i,j) @@ -1385,7 +1394,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! inst incoming sfc shortwave if(trim(fieldname)=='dswrf') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,rswin,arrayr42d) do j=jsta,jend do i=ista, iend rswin(i,j) = arrayr42d(i,j) @@ -1395,7 +1404,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged incoming sfc uv-b if(trim(fieldname)=='duvb_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,auvbin,arrayr42d) do j=jsta,jend do i=ista, iend auvbin(i,j) = arrayr42d(i,j) @@ -1405,7 +1414,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged incoming sfc clear sky uv-b if(trim(fieldname)=='cduvb_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,auvbinc,arrayr42d) do j=jsta,jend do i=ista, iend auvbinc(i,j) = arrayr42d(i,j) @@ -1415,7 +1424,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged outgoing sfc shortwave,CLDRAD puts a minus sign if(trim(fieldname)=='uswrf_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,aswout,arrayr42d) do j=jsta,jend do i=ista, iend aswout(i,j) = arrayr42d(i,j) @@ -1426,7 +1435,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! inst outgoing sfc shortwave if(trim(fieldname)=='uswrf') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,rswout,arrayr42d) do j=jsta,jend do i=ista, iend rswout(i,j) = arrayr42d(i,j) @@ -1436,7 +1445,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged model top incoming shortwave if(trim(fieldname)=='dswrf_avetoa') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,aswintoa,arrayr42d) do j=jsta,jend do i=ista, iend aswintoa(i,j) = arrayr42d(i,j) @@ -1446,7 +1455,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! ime averaged model top outgoing shortwave if(trim(fieldname)=='uswrf_avetoa') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,aswtoa,arrayr42d) do j=jsta,jend do i=ista, iend aswtoa(i,j) = arrayr42d(i,j) @@ -1457,7 +1466,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged surface sensible heat flux, multiplied by -1 because ! 
wrf model fluxhas reversed sign convention using gfsio if(trim(fieldname)=='shtfl_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,sfcshx,arrayr42d) do j=jsta,jend do i=ista, iend sfcshx(i,j) = arrayr42d(i,j) @@ -1468,7 +1477,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! inst surface sensible heat flux if(trim(fieldname)=='shtfl') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,twbs,arrayr42d) do j=jsta,jend do i=ista, iend twbs(i,j) = arrayr42d(i,j) @@ -1480,7 +1489,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged surface latent heat flux, multiplied by -1 because ! wrf model flux has reversed sign vonvention using gfsio if(trim(fieldname)=='lhtfl_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,sfclhx,arrayr42d) do j=jsta,jend do i=ista, iend sfclhx(i,j) = arrayr42d(i,j) @@ -1491,7 +1500,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! inst surface latent heat flux if(trim(fieldname)=='lhtfl') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,qwbs,arrayr42d) do j=jsta,jend do i=ista, iend qwbs(i,j) = arrayr42d(i,j) @@ -1502,7 +1511,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged ground heat flux if(trim(fieldname)=='gflux_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,subshx,arrayr42d,sm,sice) do j=jsta,jend do i=ista, iend subshx(i,j) = arrayr42d(i,j) @@ -1513,7 +1522,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! inst ground heat flux if(trim(fieldname)=='gflux') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,grnflx,arrayr42d,sm,sice) do j=jsta,jend do i=ista, iend grnflx(i,j) = arrayr42d(i,j) @@ -1524,7 +1533,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged zonal momentum flux if(trim(fieldname)=='uflx_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,sfcux,arrayr42d) do j=jsta,jend do i=ista, iend sfcux(i,j) = arrayr42d(i,j) @@ -1534,7 +1543,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged meridional momentum flux if(trim(fieldname)=='vflx_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,sfcvx,arrayr42d) do j=jsta,jend do i=ista, iend sfcvx(i,j) = arrayr42d(i,j) @@ -1544,7 +1553,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged zonal gravity wave stress if(trim(fieldname)=='u-gwd_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,gtaux,arrayr42d) do j=jsta,jend do i=ista, iend gtaux(i,j) = arrayr42d(i,j) @@ -1554,7 +1563,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! 
time averaged meridional gravity wave stress if(trim(fieldname)=='v-gwd_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,gtauy,arrayr42d) do j=jsta,jend do i=ista, iend gtauy(i,j) = arrayr42d(i,j) @@ -1564,7 +1573,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged accumulated potential evaporation if(trim(fieldname)=='pevpr_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,avgpotevp,arrayr42d,sm,sice) do j=jsta,jend do i=ista, iend avgpotevp(i,j) = arrayr42d(i,j) @@ -1575,7 +1584,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! inst potential evaporation if(trim(fieldname)=='pevpr') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,potevp,arrayr42d,sm,sice) do j=jsta,jend do i=ista, iend potevp(i,j) = arrayr42d(i,j) @@ -1586,7 +1595,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! 10 m u if(trim(fieldname)=='ugrd10m') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,u10,arrayr42d,u10h) do j=jsta,jend do i=ista, iend u10(i,j) = arrayr42d(i,j) @@ -1597,7 +1606,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! 10 m v if(trim(fieldname)=='vgrd10m') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,v10,arrayr42d,v10h) do j=jsta,jend do i=ista, iend v10(i,j) = arrayr42d(i,j) @@ -1608,7 +1617,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! vegetation type if(trim(fieldname)=='vtype') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,arrayr42d,ivgtyp) do j=jsta,jend do i=ista, iend if (arrayr42d(i,j) < spval) then @@ -1622,7 +1631,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! soil type if(trim(fieldname)=='sotyp') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,arrayr42d,isltyp) do j=jsta,jend do i=ista, iend if (arrayr42d(i,j) < spval) then @@ -1636,7 +1645,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! inst cloud top pressure if(trim(fieldname)=='prescnvclt') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,ptop,arrayr42d) do j=jsta,jend do i=ista, iend ptop(i,j) = arrayr42d(i,j) @@ -1647,7 +1656,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! inst cloud bottom pressure if(trim(fieldname)=='prescnvclb') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,pbot,arrayr42d) do j=jsta,jend do i=ista, iend pbot(i,j) = arrayr42d(i,j) @@ -1658,7 +1667,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged low cloud top pressure if(trim(fieldname)=='pres_avelct') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,ptopl,arrayr42d) do j=jsta,jend do i=ista, iend ptopl(i,j) = arrayr42d(i,j) @@ -1668,7 +1677,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! 
time averaged low cloud bottom pressure if(trim(fieldname)=='pres_avelcb') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,pbotl,arrayr42d) do j=jsta,jend do i=ista, iend pbotl(i,j) = arrayr42d(i,j) @@ -1678,7 +1687,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged low cloud top temperature if(trim(fieldname)=='tmp_avelct') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,ttopl,arrayr42d) do j=jsta,jend do i=ista, iend ttopl(i,j) = arrayr42d(i,j) @@ -1688,7 +1697,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged middle cloud top pressure if(trim(fieldname)=='pres_avemct') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,ptopm,arrayr42d) do j=jsta,jend do i=ista, iend ptopm(i,j) = arrayr42d(i,j) @@ -1698,7 +1707,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged middle cloud bottom pressure if(trim(fieldname)=='pres_avemcb') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,pbotm,arrayr42d) do j=jsta,jend do i=ista, iend pbotm(i,j) = arrayr42d(i,j) @@ -1708,7 +1717,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged middle cloud top temperature if(trim(fieldname)=='tmp_avemct') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,ttopm,arrayr42d) do j=jsta,jend do i=ista, iend ttopm(i,j) = arrayr42d(i,j) @@ -1718,7 +1727,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged high cloud top pressure if(trim(fieldname)=='pres_avehct') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,ptoph,arrayr42d) do j=jsta,jend do i=ista, iend ptoph(i,j) = arrayr42d(i,j) @@ -1728,7 +1737,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged high cloud bottom pressure if(trim(fieldname)=='pres_avehcb') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,pboth,arrayr42d) do j=jsta,jend do i=ista, iend pboth(i,j) = arrayr42d(i,j) @@ -1738,7 +1747,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged high cloud top temperature if(trim(fieldname)=='tmp_avehct') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,ttoph,arrayr42d) do j=jsta,jend do i=ista, iend ttoph(i,j) = arrayr42d(i,j) @@ -1748,7 +1757,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged boundary layer cloud cover if(trim(fieldname)=='tcdc_avebndcl') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,pblcfr,arrayr42d) do j=jsta,jend do i=ista, iend pblcfr(i,j) = arrayr42d(i,j) @@ -1759,7 +1768,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! cloud work function if(trim(fieldname)=='cwork_aveclm') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,cldwork,arrayr42d) do j=jsta,jend do i=ista, iend cldwork(i,j) = arrayr42d(i,j) @@ -1769,7 +1778,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! 
water runoff if(trim(fieldname)=='watr_acc') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,runoff,arrayr42d,sm) do j=jsta,jend do i=ista, iend runoff(i,j) = arrayr42d(i,j) @@ -1780,7 +1789,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! shelter max temperature if(trim(fieldname)=='tmax_max2m') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,maxtshltr,arrayr42d) do j=jsta,jend do i=ista, iend maxtshltr(i,j) = arrayr42d(i,j) @@ -1790,7 +1799,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! shelter min temperature if(trim(fieldname)=='tmin_min2m') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,mintshltr,arrayr42d) do j=jsta,jend do i=ista, iend mintshltr(i,j) = arrayr42d(i,j) @@ -1800,7 +1809,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! ice thickness if(trim(fieldname)=='icetk') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,dzice,arrayr42d) do j=jsta,jend do i=ista, iend dzice(i,j) = arrayr42d(i,j) @@ -1810,7 +1819,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! wilting point if(trim(fieldname)=='wilt') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend, spval,smcwlt,arrayr42d,sm) do j=jsta,jend do i=ista, iend smcwlt(i,j) = arrayr42d(i,j) @@ -1821,7 +1830,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! sunshine duration if(trim(fieldname)=='sunsd_acc') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,suntime,arrayr42d) do j=jsta,jend do i=ista, iend suntime(i,j) = arrayr42d(i,j) @@ -1831,7 +1840,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! field capacity if(trim(fieldname)=='fldcp') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,fieldcapa,arrayr42d,sm) do j=jsta,jend do i=ista, iend fieldcapa(i,j) = arrayr42d(i,j) @@ -1842,7 +1851,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged surface visible beam downward solar flux if(trim(fieldname)=='vbdsf_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,avisbeamswin,arrayr42d) do j=jsta,jend do i=ista, iend avisbeamswin(i,j) = arrayr42d(i,j) @@ -1852,7 +1861,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged surface visible diffuse downward solar flux if(trim(fieldname)=='vddsf_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,avisdiffswin,arrayr42d) do j=jsta,jend do i=ista, iend avisdiffswin(i,j) = arrayr42d(i,j) @@ -1862,7 +1871,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged surface near IR beam downward solar flux if(trim(fieldname)=='nbdsf_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,airbeamswin,arrayr42d) do j=jsta,jend do i=ista, iend airbeamswin(i,j) = arrayr42d(i,j) @@ -1872,7 +1881,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! 
time averaged surface near IR diffuse downward solar flux if(trim(fieldname)=='nddsf_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,airdiffswin,arrayr42d) do j=jsta,jend do i=ista, iend airdiffswin(i,j) = arrayr42d(i,j) @@ -1882,7 +1891,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged surface clear sky outgoing LW if(trim(fieldname)=='csulf') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,alwoutc,arrayr42d) do j=jsta,jend do i=ista, iend alwoutc(i,j) = arrayr42d(i,j) @@ -1892,7 +1901,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged TOA clear sky outgoing LW if(trim(fieldname)=='csulftoa') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,alwtoac,arrayr42d) do j=jsta,jend do i=ista, iend alwtoac(i,j) = arrayr42d(i,j) @@ -1902,7 +1911,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged surface clear sky outgoing SW if(trim(fieldname)=='csusf') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,aswoutc,arrayr42d) do j=jsta,jend do i=ista, iend aswoutc(i,j) = arrayr42d(i,j) @@ -1912,7 +1921,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged TOA clear sky outgoing SW if(trim(fieldname)=='csusftoa') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,aswtoac,arrayr42d) do j=jsta,jend do i=ista, iend aswtoac(i,j) = arrayr42d(i,j) @@ -1922,7 +1931,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged surface clear sky incoming LW if(trim(fieldname)=='csdlf') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,alwinc,arrayr42d) do j=jsta,jend do i=ista, iend alwinc(i,j) = arrayr42d(i,j) @@ -1932,7 +1941,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! time averaged surface clear sky incoming SW if(trim(fieldname)=='csdsf') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,aswinc,arrayr42d) do j=jsta,jend do i=ista, iend aswinc(i,j) = arrayr42d(i,j) @@ -1942,7 +1951,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! shelter max specific humidity if(trim(fieldname)=='spfhmax_max2m') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,maxqshltr,arrayr42d) do j=jsta,jend do i=ista, iend maxqshltr(i,j) = arrayr42d(i,j) @@ -1952,7 +1961,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! shelter min temperature if(trim(fieldname)=='spfhmin_min2m') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,minqshltr,arrayr42d) do j=jsta,jend do i=ista, iend minqshltr(i,j) = arrayr42d(i,j) @@ -1962,7 +1971,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! 
storm runoffs if(trim(fieldname)=='ssrun_acc') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,ssroff,arrayr42d,sm) do j=jsta,jend do i=ista, iend ssroff(i,j) = arrayr42d(i,j) @@ -1973,7 +1982,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! direct soil evaporation if(trim(fieldname)=='evbs_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,avgedir,arrayr42d,sm) do j=jsta,jend do i=ista, iend avgedir(i,j) = arrayr42d(i,j) @@ -1984,7 +1993,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! canopy water evap if(trim(fieldname)=='evcw_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,avgecan,arrayr42d,sm) do j=jsta,jend do i=ista, iend avgecan(i,j) = arrayr42d(i,j) @@ -1995,7 +2004,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! plant transpiration if(trim(fieldname)=='trans_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,avgetrans,arrayr42d,sm) do j=jsta,jend do i=ista, iend avgetrans(i,j) = arrayr42d(i,j) @@ -2006,7 +2015,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! snow sublimation if(trim(fieldname)=='sbsno_ave') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,avgesnow,arrayr42d,sm,sice) do j=jsta,jend do i=ista, iend avgesnow(i,j) = arrayr42d(i,j) @@ -2017,7 +2026,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! total soil moisture if(trim(fieldname)=='soilm') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,smstot,arrayr42d,sm) do j=jsta,jend do i=ista, iend smstot(i,j) = arrayr42d(i,j) @@ -2028,7 +2037,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! snow phase change heat flux if(trim(fieldname)=='snohf') then - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,snopcx,arrayr42d,sm) do j=jsta,jend do i=ista, iend snopcx(i,j) = arrayr42d(i,j) @@ -2050,7 +2059,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & allocate(arrayr43d(ista:iend,jsta:jend,kstart:kend)) arrayr43d = 0. do k=kstart,kend - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,k,arrayr43d,arrayr83d) do j=jsta,jend do i=ista,iend arrayr43d(i,j,k) = arrayr83d(i,j,k) @@ -2061,7 +2070,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! model level T if(trim(fieldname)=='tmp') then - !$omp parallel do private(i,j,l) + !$omp parallel do default(none) private(i,j,l) shared(lm,jsta,jend,ista,iend,t,arrayr43d) do l=1,lm do j=jsta,jend do i=ista, iend @@ -2071,7 +2080,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & enddo !! sig4 - !$omp parallel do private(i,j) + !$omp parallel do default(none) private(i,j,tlmh) shared(lm,jsta,jend,ista,iend,t,sigt4) do j=jsta,jend do i=ista, iend tlmh = t(i,j,lm) * t(i,j,lm) @@ -2082,7 +2091,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, & ! 
model level spfh
       if(trim(fieldname)=='spfh') then
-         !$omp parallel do private(i,j,l)
+         !$omp parallel do default(none) private(i,j,l) shared(lm,jsta,jend,ista,iend,q,arrayr43d)
          do l=1,lm
            do j=jsta,jend
              do i=ista, iend
@@ -2094,7 +2103,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &

 ! model level u wind
       if(trim(fieldname)=='ugrd') then
-         !$omp parallel do private(i,j,l)
+         !$omp parallel do default(none) private(i,j,l) shared(lm,jsta,jend,ista,iend,uh,arrayr43d)
          do l=1,lm
            do j=jsta,jend
              do i=ista, iend
@@ -2106,7 +2115,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &

 ! model level v wind
       if(trim(fieldname)=='vgrd') then
-         !$omp parallel do private(i,j,l)
+         !$omp parallel do default(none) private(i,j,l) shared(lm,jsta,jend,ista,iend,vh,arrayr43d)
          do l=1,lm
            do j=jsta,jend
              do i=ista, iend
@@ -2118,7 +2127,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &

 ! model level pressure thinkness
       if(trim(fieldname)=='dpres') then
-         !$omp parallel do private(i,j,l)
+         !$omp parallel do default(none) private(i,j,l) shared(lm,jsta,jend,ista,iend,dpres,arrayr43d)
          do l=1,lm
            do j=jsta,jend
              do i=ista, iend
@@ -2130,7 +2139,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &

 ! model level gh thinkness, model output negative delz
       if(trim(fieldname)=='delz') then
-         !$omp parallel do private(i,j,l)
+         !$omp parallel do default(none) private(i,j,l) shared(lm,jsta,jend,ista,iend,zint,arrayr43d)
          do l=1,lm
            do j=jsta,jend
              do i=ista, iend
@@ -2142,7 +2151,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &

 ! model level w
       if(trim(fieldname)=='dzdt') then
-         !$omp parallel do private(i,j,l)
+         !$omp parallel do default(none) private(i,j,l) shared(lm,jsta,jend,ista,iend,wh,arrayr43d)
          do l=1,lm
            do j=jsta,jend
              do i=ista, iend
@@ -2154,7 +2163,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &

 ! model level ozone mixing ratio
       if(trim(fieldname)=='o3mr') then
-         !$omp parallel do private(i,j,l)
+         !$omp parallel do default(none) private(i,j,l) shared(lm,jsta,jend,ista,iend,o3,arrayr43d)
          do l=1,lm
            do j=jsta,jend
              do i=ista, iend
@@ -2168,7 +2177,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &
     if (imp_physics == 11) then
 ! model level cloud water mixing ratio
       if(trim(fieldname)=='clwmr') then
-         !$omp parallel do private(i,j,k)
+         !$omp parallel do default(none) private(i,j,l) shared(lm,jsta,jend,ista,iend,qqw,arrayr43d)
          do l=1,lm
            do j=jsta,jend
              do i=ista, iend
@@ -2180,7 +2189,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &

 ! model level ice mixing ratio
       if(trim(fieldname)=='icmr') then
-         !$omp parallel do private(i,j,k)
+         !$omp parallel do default(none) private(i,j,l) shared(lm,jsta,jend,ista,iend,qqi,arrayr43d)
          do l=1,lm
            do j=jsta,jend
              do i=ista, iend
@@ -2192,7 +2201,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &

 ! model level rain water mixing ratio
       if(trim(fieldname)=='rwmr') then
-         !$omp parallel do private(i,j,k)
+         !$omp parallel do default(none) private(i,j,l) shared(lm,jsta,jend,ista,iend,qqr,arrayr43d)
          do l=1,lm
            do j=jsta,jend
              do i=ista, iend
@@ -2204,7 +2213,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &

 ! model level snow mixing ratio
       if(trim(fieldname)=='snmr') then
-         !$omp parallel do private(i,j,k)
+         !$omp parallel do default(none) private(i,j,l) shared(lm,jsta,jend,ista,iend,qqs,arrayr43d)
          do l=1,lm
            do j=jsta,jend
              do i=ista, iend
@@ -2216,7 +2225,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &

 ! model level rain water mixing ratio
       if(trim(fieldname)=='grle') then
-         !$omp parallel do private(i,j,k)
+         !$omp parallel do default(none) private(i,j,l) shared(lm,jsta,jend,ista,iend,qqg,arrayr43d)
          do l=1,lm
            do j=jsta,jend
              do i=ista, iend
@@ -2230,7 +2239,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &

 ! model level cloud amount
       if(trim(fieldname)=='cld_amt') then
-         !$omp parallel do private(i,j,k)
+         !$omp parallel do default(none) private(i,j,l) shared(lm,jsta,jend,ista,iend,cfr,arrayr43d)
          do l=1,lm
            do j=jsta,jend
              do i=ista, iend
@@ -2242,7 +2251,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &

 ! model level ref3d
       if(trim(fieldname)=='ref3D') then
-         !$omp parallel do private(i,j,k)
+         !$omp parallel do default(none) private(i,j,l) shared(lm,jsta,jend,ista,iend,ref_10cm,arrayr43d)
          do l=1,lm
            do j=jsta,jend
              do i=ista, iend
@@ -2255,7 +2264,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &

 ! model level ref3d
       if(trim(fieldname)=='tke') then
-         !$omp parallel do private(i,j,k)
+         !$omp parallel do default(none) private(i,j,l) shared(lm,jsta,jend,ista,iend,q2,arrayr43d)
          do l=1,lm
            do j=jsta,jend
              do i=ista, iend
@@ -2280,7 +2289,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &
     enddo file_loop_all

 ! recompute full layer of zint
-!$omp parallel do private(i,j)
+!$omp parallel do default(none) private(i,j) shared(jsta,jend,im,lp1,spval,zint,fis)
      do j=jsta,jend
        do i=1,im
          if (fis(i,j) /= spval) then
@@ -2291,7 +2300,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &
      enddo

      do l=lm,1,-1
-!$omp parallel do private(i,j)
+!$omp parallel do default(none) private(i,j) shared(l,jsta,jend,im,omga,wh,dpres,zint)
        do j=jsta,jend
          do i=1,im
            omga(i,j,l) = (-1.) * wh(i,j,l) * dpres(i,j,l)/zint(i,j,l)
@@ -2301,7 +2310,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &
      enddo

 ! compute pint from top down
-!$omp parallel do private(i,j)
+!$omp parallel do default(none) private(i,j) shared(jsta,jend,im,ak5,pint)
      do j=jsta,jend
        do i=1,im
          pint(i,j,1) = ak5(1)
@@ -2309,7 +2318,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &
      end do

      do l=2,lp1
-!$omp parallel do private(i,j)
+!$omp parallel do default(none) private(i,j) shared(l,jsta,jend,im,pint,dpres)
        do j=jsta,jend
          do i=1,im
            pint(i,j,l) = pint(i,j,l-1) + dpres(i,j,l-1)
@@ -2319,6 +2328,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &

 !compute pmid from averaged two layer pint
      do l=lm,1,-1
+!$omp parallel do default(none) private(i,j) shared(l,jsta,jend,im,pmid,pint)
        do j=jsta,jend
          do i=1,im
            pmid(i,j,l) = 0.5*(pint(i,j,l)+pint(i,j,l+1))
@@ -2326,7 +2336,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &
      enddo
      enddo

-!$omp parallel do private(i,j)
+!$omp parallel do default(none) private(i,j) shared(jsta,jend,im,spval,pt,pd,pint)
      do j=jsta,jend
        do i=1,im
          pd(i,j) = spval
@@ -2337,7 +2347,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &

 ! compute alpint
      do l=lp1,1,-1
-!$omp parallel do private(i,j)
+!$omp parallel do default(none) private(i,j) shared(l,jsta,jend,im,alpint,pint)
        do j=jsta,jend
          do i=1,im
            alpint(i,j,l)=log(pint(i,j,l))
@@ -2347,7 +2357,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &

 ! compute zmid
      do l=lm,1,-1
-!$omp parallel do private(i,j)
+!$omp parallel do default(none) private(i,j) shared(l,jsta,jend,im,zmid,zint,pmid,alpint)
        do j=jsta,jend
          do i=1,im
            zmid(i,j,l)=zint(i,j,l+1)+(zint(i,j,l)-zint(i,j,l+1))* &
@@ -2365,7 +2375,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &

 !     minval(alpint(1:im,jsta:jend,1))
 ! surface potential T, and potential T at roughness length
-!$omp parallel do private(i,j)
+!$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,spval,lp1,sm,ths,sst,thz0,pint)
      do j=jsta,jend
        do i=ista, iend
 !assign sst
@@ -2384,7 +2394,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &
 ! compute cwm for gfdlmp
      if( imp_physics == 11 ) then
        do l=1,lm
-!$omp parallel do private(i,j)
+!$omp parallel do default(none) private(i,j) shared(l,jsta,jend,ista,iend,cwm,qqg,qqs,qqr,qqi,qqw)
          do j=jsta,jend
            do i=ista,iend
              cwm(i,j,l)=qqg(i,j,l)+qqs(i,j,l)+qqr(i,j,l)+qqi(i,j,l)+qqw(i,j,l)
@@ -2394,7 +2404,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &
      endif

 ! estimate 2m pres and convert t2m to theta
-!$omp parallel do private(i,j)
+!$omp parallel do default(none) private(i,j) shared(jsta,jend,ista,iend,lm,pshltr,pint,tshltr)
      do j=jsta,jend
        do i=ista, iend
          pshltr(I,J)=pint(i,j,lm+1)*EXP(-0.068283/tshltr(i,j))
@@ -2448,7 +2458,7 @@ subroutine set_postvars_gfs(wrt_int_state,mpicomp,setvar_atmfile, &
 51 format(8(F8.1,1X))
      endif
 !
-!$omp parallel do private(l)
+!$omp parallel do default(none) private(l) shared(lsm,alsl,spl)
      do l = 1,lsm
        alsl(l) = log(spl(l))
      end do

From bf5bfa1957d3684f3372f93a1cc3ac3a63a6d923 Mon Sep 17 00:00:00 2001
From: Dom Heinzeller
Date: Wed, 4 Mar 2020 09:34:36 -0700
Subject: [PATCH 3/4] Update submodule pointer for .gitmodules for code review and testing

---
 .gitmodules | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/.gitmodules b/.gitmodules
index 2fdeca40a..13eb4af2e 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,7 +1,9 @@
 [submodule "atmos_cubed_sphere"]
   path = atmos_cubed_sphere
-  url = https://github.com/NCAR/GFDL_atmos_cubed_sphere
-  branch = dtc/develop
+  #url = https://github.com/NCAR/GFDL_atmos_cubed_sphere
+  #branch = dtc/develop
+  url = https://github.com/climbfuji/GFDL_atmos_cubed_sphere
+  branch = update_dtc_develop_from_emc
 [submodule "ccpp/framework"]
   path = ccpp/framework
   url = https://github.com/NCAR/ccpp-framework

From c6e17ef0f904f896dee6aceda6de57126c48f675 Mon Sep 17 00:00:00 2001
From: Dom Heinzeller
Date: Mon, 9 Mar 2020 12:42:42 -0600
Subject: [PATCH 4/4] Revert change to .gitmodules, update submodule pointer for atmos_cubed_sphere

---
 .gitmodules        | 6 ++----
 atmos_cubed_sphere | 2 +-
 2 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/.gitmodules b/.gitmodules
index 13eb4af2e..2fdeca40a 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,9 +1,7 @@
 [submodule "atmos_cubed_sphere"]
   path = atmos_cubed_sphere
-  #url = https://github.com/NCAR/GFDL_atmos_cubed_sphere
-  #branch = dtc/develop
-  url = https://github.com/climbfuji/GFDL_atmos_cubed_sphere
-  branch = update_dtc_develop_from_emc
+  url = https://github.com/NCAR/GFDL_atmos_cubed_sphere
+  branch = dtc/develop
 [submodule "ccpp/framework"]
   path = ccpp/framework
   url = https://github.com/NCAR/ccpp-framework
diff --git a/atmos_cubed_sphere b/atmos_cubed_sphere
index 4858d3383..4279bf78e 160000
--- a/atmos_cubed_sphere
+++ b/atmos_cubed_sphere
@@ -1 +1 @@
-Subproject commit 4858d33838adcd87c1cd000ab4737dc0e7e56731
+Subproject commit 4279bf78eb4ae0bad908eb991e3fb216018b7aea
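
Note on the OpenMP changes in PATCH 2/4 above: each loop nest moves from an implicit
"!$omp parallel do private(...)" to "default(none)" with an explicit shared list, so every
variable referenced inside the parallel region must be scoped by hand, and anything left
out becomes a compile-time error instead of a silently shared variable (the
private(i,j,k)-versus-l mismatches corrected above are exactly the kind of bug this style
catches). The following is a minimal, self-contained sketch of that pattern; it is not
part of the patches, and the array names, bounds, and the copy performed here are
illustrative only.

    program omp_default_none_sketch
      implicit none
      integer :: lm, jsta, jend, ista, iend
      integer :: i, j, l
      real, allocatable :: arrayr43d(:,:,:)   ! stand-in for a field pulled off the import state
      real, allocatable :: q(:,:,:)           ! stand-in for the post-processing copy

      lm = 4; jsta = 1; jend = 8; ista = 1; iend = 8
      allocate(arrayr43d(ista:iend,jsta:jend,lm), q(ista:iend,jsta:jend,lm))
      call random_number(arrayr43d)

      ! default(none) forces an explicit decision for every variable in the region:
      ! loop indices are private, everything else used inside is listed as shared.
      !$omp parallel do default(none) private(i,j,l) shared(lm,jsta,jend,ista,iend,q,arrayr43d)
      do l=1,lm
        do j=jsta,jend
          do i=ista,iend
            q(i,j,l) = arrayr43d(i,j,l)
          enddo
        enddo
      enddo

      print *,'max q = ',maxval(q)
    end program omp_default_none_sketch

Compiled with OpenMP enabled (e.g. -fopenmp or -qopenmp), dropping q or arrayr43d from the
shared list makes the directive fail to compile, which is the point of the default(none)
style adopted throughout the hunks above.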