From 94535291dc53b2ac21041ef521d6cf4056f79a34 Mon Sep 17 00:00:00 2001 From: Denise Worthen Date: Fri, 25 Nov 2022 08:58:08 -0500 Subject: [PATCH] Update CICE (#54) * update to include recent PRs to Consortium/main --- cicecore/cicedyn/analysis/ice_diagnostics.F90 | 156 ++-- cicecore/cicedyn/analysis/ice_history.F90 | 2 +- cicecore/cicedyn/analysis/ice_history_fsd.F90 | 53 +- .../cicedyn/analysis/ice_history_pond.F90 | 30 +- .../cicedyn/analysis/ice_history_shared.F90 | 155 +++- cicecore/cicedyn/dynamics/ice_dyn_eap.F90 | 150 ++- .../cicedyn/dynamics/ice_transport_driver.F90 | 15 - .../cicedyn/dynamics/ice_transport_remap.F90 | 1 + cicecore/cicedyn/general/ice_flux.F90 | 22 +- cicecore/cicedyn/general/ice_forcing.F90 | 197 +++- cicecore/cicedyn/general/ice_forcing_bgc.F90 | 144 ++- cicecore/cicedyn/general/ice_state.F90 | 6 +- cicecore/cicedyn/general/ice_step_mod.F90 | 142 ++- .../infrastructure/comm/mpi/ice_boundary.F90 | 64 ++ .../comm/mpi/ice_global_reductions.F90 | 69 -- .../comm/serial/ice_boundary.F90 | 64 ++ .../comm/serial/ice_global_reductions.F90 | 69 -- .../cicedyn/infrastructure/ice_domain.F90 | 1 + .../infrastructure/ice_restart_driver.F90 | 50 +- .../io/io_binary/ice_restart.F90 | 71 -- .../io/io_netcdf/ice_restart.F90 | 15 - cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 534 ++++++----- cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 | 54 +- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 48 +- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 877 +++++++++--------- cicecore/cicedynB/general/ice_init.F90 | 159 +--- cicecore/cicedynB/infrastructure/ice_grid.F90 | 443 ++++++--- .../infrastructure/io/io_pio2/ice_restart.F90 | 15 - .../drivers/direct/hadgem3/CICE_InitMod.F90 | 30 - .../drivers/direct/hadgem3/CICE_RunMod.F90 | 15 - .../direct/nemo_concepts/CICE_InitMod.F90 | 30 - .../direct/nemo_concepts/CICE_RunMod.F90 | 15 - cicecore/drivers/mct/cesm1/CICE_InitMod.F90 | 30 - cicecore/drivers/mct/cesm1/CICE_RunMod.F90 | 15 - .../drivers/mct/cesm1/ice_import_export.F90 | 6 +- .../drivers/mct/cesm1/ice_prescribed_mod.F90 | 10 +- cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 | 30 - cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 | 15 - .../drivers/nuopc/cmeps/ice_comp_nuopc.F90 | 187 ++-- .../drivers/nuopc/cmeps/ice_import_export.F90 | 31 +- cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 | 54 +- .../nuopc/cmeps/ice_prescribed_mod.F90 | 10 +- cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 | 30 - cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 | 15 - .../drivers/standalone/cice/CICE_InitMod.F90 | 30 - .../drivers/standalone/cice/CICE_RunMod.F90 | 15 - .../unittest/gridavgchk/CICE_InitMod.F90 | 30 - .../drivers/unittest/sumchk/CICE_InitMod.F90 | 30 - cicecore/shared/ice_arrays_column.F90 | 1 + cicecore/shared/ice_init_column.F90 | 29 - cicecore/shared/ice_restart_column.F90 | 74 -- configuration/scripts/cice.batch.csh | 17 +- configuration/scripts/cice.launch.csh | 111 ++- configuration/scripts/ice_in | 9 +- .../scripts/machines/Macros.narwhal_aocc | 2 +- .../scripts/machines/Macros.narwhal_intelhpcx | 57 ++ .../scripts/machines/env.badger_intel | 0 .../scripts/machines/env.cheyenne_gnu | 0 .../scripts/machines/env.cheyenne_intel | 0 .../scripts/machines/env.cheyenne_pgi | 0 .../scripts/machines/env.compy_intel | 0 configuration/scripts/machines/env.cori_intel | 0 configuration/scripts/machines/env.gaea_intel | 0 .../scripts/machines/env.narwhal_aocc | 21 +- .../scripts/machines/env.narwhal_cray | 20 +- .../scripts/machines/env.narwhal_gnu | 22 +- .../scripts/machines/env.narwhal_intel | 20 +- 
configuration/scripts/machines/env.onyx_cray | 0 configuration/scripts/machines/env.onyx_gnu | 0 configuration/scripts/machines/env.onyx_intel | 0 configuration/scripts/machines/env.ppp6_gnu | 3 +- .../scripts/machines/env.ppp6_gnu-impi | 7 + .../scripts/machines/env.ppp6_intel19 | 7 + configuration/scripts/options/set_nml.bgczm | 1 - .../scripts/options/set_nml.boxchan1e | 56 -- .../scripts/options/set_nml.boxchan1n | 56 -- .../scripts/options/set_nml.boxclosed | 34 +- configuration/scripts/options/set_nml.gbox180 | 6 +- configuration/scripts/tests/base_suite.ts | 13 +- configuration/scripts/tests/baseline.script | 6 +- configuration/scripts/tests/gridsys_suite.ts | 40 +- configuration/scripts/tests/io_suite.ts | 16 +- configuration/scripts/tests/nothread_suite.ts | 2 +- configuration/scripts/tests/omp_suite.ts | 12 +- .../convert_restarts.f90 | 4 - doc/source/cice_index.rst | 23 +- doc/source/science_guide/sg_dynamics.rst | 442 +++++---- doc/source/user_guide/ug_case_settings.rst | 14 +- doc/source/user_guide/ug_implementation.rst | 493 ++++++++-- 89 files changed, 3125 insertions(+), 2727 deletions(-) create mode 100644 configuration/scripts/machines/Macros.narwhal_intelhpcx mode change 100755 => 100644 configuration/scripts/machines/env.badger_intel mode change 100755 => 100644 configuration/scripts/machines/env.cheyenne_gnu mode change 100755 => 100644 configuration/scripts/machines/env.cheyenne_intel mode change 100755 => 100644 configuration/scripts/machines/env.cheyenne_pgi mode change 100755 => 100644 configuration/scripts/machines/env.compy_intel mode change 100755 => 100644 configuration/scripts/machines/env.cori_intel mode change 100755 => 100644 configuration/scripts/machines/env.gaea_intel mode change 100755 => 100644 configuration/scripts/machines/env.narwhal_aocc mode change 100755 => 100644 configuration/scripts/machines/env.narwhal_cray mode change 100755 => 100644 configuration/scripts/machines/env.narwhal_gnu mode change 100755 => 100644 configuration/scripts/machines/env.narwhal_intel mode change 100755 => 100644 configuration/scripts/machines/env.onyx_cray mode change 100755 => 100644 configuration/scripts/machines/env.onyx_gnu mode change 100755 => 100644 configuration/scripts/machines/env.onyx_intel delete mode 100644 configuration/scripts/options/set_nml.boxchan1e delete mode 100644 configuration/scripts/options/set_nml.boxchan1n diff --git a/cicecore/cicedyn/analysis/ice_diagnostics.F90 b/cicecore/cicedyn/analysis/ice_diagnostics.F90 index 83eb840d6..8879d6632 100644 --- a/cicecore/cicedyn/analysis/ice_diagnostics.F90 +++ b/cicecore/cicedyn/analysis/ice_diagnostics.F90 @@ -127,7 +127,7 @@ subroutine runtime_diags (dt) alvdr_init, alvdf_init, alidr_init, alidf_init use ice_flux_bgc, only: faero_atm, faero_ocn, fiso_atm, fiso_ocn use ice_global_reductions, only: global_sum, global_sum_prod, global_maxval - use ice_grid, only: lmask_n, lmask_s, tarean, tareas, grid_ice + use ice_grid, only: lmask_n, lmask_s, tarean, tareas, grid_ice, grid_average_X2Y use ice_state ! everything ! tcraig, this is likely to cause circular dependency because ice_prescribed_mod is high level routine #ifdef CESMCOUPLED @@ -199,7 +199,8 @@ subroutine runtime_diags (dt) prsnwavg, prhosavg, psmicetot, psmliqtot, psmtot real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1, work2 + uvelT, vvelT, & ! u,v on T points + work1, work2 ! temporary real (kind=dbl_kind), parameter :: & maxval_spval = -0.9_dbl_kind*HUGE(0.0_dbl_kind) ! 
spval to detect @@ -209,9 +210,6 @@ subroutine runtime_diags (dt) ! returns -HUGE which we want to avoid writing. The ! return value is checked against maxval_spval before writing. -! real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & -! uvelT, vvelT - character(len=*), parameter :: subname = '(runtime_diags)' call icepack_query_parameters(ktherm_out=ktherm, calc_Tsfc_out=calc_Tsfc) @@ -284,51 +282,45 @@ subroutine runtime_diags (dt) if (tr_pond_topo) then !$OMP PARALLEL DO PRIVATE(iblk,i,j,n) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = c0 - do n = 1, ncat - work1(i,j,iblk) = work1(i,j,iblk) & - + aicen(i,j,n,iblk) & - * trcrn(i,j,nt_apnd,n,iblk) & - * trcrn(i,j,nt_hpnd,n,iblk) + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + do n = 1, ncat + work1(i,j,iblk) = work1(i,j,iblk) & + + aicen(i,j,n,iblk) & + * trcrn(i,j,nt_apnd,n,iblk) & + * trcrn(i,j,nt_hpnd,n,iblk) + enddo + enddo enddo - enddo - enddo enddo !$OMP END PARALLEL DO ptotn = global_sum(work1, distrb_info, field_loc_center, tarean) ptots = global_sum(work1, distrb_info, field_loc_center, tareas) endif - ! total ice-snow kinetic energy + ! total ice-snow kinetic energy, on T points. + if (grid_ice == 'B') then + call grid_average_X2Y('A',uvel ,'U',uvelT,'T') + call grid_average_X2Y('A',vvel ,'U',vvelT,'T') + elseif (grid_ice == 'C') then + call grid_average_X2Y('A',uvelE,'E',uvelT,'T') + call grid_average_X2Y('A',vvelN,'N',vvelT,'T') + elseif (grid_ice == 'CD') then + call grid_average_X2Y('A',uvelE,'E',uvelN,'N',uvelT,'T') + call grid_average_X2Y('A',vvelE,'E',vvelN,'N',vvelT,'T') + endif + !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - work1(i,j,iblk) = p5 & - * (rhos*vsno(i,j,iblk) + rhoi*vice(i,j,iblk)) & - * (uvel(i,j,iblk)**2 + vvel(i,j,iblk)**2) + work1(i,j,iblk) = p5 * (rhos*vsno(i,j,iblk) + rhoi*vice(i,j,iblk)) & + * (uvelT(i,j,iblk)**2 + vvelT(i,j,iblk)**2) enddo enddo enddo - ! Eventually do energy diagnostic on T points. -! if (grid_ice == 'CD') then -! !$OMP PARALLEL DO PRIVATE(iblk,i,j) -! do iblk = 1, nblocks -! do j = 1, ny_block -! do i = 1, nx_block -! call grid_average_X2Y('E2TS',uvelE,uvelT) -! call grid_average_X2Y('N2TS',vvelN,vvelT) -! work1(i,j,iblk) = p5 & -! * (rhos*vsno(i,j,iblk) + rhoi*vice(i,j,iblk)) & -! * (uvelT(i,j,iblk)*uvelT(i,j,iblk) & -! + vvelT(i,j,iblk)*vvelT(i,j,iblk)) -! enddo -! enddo -! enddo -! endif -! !$OMP END PARALLEL DO + !$OMP END PARALLEL DO ketotn = c0 ketots = c0 ketotn = global_sum(work1, distrb_info, field_loc_center, tarean) @@ -420,21 +412,22 @@ subroutine runtime_diags (dt) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - work1(i,j,iblk) = max(sqrt(uvelE(i,j,iblk)**2 & - + vvelE(i,j,iblk)**2), & - sqrt(uvelN(i,j,iblk)**2 & - + vvelN(i,j,iblk)**2)) + work1(i,j,iblk) = max(sqrt(uvelE(i,j,iblk)**2 + vvelE(i,j,iblk)**2), & + sqrt(uvelN(i,j,iblk)**2 + vvelN(i,j,iblk)**2)) enddo enddo enddo !$OMP END PARALLEL DO elseif (grid_ice == 'C') then + ! map uvelE to N and vvelN to E then compute max on E and N + call grid_average_X2Y('A',uvelE,'E',work1,'N') ! work1 =~ uvelN + call grid_average_X2Y('A',vvelN,'N',work2,'E') ! 
work2 =~ vvelE !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - work1(i,j,iblk) = sqrt(uvelE(i,j,iblk)**2 & - + vvelN(i,j,iblk)**2) + work1(i,j,iblk) = max(sqrt(uvelE(i,j,iblk)**2 + work2(i,j,iblk)**2), & + sqrt(work1(i,j,iblk)**2 + vvelN(i,j,iblk)**2)) enddo enddo enddo @@ -444,8 +437,7 @@ subroutine runtime_diags (dt) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - work1(i,j,iblk) = sqrt(uvel(i,j,iblk)**2 & - + vvel(i,j,iblk)**2) + work1(i,j,iblk) = sqrt(uvel(i,j,iblk)**2 + vvel(i,j,iblk)**2) enddo enddo enddo @@ -466,31 +458,31 @@ subroutine runtime_diags (dt) if (umaxn > umax_stab) then !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - if (abs(work1(i,j,iblk) - umaxn) < puny) then - write(nu_diag,*) ' ' - write(nu_diag,*) 'Warning, large ice speed' - write(nu_diag,*) 'my_task, iblk, i, j, umaxn:', & - my_task, iblk, i, j, umaxn - endif - enddo - enddo + do j = 1, ny_block + do i = 1, nx_block + if (abs(work1(i,j,iblk) - umaxn) < puny) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'Warning, large ice speed' + write(nu_diag,*) 'my_task, iblk, i, j, umaxn:', & + my_task, iblk, i, j, umaxn + endif + enddo + enddo enddo !$OMP END PARALLEL DO elseif (umaxs > umax_stab) then !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - if (abs(work1(i,j,iblk) - umaxs) < puny) then - write(nu_diag,*) ' ' - write(nu_diag,*) 'Warning, large ice speed' - write(nu_diag,*) 'my_task, iblk, i, j, umaxs:', & - my_task, iblk, i, j, umaxs - endif - enddo - enddo + do j = 1, ny_block + do i = 1, nx_block + if (abs(work1(i,j,iblk) - umaxs) < puny) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'Warning, large ice speed' + write(nu_diag,*) 'my_task, iblk, i, j, umaxs:', & + my_task, iblk, i, j, umaxs + endif + enddo + enddo enddo !$OMP END PARALLEL DO endif ! 
umax @@ -1357,14 +1349,14 @@ subroutine init_mass_diags do n=1,n_aero !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = trcr(i,j,nt_aero +4*(n-1),iblk)*vsno(i,j,iblk) & - + trcr(i,j,nt_aero+1+4*(n-1),iblk)*vsno(i,j,iblk) & - + trcr(i,j,nt_aero+2+4*(n-1),iblk)*vice(i,j,iblk) & - + trcr(i,j,nt_aero+3+4*(n-1),iblk)*vice(i,j,iblk) - enddo - enddo + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = trcr(i,j,nt_aero +4*(n-1),iblk)*vsno(i,j,iblk) & + + trcr(i,j,nt_aero+1+4*(n-1),iblk)*vsno(i,j,iblk) & + + trcr(i,j,nt_aero+2+4*(n-1),iblk)*vice(i,j,iblk) & + + trcr(i,j,nt_aero+3+4*(n-1),iblk)*vice(i,j,iblk) + enddo + enddo enddo !$OMP END PARALLEL DO totaeron(n)= global_sum(work1, distrb_info, field_loc_center, tarean) @@ -1377,17 +1369,17 @@ subroutine init_mass_diags totps = c0 !$OMP PARALLEL DO PRIVATE(iblk,i,j,n) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = c0 - do n = 1, ncat - work1(i,j,iblk) = work1(i,j,iblk) & - + aicen(i,j,n,iblk) & - * trcrn(i,j,nt_apnd,n,iblk) & - * trcrn(i,j,nt_hpnd,n,iblk) + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + do n = 1, ncat + work1(i,j,iblk) = work1(i,j,iblk) & + + aicen(i,j,n,iblk) & + * trcrn(i,j,nt_apnd,n,iblk) & + * trcrn(i,j,nt_hpnd,n,iblk) + enddo + enddo enddo - enddo - enddo enddo !$OMP END PARALLEL DO totpn = global_sum(work1, distrb_info, field_loc_center, tarean) diff --git a/cicecore/cicedyn/analysis/ice_history.F90 b/cicecore/cicedyn/analysis/ice_history.F90 index caaa56295..f5e7d0d16 100644 --- a/cicecore/cicedyn/analysis/ice_history.F90 +++ b/cicecore/cicedyn/analysis/ice_history.F90 @@ -3546,7 +3546,7 @@ subroutine accum_hist (dt) call accum_hist_drag (iblk) ! floe size distribution - call accum_hist_fsd (iblk) + call accum_hist_fsd (dt, iblk) ! 
advanced snow physics call accum_hist_snow (iblk) diff --git a/cicecore/cicedyn/analysis/ice_history_fsd.F90 b/cicecore/cicedyn/analysis/ice_history_fsd.F90 index 50fee99e7..b52db4e05 100644 --- a/cicecore/cicedyn/analysis/ice_history_fsd.F90 +++ b/cicecore/cicedyn/analysis/ice_history_fsd.F90 @@ -137,7 +137,7 @@ subroutine init_hist_fsd_2D do ns = 1, nstreams if (f_wave_sig_ht(1:1) /= 'x') & - call define_hist_field(n_wave_sig_ht,"wave_sig_ht","1",tstr2D, tcstr, & + call define_hist_field(n_wave_sig_ht,"wave_sig_ht","m",tstr2D, tcstr, & "significant height of wind and swell waves", & "from attenuated spectrum in ice", c1, c0, & ns, f_wave_sig_ht) @@ -147,7 +147,7 @@ subroutine init_hist_fsd_2D "for waves", c1, c0, & ns, f_aice_ww) if (f_diam_ww(1:1) /= 'x') & - call define_hist_field(n_diam_ww,"diam_ww","1",tstr2D, tcstr, & + call define_hist_field(n_diam_ww,"diam_ww","m",tstr2D, tcstr, & "Average (number) diameter of floes > Dmin", & "for waves", c1, c0, & ns, f_diam_ww) @@ -216,27 +216,27 @@ subroutine init_hist_fsd_3Df if (histfreq(ns) /= 'x') then if (f_afsd(1:1) /= 'x') & - call define_hist_field(n_afsd,"afsd", "1", tstr3Df, tcstr, & + call define_hist_field(n_afsd,"afsd", "1/m", tstr3Df, tcstr, & "areal floe size distribution", & "per unit bin width ", c1, c0, ns, f_afsd) if (f_dafsd_newi(1:1) /= 'x') & - call define_hist_field(n_dafsd_newi,"dafsd_newi","1",tstr3Df, tcstr, & + call define_hist_field(n_dafsd_newi,"dafsd_newi","1/s",tstr3Df, tcstr, & "Change in fsd: new ice", & "Avg over freq period", c1, c0, ns, f_dafsd_newi) if (f_dafsd_latg(1:1) /= 'x') & - call define_hist_field(n_dafsd_latg,"dafsd_latg","1",tstr3Df, tcstr, & + call define_hist_field(n_dafsd_latg,"dafsd_latg","1/s",tstr3Df, tcstr, & "Change in fsd: lateral growth", & "Avg over freq period", c1, c0, ns, f_dafsd_latg) if (f_dafsd_latm(1:1) /= 'x') & - call define_hist_field(n_dafsd_latm,"dafsd_latm","1",tstr3Df, tcstr, & + call define_hist_field(n_dafsd_latm,"dafsd_latm","1/s",tstr3Df, tcstr, & "Change in fsd: lateral melt", & "Avg over freq period", c1, c0, ns, f_dafsd_latm) if (f_dafsd_wave(1:1) /= 'x') & - call define_hist_field(n_dafsd_wave,"dafsd_wave","1",tstr3Df, tcstr, & + call define_hist_field(n_dafsd_wave,"dafsd_wave","1/s",tstr3Df, tcstr, & "Change in fsd: waves", & "Avg over freq period", c1, c0, ns, f_dafsd_wave) if (f_dafsd_weld(1:1) /= 'x') & - call define_hist_field(n_dafsd_weld,"dafsd_weld","1",tstr3Df, tcstr, & + call define_hist_field(n_dafsd_weld,"dafsd_weld","1/s",tstr3Df, tcstr, & "Change in fsd: welding", & "Avg over freq period", c1, c0, ns, f_dafsd_weld) endif ! if (histfreq(ns) /= 'x') @@ -272,7 +272,7 @@ subroutine init_hist_fsd_4Df if (histfreq(ns) /= 'x') then if (f_afsdn(1:1) /= 'x') & - call define_hist_field(n_afsdn,"afsdn","1",tstr4Df, tcstr, & + call define_hist_field(n_afsdn,"afsdn","1/m",tstr4Df, tcstr, & "areal floe size and thickness distribution", & "per unit bin width", c1, c0, ns, f_afsdn) @@ -288,16 +288,19 @@ end subroutine init_hist_fsd_4Df ! accumulate average ice quantities or snapshots ! author: Elizabeth C. 
Hunke, LANL - subroutine accum_hist_fsd (iblk) + subroutine accum_hist_fsd (dt, iblk) use ice_blocks, only: nx_block, ny_block use ice_constants, only: c0, c1, c2, c4 use ice_history_shared, only: a2D, a3Df, a4Df, nfsd_hist, & ncat_hist, accum_hist_field, n3Dacum, n4Dscum - use ice_state, only: trcrn, aicen_init, vicen, aice_init + use ice_state, only: trcrn, aicen, vicen, aice use ice_arrays_column, only: wave_sig_ht, floe_rad_c, floe_binwidth, & d_afsd_newi, d_afsd_latg, d_afsd_latm, d_afsd_wave, d_afsd_weld + real (kind=dbl_kind), intent(in) :: & + dt ! time step + integer (kind=int_kind), intent(in) :: & iblk ! block index @@ -342,7 +345,7 @@ subroutine accum_hist_fsd (iblk) worka(i,j) = c0 do n = 1, ncat_hist do k = 1, nfsd_hist - worka(i,j) = worka(i,j) + aicen_init(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) + worka(i,j) = worka(i,j) + aicen(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) end do end do end do @@ -357,7 +360,7 @@ subroutine accum_hist_fsd (iblk) workb = c0 do n = 1, ncat_hist do k = 1, nfsd_hist - workc = aicen_init(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) & + workc = aicen(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) & / (c4*floeshape*floe_rad_c(k)**2) ! number-mean radius worka(i,j) = worka(i,j) + workc * floe_rad_c(k) @@ -380,7 +383,7 @@ subroutine accum_hist_fsd (iblk) workb = c0 do n = 1, ncat_hist do k = 1, nfsd_hist - workb = workb + aicen_init(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) + workb = workb + aicen(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) worka(i,j) = worka(i,j) + vicen(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) end do end do @@ -398,12 +401,12 @@ subroutine accum_hist_fsd (iblk) do j = 1, ny_block do i = 1, nx_block worka(i,j) = c0 - if (aice_init(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then do k = 1, nfsd_hist do n = 1, ncat_hist worka(i,j) = worka(i,j) & + (trcrn(i,j,nt_fsd+k-1,n,iblk) * floe_rad_c(k) & - * aicen_init(i,j,n,iblk)/aice_init(i,j,iblk)) + * aicen(i,j,n,iblk)/aice(i,j,iblk)) end do end do endif @@ -416,12 +419,12 @@ subroutine accum_hist_fsd (iblk) do j = 1, ny_block do i = 1, nx_block worka(i,j) = c0 - if (aice_init(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then do k = 1, nfsd_hist do n = 1, ncat_hist worka(i,j) = worka(i,j) & + (c8*floeshape*trcrn(i,j,nt_fsd+k-1,n,iblk)*floe_rad_c(k) & - *aicen_init(i,j,n,iblk)/(c4*floeshape*floe_rad_c(k)**2 *aice_init(i,j,iblk))) + *aicen(i,j,n,iblk)/(c4*floeshape*floe_rad_c(k)**2 *aice(i,j,iblk))) end do end do endif @@ -442,7 +445,7 @@ subroutine accum_hist_fsd (iblk) worke(i,j,k)=c0 do n = 1, ncat_hist worke(i,j,k) = worke(i,j,k) + (trcrn(i,j,nt_fsd+k-1,n,iblk) & - * aicen_init(i,j,n,iblk)/floe_binwidth(k)) + * aicen(i,j,n,iblk)/floe_binwidth(k)) end do end do end do @@ -452,19 +455,19 @@ subroutine accum_hist_fsd (iblk) if (f_dafsd_newi(1:1)/= 'x') & call accum_hist_field(n_dafsd_newi-n3Dacum, iblk, nfsd_hist, & - d_afsd_newi(:,:,1:nfsd_hist,iblk), a3Df) + d_afsd_newi(:,:,1:nfsd_hist,iblk)/dt, a3Df) if (f_dafsd_latg(1:1)/= 'x') & call accum_hist_field(n_dafsd_latg-n3Dacum, iblk, nfsd_hist, & - d_afsd_latg(:,:,1:nfsd_hist,iblk), a3Df) + d_afsd_latg(:,:,1:nfsd_hist,iblk)/dt, a3Df) if (f_dafsd_latm(1:1)/= 'x') & call accum_hist_field(n_dafsd_latm-n3Dacum, iblk, nfsd_hist, & - d_afsd_latm(:,:,1:nfsd_hist,iblk), a3Df) + d_afsd_latm(:,:,1:nfsd_hist,iblk)/dt, a3Df) if (f_dafsd_wave(1:1)/= 'x') & call accum_hist_field(n_dafsd_wave-n3Dacum, iblk, nfsd_hist, & - d_afsd_wave(:,:,1:nfsd_hist,iblk), a3Df) + d_afsd_wave(:,:,1:nfsd_hist,iblk)/dt, a3Df) if (f_dafsd_weld(1:1)/= 'x') & call 
accum_hist_field(n_dafsd_weld-n3Dacum, iblk, nfsd_hist, & - d_afsd_weld(:,:,1:nfsd_hist,iblk), a3Df) + d_afsd_weld(:,:,1:nfsd_hist,iblk)/dt, a3Df) endif ! a3Df allocated ! 4D floe size, thickness category fields @@ -476,7 +479,7 @@ subroutine accum_hist_fsd (iblk) do j = 1, ny_block do i = 1, nx_block workd(i,j,k,n) = trcrn(i,j,nt_fsd+k-1,n,iblk) & - * aicen_init(i,j,n,iblk)/floe_binwidth(k) + * aicen(i,j,n,iblk)/floe_binwidth(k) end do end do end do diff --git a/cicecore/cicedyn/analysis/ice_history_pond.F90 b/cicecore/cicedyn/analysis/ice_history_pond.F90 index f6e4b8737..8818ff94e 100644 --- a/cicecore/cicedyn/analysis/ice_history_pond.F90 +++ b/cicecore/cicedyn/analysis/ice_history_pond.F90 @@ -268,13 +268,8 @@ subroutine accum_hist_pond (iblk) integer (kind=int_kind) :: & nt_apnd, nt_hpnd, nt_alvl, nt_ipnd -#ifdef UNDEPRECATE_CESMPONDS - logical (kind=log_kind) :: & - tr_pond_cesm, tr_pond_lvl, tr_pond_topo -#else logical (kind=log_kind) :: & tr_pond_lvl, tr_pond_topo -#endif real (kind=dbl_kind) :: & puny @@ -289,13 +284,8 @@ subroutine accum_hist_pond (iblk) !--------------------------------------------------------------- call icepack_query_parameters(puny_out=puny) -#ifdef UNDEPRECATE_CESMPONDS - call icepack_query_tracer_flags(tr_pond_cesm_out=tr_pond_cesm, & - tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo) -#else call icepack_query_tracer_flags(tr_pond_lvl_out=tr_pond_lvl, & tr_pond_topo_out=tr_pond_topo) -#endif call icepack_query_tracer_indices(nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd, & nt_alvl_out=nt_alvl, nt_ipnd_out=nt_ipnd) call icepack_warnings_flush(nu_diag) @@ -303,27 +293,9 @@ subroutine accum_hist_pond (iblk) file=__FILE__, line=__LINE__) if (allocated(a2D)) then -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - if (f_apond(1:1)/= 'x') & - call accum_hist_field(n_apond, iblk, & - trcr(:,:,nt_apnd,iblk), a2D) - if (f_apond_ai(1:1)/= 'x') & - call accum_hist_field(n_apond_ai, iblk, & - aice(:,:,iblk) * trcr(:,:,nt_apnd,iblk), a2D) - if (f_hpond(1:1)/= 'x') & - call accum_hist_field(n_hpond, iblk, & - trcr(:,:,nt_apnd,iblk) & - * trcr(:,:,nt_hpnd,iblk), a2D) - if (f_hpond_ai(1:1)/= 'x') & - call accum_hist_field(n_hpond_ai, iblk, & - aice(:,:,iblk) * trcr(:,:,nt_apnd,iblk) & - * trcr(:,:,nt_hpnd,iblk), a2D) - elseif (tr_pond_lvl) then -#else if (tr_pond_lvl) then -#endif + if (f_apond(1:1)/= 'x') & call accum_hist_field(n_apond, iblk, & trcr(:,:,nt_alvl,iblk) * trcr(:,:,nt_apnd,iblk), a2D) diff --git a/cicecore/cicedyn/analysis/ice_history_shared.F90 b/cicecore/cicedyn/analysis/ice_history_shared.F90 index 4ce3955d6..d9c62edde 100644 --- a/cicecore/cicedyn/analysis/ice_history_shared.F90 +++ b/cicecore/cicedyn/analysis/ice_history_shared.F90 @@ -120,9 +120,9 @@ module ice_history_shared avail_hist_fields(max_avail_hist_fields) integer (kind=int_kind), parameter, public :: & - nvar = 12 , & ! number of grid fields that can be written + nvar_grd = 21 , & ! number of grid fields that can be written ! excluding grid vertices - nvarz = 6 ! number of category/vertical grid fields written + nvar_grdz = 6 ! number of category/vertical grid fields written integer (kind=int_kind), public :: & ncat_hist , & ! number of thickness categories written <= ncat @@ -152,51 +152,44 @@ module ice_history_shared avgct(max_nstrm) ! average sample counter logical (kind=log_kind), public :: & - igrd (nvar), & ! true if grid field is written to output file - igrdz(nvarz) ! true if category/vertical grid field is written + igrd (nvar_grd), & ! 
true if grid field is written to output file + igrdz(nvar_grdz) ! true if category/vertical grid field is written character (len=25), public, parameter :: & - tcstr = 'area: tarea' , & ! vcellmeas for T cell quantities - ucstr = 'area: uarea' , & ! vcellmeas for U cell quantities - tstr2D = 'TLON TLAT time' , & ! vcoord for T cell quantities, 2D - ustr2D = 'ULON ULAT time' , & ! vcoord for U cell quantities, 2D - tstr3Dz = 'TLON TLAT VGRDi time',& ! vcoord for T cell quantities, 3D - ustr3Dz = 'ULON ULAT VGRDi time',& ! vcoord for U cell quantities, 3D - tstr3Dc = 'TLON TLAT NCAT time',& ! vcoord for T cell quantities, 3D - ustr3Dc = 'ULON ULAT NCAT time',& ! vcoord for U cell quantities, 3D - tstr3Db = 'TLON TLAT VGRDb time',& ! vcoord for T cell quantities, 3D - ustr3Db = 'ULON ULAT VGRDb time',& ! vcoord for U cell quantities, 3D - tstr3Da = 'TLON TLAT VGRDa time',& ! vcoord for T cell quantities, 3D - ustr3Da = 'ULON ULAT VGRDa time',& ! vcoord for U cell quantities, 3D - tstr3Df = 'TLON TLAT NFSD time',& ! vcoord for T cell quantities, 3D - ustr3Df = 'ULON ULAT NFSD time',& ! vcoord for U cell quantities, 3D - -!ferret + ! T grids + tcstr = 'area: tarea' , & ! vcellmeas for T cell quantities + tstr2D = 'TLON TLAT time' , & ! vcoord for T cell, 2D + tstr3Dc = 'TLON TLAT NCAT time', & ! vcoord for T cell, 3D, ncat + tstr3Da = 'TLON TLAT VGRDa time', & ! vcoord for T cell, 3D, ice-snow-bio + tstr3Db = 'TLON TLAT VGRDb time', & ! vcoord for T cell, 3D, ice-bio + tstr3Df = 'TLON TLAT NFSD time', & ! vcoord for T cell, 3D, fsd tstr4Di = 'TLON TLAT VGRDi NCAT', & ! vcoord for T cell, 4D, ice - ustr4Di = 'ULON ULAT VGRDi NCAT', & ! vcoord for U cell, 4D, ice tstr4Ds = 'TLON TLAT VGRDs NCAT', & ! vcoord for T cell, 4D, snow - ustr4Ds = 'ULON ULAT VGRDs NCAT', & ! vcoord for U cell, 4D, snow tstr4Df = 'TLON TLAT NFSD NCAT', & ! vcoord for T cell, 4D, fsd - ustr4Df = 'ULON ULAT NFSD NCAT' ! vcoord for U cell, 4D, fsd -!ferret -! tstr4Di = 'TLON TLAT VGRDi NCAT time', & ! ferret can not handle time -! ustr4Di = 'ULON ULAT VGRDi NCAT time', & ! index on 4D variables. -! tstr4Ds = 'TLON TLAT VGRDs NCAT time', & ! Use 'ferret' lines instead -! ustr4Ds = 'ULON ULAT VGRDs NCAT time', & ! (below also) -! tstr4Db = 'TLON TLAT VGRDb NCAT time', & -! ustr4Db = 'ULON ULAT VGRDb NCAT time', & -! tstr4Df = 'TLON TLAT NFSD NCAT time', & -! ustr4Df = 'ULON ULAT NFSD NCAT time', & + ! U grids + ucstr = 'area: uarea' , & ! vcellmeas for U cell quantities + ustr2D = 'ULON ULAT time' , & ! vcoord for U cell, 2D + ! N grids + ncstr = 'area: narea' , & ! vcellmeas for N cell quantities + nstr2D = 'NLON NLAT time' , & ! vcoord for N cell, 2D + ! E grids + ecstr = 'area: earea' , & ! vcellmeas for E cell quantities + estr2D = 'ELON ELAT time' ! vcoord for E cell, 2D !--------------------------------------------------------------- ! 
flags: write to output file if true or histfreq value !--------------------------------------------------------------- logical (kind=log_kind), public :: & - f_tmask = .true., f_blkmask = .true., & + f_tmask = .true., f_umask = .true., & + f_nmask = .true., f_emask = .true., & + f_blkmask = .true., & f_tarea = .true., f_uarea = .true., & + f_narea = .true., f_earea = .true., & f_dxt = .true., f_dyt = .true., & f_dxu = .true., f_dyu = .true., & + f_dxn = .true., f_dyn = .true., & + f_dxe = .true., f_dye = .true., & f_HTN = .true., f_HTE = .true., & f_ANGLE = .true., f_ANGLET = .true., & f_bounds = .true., f_NCAT = .true., & @@ -210,6 +203,11 @@ module ice_history_shared f_snowfrac = 'x', f_snowfracn = 'x', & f_Tsfc = 'm', f_aice = 'm', & f_uvel = 'm', f_vvel = 'm', & + f_icespd = 'm', f_icedir = 'm', & + f_uvelE = 'x', f_vvelE = 'x', & + f_icespdE = 'x', f_icedirE = 'x', & + f_uvelN = 'x', f_vvelN = 'x', & + f_icespdN = 'x', f_icedirN = 'x', & f_uatm = 'm', f_vatm = 'm', & f_atmspd = 'm', f_atmdir = 'm', & f_fswup = 'm', & @@ -250,6 +248,18 @@ module ice_history_shared f_strocnx = 'm', f_strocny = 'm', & f_strintx = 'm', f_strinty = 'm', & f_taubx = 'm', f_tauby = 'm', & + f_strairxN = 'x', f_strairyN = 'x', & + f_strtltxN = 'x', f_strtltyN = 'x', & + f_strcorxN = 'x', f_strcoryN = 'x', & + f_strocnxN = 'x', f_strocnyN = 'x', & + f_strintxN = 'x', f_strintyN = 'x', & + f_taubxN = 'x', f_taubyN = 'x', & + f_strairxE = 'x', f_strairyE = 'x', & + f_strtltxE = 'x', f_strtltyE = 'x', & + f_strcorxE = 'x', f_strcoryE = 'x', & + f_strocnxE = 'x', f_strocnyE = 'x', & + f_strintxE = 'x', f_strintyE = 'x', & + f_taubxE = 'x', f_taubyE = 'x', & f_strength = 'm', & f_divu = 'm', f_shear = 'm', & f_sig1 = 'm', f_sig2 = 'm', & @@ -339,10 +349,15 @@ module ice_history_shared !--------------------------------------------------------------- namelist / icefields_nml / & - f_tmask , f_blkmask , & + f_tmask , f_umask , & + f_nmask , f_emask , & + f_blkmask , & f_tarea , f_uarea , & + f_narea , f_earea , & f_dxt , f_dyt , & f_dxu , f_dyu , & + f_dxn , f_dyn , & + f_dxe , f_dye , & f_HTN , f_HTE , & f_ANGLE , f_ANGLET , & f_bounds , f_NCAT , & @@ -354,6 +369,12 @@ module ice_history_shared f_snowfrac, f_snowfracn, & f_Tsfc, f_aice , & f_uvel, f_vvel , & + f_icespd, f_icedir , & +! For now, C and CD grid quantities are controlled by the generic (originally B-grid) namelist flag +! f_uvelE, f_vvelE , & +! f_icespdE, f_icedirE , & +! f_uvelN, f_vvelN , & +! f_icespdN, f_icedirN , & f_uatm, f_vatm , & f_atmspd, f_atmdir , & f_fswup, & @@ -394,6 +415,18 @@ module ice_history_shared f_strocnx, f_strocny , & f_strintx, f_strinty , & f_taubx, f_tauby , & +! f_strairxN, f_strairyN , & +! f_strtltxN, f_strtltyN , & +! f_strcorxN, f_strcoryN , & +! f_strocnxN, f_strocnyN , & +! f_strintxN, f_strintyN , & +! f_taubxN, f_taubyN , & +! f_strairxE, f_strairyE , & +! f_strtltxE, f_strtltyE , & +! f_strcorxE, f_strcoryE , & +! f_strocnxE, f_strocnyE , & +! f_strintxE, f_strintyE , & +! 
f_taubxE, f_taubyE , & f_strength, & f_divu, f_shear , & f_sig1, f_sig2 , & @@ -484,17 +517,26 @@ module ice_history_shared integer (kind=int_kind), parameter, public :: & n_tmask = 1, & - n_blkmask = 2, & - n_tarea = 3, & - n_uarea = 4, & - n_dxt = 5, & - n_dyt = 6, & - n_dxu = 7, & - n_dyu = 8, & - n_HTN = 9, & - n_HTE = 10, & - n_ANGLE = 11, & - n_ANGLET = 12, & + n_umask = 2, & + n_nmask = 3, & + n_emask = 4, & + n_blkmask = 5, & + n_tarea = 6, & + n_uarea = 7, & + n_narea = 8, & + n_earea = 9, & + n_dxt = 10, & + n_dyt = 11, & + n_dxu = 12, & + n_dyu = 13, & + n_dxn = 14, & + n_dyn = 15, & + n_dxe = 16, & + n_dye = 17, & + n_HTN = 18, & + n_HTE = 19, & + n_ANGLE = 20, & + n_ANGLET = 21, & n_NCAT = 1, & n_VGRDi = 2, & @@ -506,7 +548,11 @@ module ice_history_shared n_lont_bnds = 1, & n_latt_bnds = 2, & n_lonu_bnds = 3, & - n_latu_bnds = 4 + n_latu_bnds = 4, & + n_lonn_bnds = 5, & + n_latn_bnds = 6, & + n_lone_bnds = 7, & + n_late_bnds = 8 integer (kind=int_kind), dimension(max_nstrm), public :: & ! n_example , & @@ -514,6 +560,11 @@ module ice_history_shared n_snowfrac , n_snowfracn , & n_Tsfc , n_aice , & n_uvel , n_vvel , & + n_icespd , n_icedir , & + n_uvelE , n_vvelE , & + n_icespdE , n_icedirE , & + n_uvelN , n_vvelN , & + n_icespdN , n_icedirN , & n_uatm , n_vatm , & n_atmspd , n_atmdir , & n_sice , & @@ -556,6 +607,18 @@ module ice_history_shared n_strocnx , n_strocny , & n_strintx , n_strinty , & n_taubx , n_tauby , & + n_strairxN , n_strairyN , & + n_strtltxN , n_strtltyN , & + n_strcorxN , n_strcoryN , & + n_strocnxN , n_strocnyN , & + n_strintxN , n_strintyN , & + n_taubxN , n_taubyN , & + n_strairxE , n_strairyE , & + n_strtltxE , n_strtltyE , & + n_strcorxE , n_strcoryE , & + n_strocnxE , n_strocnyE , & + n_strintxE , n_strintyE , & + n_taubxE , n_taubyE , & n_strength , & n_divu , n_shear , & n_sig1 , n_sig2 , & diff --git a/cicecore/cicedyn/dynamics/ice_dyn_eap.F90 b/cicecore/cicedyn/dynamics/ice_dyn_eap.F90 index f71d959da..28a047c4e 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_eap.F90 @@ -134,19 +134,19 @@ subroutine eap (dt) dyn_prep1, dyn_prep2, stepu, dyn_finish, & seabed_stress_factor_LKD, seabed_stress_factor_prob, & seabed_stress_method, seabed_stress, & - stack_fields, unstack_fields + stack_fields, unstack_fields, iceTmask, iceUmask use ice_flux, only: rdg_conv, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & - strocnxT, strocnyT, strax, stray, & + strax, stray, & TbU, hwater, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_grid, only: tmask, umask, dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, & - tarear, uarear, grid_average_X2Y, iceumask, & + tarear, uarear, grid_average_X2Y, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv - use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & + use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, divu, shear, & aice_init, aice0, aicen, vicen, strength use ice_timers, only: timer_dynamics, timer_bound, & ice_timer_start, ice_timer_stop @@ -163,31 +163,33 @@ subroutine eap (dt) i, j, ij integer (kind=int_kind), dimension(max_blocks) :: & - icellt , & ! no. of cells where icetmask = 1 - icellu ! no. of cells where iceumask = 1 + icellT , & ! no. of cells where iceTmask = .true. + icellU ! no. of cells where iceUmask = .true. 
integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks) :: & - indxti , & ! compressed index in i-direction - indxtj , & ! compressed index in j-direction - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj , & ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & uocnU , & ! i ocean current (m/s) vocnU , & ! j ocean current (m/s) ss_tltxU , & ! sea surface slope, x-direction (m/m) ss_tltyU , & ! sea surface slope, y-direction (m/m) + cdn_ocnU , & ! ocn drag coefficient tmass , & ! total mass of ice and snow (kg/m^2) waterxU , & ! for ocean stress calculation, x (m/s) wateryU , & ! for ocean stress calculation, y (m/s) forcexU , & ! work array: combined atm stress and ocn tilt, x forceyU , & ! work array: combined atm stress and ocn tilt, y - aiU , & ! ice fraction on u-grid umass , & ! total mass of ice and snow (u grid) umassdti ! mass of U-cell/dte (kg/m^2 s) real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:) ! temporary for stacking fields for halo update + fld2(:,:,:,:), & ! temporary for stacking fields for halo update + fld3(:,:,:,:), & ! temporary for stacking fields for halo update + fld4(:,:,:,:) ! temporary for stacking fields for halo update real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & strtmp ! stress combinations for momentum equation @@ -196,7 +198,6 @@ subroutine eap (dt) calc_strair integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & - icetmask , & ! ice extent mask (T-cell) halomask ! ice mask for halo update type (ice_halo) :: & @@ -205,10 +206,6 @@ subroutine eap (dt) type (block) :: & this_block ! block information for current block - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1 , & ! temporary - work2 ! temporary - character(len=*), parameter :: subname = '(eap)' call ice_timer_start(timer_dynamics) ! dynamics @@ -218,6 +215,8 @@ subroutine eap (dt) !----------------------------------------------------------------- allocate(fld2(nx_block,ny_block,2,max_blocks)) + allocate(fld3(nx_block,ny_block,3,max_blocks)) + allocate(fld4(nx_block,ny_block,4,max_blocks)) ! This call is needed only if dt changes during runtime. ! call set_evp_parameters (dt) @@ -256,13 +255,13 @@ subroutine eap (dt) ilo, ihi, jlo, jhi, & aice (:,:,iblk), vice (:,:,iblk), & vsno (:,:,iblk), tmask (:,:,iblk), & - tmass (:,:,iblk), icetmask(:,:,iblk)) + tmass (:,:,iblk), iceTmask(:,:,iblk)) enddo ! iblk !$OMP END PARALLEL DO call ice_timer_start(timer_bound) - call ice_HaloUpdate (icetmask, halo_info, & + call ice_HaloUpdate (iceTmask, halo_info, & field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) @@ -270,8 +269,18 @@ subroutine eap (dt) ! 
convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('F', tmass , 'T' , umass, 'U') - call grid_average_X2Y('F', aice_init, 'T' , aiU , 'U') + call stack_fields(tmass, aice_init, cdn_ocn, fld3) + call ice_HaloUpdate (fld3, halo_info, & + field_loc_center, field_type_scalar) + call stack_fields(uocn, vocn, ss_tltx, ss_tlty, fld4) + call ice_HaloUpdate (fld4, halo_info, & + field_loc_center, field_type_vector) + call unstack_fields(fld3, tmass, aice_init, cdn_ocn) + call unstack_fields(fld4, uocn, vocn, ss_tltx, ss_tlty) + + call grid_average_X2Y('S', tmass , 'T' , umass , 'U') + call grid_average_X2Y('S', aice_init, 'T' , aiU , 'U') + call grid_average_X2Y('S', cdn_ocn , 'T' , cdn_ocnU, 'U') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnU , 'U') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnU , 'U') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxU, 'U') @@ -314,16 +323,16 @@ subroutine eap (dt) call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt (iblk), icellu (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellT (iblk), icellU (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & - icetmask (:,:,iblk), iceumask (:,:,iblk), & + iceTmask (:,:,iblk), iceUmask (:,:,iblk), & fmU (:,:,iblk), dt, & strtltxU (:,:,iblk), strtltyU (:,:,iblk), & strocnxU (:,:,iblk), strocnyU (:,:,iblk), & @@ -347,7 +356,7 @@ subroutine eap (dt) do j = 1, ny_block do i = 1, nx_block - if (icetmask(i,j,iblk)==0) then + if (.not.iceTmask(i,j,iblk)) then if (tmask(i,j,iblk)) then ! structure tensor a11_1(i,j,iblk) = p5 @@ -364,7 +373,7 @@ subroutine eap (dt) a12_2(i,j,iblk) = c0 a12_3(i,j,iblk) = c0 a12_4(i,j,iblk) = c0 - endif ! icetmask + endif ! iceTmask enddo ! i enddo ! j @@ -374,9 +383,9 @@ subroutine eap (dt) !----------------------------------------------------------------- strength(:,:,iblk) = c0 ! 
initialize - do ij = 1, icellt(iblk) - i = indxti(ij, iblk) - j = indxtj(ij, iblk) + do ij = 1, icellT(iblk) + i = indxTi(ij, iblk) + j = indxTj(ij, iblk) call icepack_ice_strength(ncat=ncat, & aice = aice (i,j, iblk), & vice = vice (i,j, iblk), & @@ -405,7 +414,7 @@ subroutine eap (dt) if (maskhalo_dyn) then call ice_timer_start(timer_bound) halomask = 0 - where (iceumask) halomask = 1 + where (iceUmask) halomask = 1 call ice_HaloUpdate (halomask, halo_info, & field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) @@ -421,8 +430,8 @@ subroutine eap (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_LKD (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj(:,iblk), & vice (:,:,iblk), aice(:,:,iblk), & hwater(:,:,iblk), TbU (:,:,iblk)) enddo @@ -432,8 +441,8 @@ subroutine eap (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_prob (nx_block , ny_block , & - icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & - icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + icellT(iblk), indxTi(:,iblk), indxTj(:,iblk), & + icellU(iblk), indxUi(:,iblk), indxUj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk), & hwater (:,:,iblk), TbU (:,:,iblk)) enddo @@ -453,8 +462,8 @@ subroutine eap (dt) ! call ice_timer_start(timer_tmp1,iblk) call stress_eap (nx_block, ny_block, & ksub, ndte, & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & arlx1i, denom1, & uvel (:,:,iblk), vvel (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & @@ -491,8 +500,8 @@ subroutine eap (dt) ! call ice_timer_start(timer_tmp2,iblk) call stepu (nx_block, ny_block, & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocnU (:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), strtmp (:,:,:), & uocnU (:,:,iblk), vocnU (:,:,iblk), & waterxU (:,:,iblk), wateryU (:,:,iblk), & @@ -513,8 +522,8 @@ subroutine eap (dt) ! call ice_timer_start(timer_tmp3,iblk) if (mod(ksub,10) == 1) then ! only called every 10th timestep call stepa (nx_block , ny_block , & - dtei , icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + dtei , icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & a11 (:,:,iblk), a12 (:,:,iblk), & a11_1 (:,:,iblk), a11_2 (:,:,iblk), & a11_3 (:,:,iblk), a11_4 (:,:,iblk), & @@ -545,7 +554,7 @@ subroutine eap (dt) enddo ! subcycling - deallocate(fld2) + deallocate(fld2,fld3,fld4) if (maskhalo_dyn) call ice_HaloDestroy(halo_info_mask) !----------------------------------------------------------------- @@ -557,8 +566,8 @@ subroutine eap (dt) call dyn_finish & (nx_block, ny_block, & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocnU(:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & aiU (:,:,iblk), fmU (:,:,iblk), & @@ -567,27 +576,6 @@ subroutine eap (dt) enddo !$OMP END PARALLEL DO - ! strocn computed on U, N, E as needed. Map strocn U divided by aiU to T - ! TODO: This should be done elsewhere as part of generalization? - ! 
conservation requires aiU be divided before averaging - work1 = c0 - work2 = c0 - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij,iblk) - j = indxuj(ij,iblk) - work1(i,j,iblk) = strocnxU(i,j,iblk)/aiU(i,j,iblk) - work2(i,j,iblk) = strocnyU(i,j,iblk)/aiU(i,j,iblk) - enddo - enddo - call ice_HaloUpdate (work1, halo_info, & - field_loc_NEcorner, field_type_vector) - call ice_HaloUpdate (work2, halo_info, & - field_loc_NEcorner, field_type_vector) - call grid_average_X2Y('F', work1, 'U', strocnxT, 'T') ! shift - call grid_average_X2Y('F', work2, 'U', strocnyT, 'T') - call ice_timer_stop(timer_dynamics) ! dynamics end subroutine eap @@ -1165,8 +1153,8 @@ end FUNCTION s22ks subroutine stress_eap (nx_block, ny_block, & ksub, ndte, & - icellt, & - indxti, indxtj, & + icellT, & + indxTi, indxTj, & arlx1i, denom1, & uvel, vvel, & dxT, dyT, & @@ -1198,11 +1186,11 @@ subroutine stress_eap (nx_block, ny_block, & nx_block, ny_block, & ! block dimensions ksub , & ! subcycling step ndte , & ! number of subcycles - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), intent(in) :: & arlx1i , & ! dte/2T (original) or 1/alpha1 (revised) @@ -1285,9 +1273,9 @@ subroutine stress_eap (nx_block, ny_block, & strtmp(:,:,:) = c0 - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! strain rates @@ -1889,8 +1877,8 @@ end subroutine update_stress_rdg ! Solves evolution equation for structure tensor (A19, A20) subroutine stepa (nx_block, ny_block, & - dtei, icellt, & - indxti, indxtj, & + dtei, icellT, & + indxTi, indxTj, & a11, a12, & a11_1, a11_2, a11_3, a11_4, & a12_1, a12_2, a12_3, a12_4, & @@ -1903,14 +1891,14 @@ subroutine stepa (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. real (kind=dbl_kind), intent(in) :: & dtei ! 1/dte, where dte is subcycling timestep (1/s) integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & ! ice stress tensor (kg/s^2) in each corner of T cell @@ -1940,9 +1928,9 @@ subroutine stepa (nx_block, ny_block, & dteikth = c1 / (dtei + kth) p5kth = p5 * kth - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) ! 
ne call calc_ffrac(stressp_1(i,j), stressm_1(i,j), & diff --git a/cicecore/cicedyn/dynamics/ice_transport_driver.F90 b/cicecore/cicedyn/dynamics/ice_transport_driver.F90 index d972c76bf..59cc436d9 100644 --- a/cicecore/cicedyn/dynamics/ice_transport_driver.F90 +++ b/cicecore/cicedyn/dynamics/ice_transport_driver.F90 @@ -1521,13 +1521,8 @@ subroutine state_to_work (nx_block, ny_block, & integer (kind=int_kind) :: & nt_alvl, nt_apnd, nt_fbri -#ifdef UNDEPRECATE_CESMPONDS - logical (kind=log_kind) :: & - tr_pond_cesm, tr_pond_lvl, tr_pond_topo -#else logical (kind=log_kind) :: & tr_pond_lvl, tr_pond_topo -#endif integer (kind=int_kind) :: & i, j, n, it ,&! counting indices @@ -1535,13 +1530,8 @@ subroutine state_to_work (nx_block, ny_block, & character(len=*), parameter :: subname = '(state_to_work)' -#ifdef UNDEPRECATE_CESMPONDS - call icepack_query_tracer_flags(tr_pond_cesm_out=tr_pond_cesm, & - tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo) -#else call icepack_query_tracer_flags(tr_pond_lvl_out=tr_pond_lvl, & tr_pond_topo_out=tr_pond_topo) -#endif call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_apnd_out=nt_apnd, & nt_fbri_out=nt_fbri) call icepack_warnings_flush(nu_diag) @@ -1600,13 +1590,8 @@ subroutine state_to_work (nx_block, ny_block, & * trcrn(i,j,it,n) enddo enddo -#ifdef UNDEPRECATE_CESMPONDS - elseif (trcr_depend(it) == 2+nt_apnd .and. & - tr_pond_cesm .or. tr_pond_topo) then -#else elseif (trcr_depend(it) == 2+nt_apnd .and. & tr_pond_topo) then -#endif do j = 1, ny_block do i = 1, nx_block works(i,j,narrays+it) = aicen(i,j,n) & diff --git a/cicecore/cicedyn/dynamics/ice_transport_remap.F90 b/cicecore/cicedyn/dynamics/ice_transport_remap.F90 index 6fd037b7b..286a51711 100644 --- a/cicecore/cicedyn/dynamics/ice_transport_remap.F90 +++ b/cicecore/cicedyn/dynamics/ice_transport_remap.F90 @@ -633,6 +633,7 @@ subroutine horizontal_remap (dt, ntrace, & endif ! nghost + ! tcraig, this OMP loop sometimes fails with cce/14.0.3, compiler bug?? !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,n, & !$OMP edgearea_e,edgearea_n,edge,iflux,jflux, & !$OMP xp,yp,indxing,indxjng,mflxe,mflxn, & diff --git a/cicecore/cicedyn/general/ice_flux.F90 b/cicecore/cicedyn/general/ice_flux.F90 index a7e5aa584..8d190753e 100644 --- a/cicecore/cicedyn/general/ice_flux.F90 +++ b/cicecore/cicedyn/general/ice_flux.F90 @@ -35,6 +35,16 @@ module ice_flux ! Dynamics component ! All variables are assumed to be on the atm or ocn thermodynamic ! grid except as noted + ! + ! scale_fluxes divides several of these by aice "in place", so + ! the state of some of these variables is not well defined. In the + ! future, we need to refactor and add "_iavg" versions of the + ! fields to clearly differentiate fields that have been divided + ! by aice and others that are not. The challenge is that we need + ! to go thru each field carefully to see which version is used. + ! For instance, in diagnostics, there are places where these + ! fields are multiplied by aice to compute things properly. + ! strocn[x,y]T_iavg is the first field defined using _iavg. !----------------------------------------------------------------- real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & @@ -56,8 +66,8 @@ module ice_flux ! out to ocean T-cell (kg/m s^2) ! Note, CICE_IN_NEMO uses strocnx and strocny for coupling - strocnxT, & ! ice-ocean stress, x-direction at T points, per ice fraction - strocnyT ! ice-ocean stress, y-direction at T points, per ice fraction + strocnxT_iavg, & ! 
ice-ocean stress, x-direction at T points, per ice fraction (scaled flux) + strocnyT_iavg ! ice-ocean stress, y-direction at T points, per ice fraction (scaled flux) ! diagnostic @@ -389,8 +399,8 @@ subroutine alloc_flux hwater (nx_block,ny_block,max_blocks), & ! water depth for seabed stress calc (landfast ice) strairxT (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction strairyT (nx_block,ny_block,max_blocks), & ! stress on ice by air, y-direction - strocnxT (nx_block,ny_block,max_blocks), & ! ice-ocean stress, x-direction - strocnyT (nx_block,ny_block,max_blocks), & ! ice-ocean stress, y-direction + strocnxT_iavg(nx_block,ny_block,max_blocks), & ! ice-ocean stress, x-direction, per ice area + strocnyT_iavg(nx_block,ny_block,max_blocks), & ! ice-ocean stress, y-direction, per ice area sig1 (nx_block,ny_block,max_blocks), & ! normalized principal stress component sig2 (nx_block,ny_block,max_blocks), & ! normalized principal stress component sigP (nx_block,ny_block,max_blocks), & ! internal ice pressure (N/m) @@ -765,8 +775,8 @@ subroutine init_coupler_flux ! fluxes sent to ocean !----------------------------------------------------------------- - strocnxT(:,:,:) = c0 ! ice-ocean stress, x-direction (T-cell) - strocnyT(:,:,:) = c0 ! ice-ocean stress, y-direction (T-cell) + strocnxT_iavg (:,:,:) = c0 ! ice-ocean stress, x-direction (T-cell) + strocnyT_iavg (:,:,:) = c0 ! ice-ocean stress, y-direction (T-cell) fresh (:,:,:) = c0 fsalt (:,:,:) = c0 fpond (:,:,:) = c0 diff --git a/cicecore/cicedyn/general/ice_forcing.F90 b/cicecore/cicedyn/general/ice_forcing.F90 index edff03b9f..ff79778c5 100755 --- a/cicecore/cicedyn/general/ice_forcing.F90 +++ b/cicecore/cicedyn/general/ice_forcing.F90 @@ -22,7 +22,7 @@ module ice_forcing use ice_boundary, only: ice_HaloUpdate use ice_blocks, only: nx_block, ny_block use ice_domain, only: halo_info - use ice_domain_size, only: ncat, max_blocks, nx_global, ny_global + use ice_domain_size, only: ncat, max_blocks, nx_global, ny_global, nfreq use ice_communicate, only: my_task, master_task use ice_calendar, only: istep, istep1, & msec, mday, mmonth, myear, yday, daycal, & @@ -116,6 +116,9 @@ module ice_forcing topmelt_data, & botmelt_data + real (kind=dbl_kind), dimension(:,:,:,:,:), allocatable :: & + wave_spectrum_data ! field values at 2 temporal data points + character(char_len), public :: & atm_data_format, & ! 'bin'=binary or 'nc'=netcdf ocn_data_format, & ! 'bin'=binary or 'nc'=netcdf @@ -229,6 +232,7 @@ subroutine alloc_forcing ocn_frc_m(nx_block,ny_block, max_blocks,nfld,12), & ! ocn data for 12 months topmelt_file(ncat), & botmelt_file(ncat), & + wave_spectrum_data(nx_block,ny_block,nfreq,2,max_blocks), & stat=ierr) if (ierr/=0) call abort_ice('(alloc_forcing): Out of Memory') @@ -1527,6 +1531,44 @@ subroutine interpolate_data (field_data, field) end subroutine interpolate_data +!======================================================================= + + subroutine interpolate_wavespec_data (field_data, field) + +! Linear interpolation + +! author: Elizabeth C. Hunke, LANL + + use ice_domain, only: nblocks + + real (kind=dbl_kind), dimension(nx_block,ny_block,nfreq,2,max_blocks), intent(in) :: & + field_data ! 2 values used for interpolation + + real (kind=dbl_kind), dimension(nx_block,ny_block,nfreq,max_blocks), intent(out) :: & + field ! interpolated field + + ! 
local variables + + integer (kind=int_kind) :: i,j, iblk, freq + + character(len=*), parameter :: subname = '(interpolate_wavespec_data)' + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + do freq = 1, nfreq + field(i,j,freq,iblk) = c1intp * field_data(i,j,freq,1,iblk) & + + c2intp * field_data(i,j,freq,2,iblk) + enddo + enddo + enddo + enddo + !$OMP END PARALLEL DO + + end subroutine interpolate_wavespec_data + + +======================================================================= subroutine file_year (data_file, yr) @@ -4129,8 +4171,8 @@ subroutine ocn_data_ncar_init_3D work1(:,:,:) = ocn_frc_m(:,:,:,n ,m) work2(:,:,:) = ocn_frc_m(:,:,:,n+1,m) - call grid_average_X2Y('F',work1,'T',ocn_frc_m(:,:,:,n ,m),'U') - call grid_average_X2Y('F',work2,'T',ocn_frc_m(:,:,:,n+1,m),'U') + call grid_average_X2Y('A',work1,'T',ocn_frc_m(:,:,:,n ,m),'U') + call grid_average_X2Y('A',work2,'T',ocn_frc_m(:,:,:,n+1,m),'U') enddo ! month loop enddo ! field loop @@ -4373,7 +4415,7 @@ subroutine ocn_data_hadgem(dt) use ice_domain, only: nblocks use ice_domain_size, only: max_blocks use ice_flux, only: sst, uocn, vocn - use ice_grid, only: grid_average_X2Y, ANGLET + use ice_grid, only: ANGLET real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -5566,7 +5608,6 @@ subroutine get_wave_spec use ice_arrays_column, only: wave_spectrum, & dwavefreq, wavefreq use ice_constants, only: c0 - use ice_domain_size, only: nfreq use ice_timers, only: ice_timer_start, ice_timer_stop, timer_fsd ! local variables @@ -5592,7 +5633,6 @@ subroutine get_wave_spec ! if no wave data is provided, wave_spectrum is zero everywhere wave_spectrum(:,:,:,:) = c0 - wave_spec_dir = ocn_data_dir debug_forcing = .false. ! wave spectrum and frequencies @@ -5610,10 +5650,7 @@ subroutine get_wave_spec file=__FILE__, line=__LINE__) else #ifdef USE_NETCDF - call ice_open_nc(wave_spec_file,fid) - call ice_read_nc_xyf (fid, 1, 'efreq', wave_spectrum(:,:,:,:), debug_forcing, & - field_loc_center, field_type_scalar) - call ice_close_nc(fid) + call wave_spec_data #else write (nu_diag,*) "wave spectrum file not available, requires cpp USE_NETCDF" write (nu_diag,*) "wave spectrum file not available, using default profile" @@ -5628,6 +5665,146 @@ subroutine get_wave_spec end subroutine get_wave_spec +!======================================================================= +! +! Read in wave spectrum forcing as a function of time. 6 hourly +! LR started working from JRA55_data routine +! Changed fields, and changed 3 hourly to 6 hourly +! + subroutine wave_spec_data + + use ice_blocks, only: block, get_block + use ice_global_reductions, only: global_minval, global_maxval + use ice_domain, only: nblocks, distrb_info, blocks_ice + use ice_arrays_column, only: wave_spectrum, & + dwavefreq, wavefreq + use ice_read_write, only: ice_read_nc_xyf + use ice_grid, only: hm, tlon, tlat, tmask, umask + use ice_calendar, only: days_per_year, use_leap_years + + integer (kind=int_kind) :: & + ncid , & ! netcdf file id + i, j, freq , & + ixm,ixx,ixp , & ! record numbers for neighboring data records + recnum , & ! record number + maxrec , & ! maximum record number + recslot , & ! spline slot for current record + midmonth , & ! middle day of month + dataloc , & ! = 1 for data located in middle of time interval + ! = 2 for data located at end of time interval + iblk , & ! block index + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + yr ! current forcing year + + real (kind=dbl_kind) :: & + sec6hr , & ! 
number of seconds in 6 hours + secday , & ! number of seconds in day + vmin, vmax + + logical (kind=log_kind) :: readm, read6,debug_n_d + + type (block) :: & + this_block ! block information for current block + + real(kind=dbl_kind), dimension(nfreq) :: & + wave_spectrum_profile ! wave spectrum + + character(len=64) :: fieldname !netcdf field name + character(char_len_long) :: spec_file + character(char_len) :: wave_spec_type + logical (kind=log_kind) :: wave_spec + character(len=*), parameter :: subname = '(wave_spec_data)' + + + + debug_n_d = .false. !usually false + + call icepack_query_parameters(secday_out=secday) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + call icepack_init_wave(nfreq, & + wave_spectrum_profile, & + wavefreq, dwavefreq) + + + !spec_file = trim(ocn_data_dir)//'/'//trim(wave_spec_file) + spec_file = trim(wave_spec_file) + wave_spectrum_data = c0 + wave_spectrum = c0 + yr = fyear ! current year + !------------------------------------------------------------------- + ! 6-hourly data + ! + ! Assume that the 6-hourly value is located at the end of the + ! 6-hour period. This is the convention for NCEP reanalysis data. + ! E.g. record 1 gives conditions at 6 am GMT on 1 January. + !------------------------------------------------------------------- + + dataloc = 2 ! data located at end of interval + sec6hr = secday/c4 ! seconds in 6 hours + !maxrec = 1460 ! 365*4; for leap years = 366*4 + + if (use_leap_years) days_per_year = 366 !overrides setting of 365 in ice_calendar + maxrec = days_per_year*4 + + if(days_per_year == 365 .and. (mod(yr, 4) == 0)) then + call abort_ice('days_per_year should be set to 366 for leap years') + end if + + ! current record number + recnum = 4*int(yday) - 3 + int(real(msec,kind=dbl_kind)/sec6hr) + + ! Compute record numbers for surrounding data + + ixm = mod(recnum+maxrec-2,maxrec) + 1 + ixx = mod(recnum-1, maxrec) + 1 + + ! Compute interpolation coefficients + ! If data is located at the end of the time interval, then the + ! data value for the current record goes in slot 2 + + recslot = 2 + ixp = -99 + call interp_coeff (recnum, recslot, sec6hr, dataloc) + + ! Read + read6 = .false. + if (istep==1 .or. oldrecnum .ne. recnum) read6 = .true. + !------------------------------------------------------------------- + ! File is NETCDF + ! file variable names are: + ! efreq (wave spectrum, energy as a function of wave frequency UNITS) + !------------------------------------------------------------------- + call ice_open_nc(spec_file,ncid) + + call ice_read_nc_xyf(ncid,recnum,'efreq',wave_spectrum_data(:,:,:,1,:),debug_n_d, & + field_loc=field_loc_center, & + field_type=field_type_scalar) + call ice_read_nc_xyf(ncid,recnum,'efreq',wave_spectrum_data(:,:,:,2,:),debug_n_d, & + field_loc=field_loc_center, & + field_type=field_type_scalar) + call ice_close_nc(ncid) + + + ! Interpolate + call interpolate_wavespec_data (wave_spectrum_data, wave_spectrum) + + ! Save record number + oldrecnum = recnum + + if (local_debug) then + if (my_task == master_task) write (nu_diag,*) & + 'wave_spec_data ',spec_file + if (my_task.eq.master_task) & + write (nu_diag,*) 'maxrec',maxrec + write (nu_diag,*) 'days_per_year', days_per_year + + endif ! local debug + + end subroutine wave_spec_data + !======================================================================= ! 
initial snow aging lookup table diff --git a/cicecore/cicedyn/general/ice_forcing_bgc.F90 b/cicecore/cicedyn/general/ice_forcing_bgc.F90 index fc440834c..2f07d05f1 100644 --- a/cicecore/cicedyn/general/ice_forcing_bgc.F90 +++ b/cicecore/cicedyn/general/ice_forcing_bgc.F90 @@ -889,68 +889,116 @@ subroutine faero_optics ! this data is used in bulk aerosol treatment in dEdd radiation kaer_tab = reshape((/ & ! aerosol mass extinction cross section (m2/kg) - 11580.61872, 5535.41835, 2793.79690, & - 25798.96479, 11536.03871, 4688.24207, & - 196.49772, 204.14078, 214.42287, & - 2665.85867, 2256.71027, 820.36024, & - 840.78295, 1028.24656, 1163.03298, & - 387.51211, 414.68808, 450.29814/), & +! 11580.61872, 5535.41835, 2793.79690, & +! 25798.96479, 11536.03871, 4688.24207, & +! 196.49772, 204.14078, 214.42287, & +! 2665.85867, 2256.71027, 820.36024, & +! 840.78295, 1028.24656, 1163.03298, & +! 387.51211, 414.68808, 450.29814/), & + 11580.61872_dbl_kind, 5535.41835_dbl_kind, 2793.79690_dbl_kind, & + 25798.96479_dbl_kind, 11536.03871_dbl_kind, 4688.24207_dbl_kind, & + 196.49772_dbl_kind, 204.14078_dbl_kind, 214.42287_dbl_kind, & + 2665.85867_dbl_kind, 2256.71027_dbl_kind, 820.36024_dbl_kind, & + 840.78295_dbl_kind, 1028.24656_dbl_kind, 1163.03298_dbl_kind, & + 387.51211_dbl_kind, 414.68808_dbl_kind, 450.29814_dbl_kind/), & (/icepack_nspint,icepack_max_aero/)) waer_tab = reshape((/ & ! aerosol single scatter albedo (fraction) - 0.29003, 0.17349, 0.06613, & - 0.51731, 0.41609, 0.21324, & - 0.84467, 0.94216, 0.95666, & - 0.97764, 0.99402, 0.98552, & - 0.94146, 0.98527, 0.99093, & - 0.90034, 0.96543, 0.97678/), & +! 0.29003, 0.17349, 0.06613, & +! 0.51731, 0.41609, 0.21324, & +! 0.84467, 0.94216, 0.95666, & +! 0.97764, 0.99402, 0.98552, & +! 0.94146, 0.98527, 0.99093, & +! 0.90034, 0.96543, 0.97678/), & + 0.29003_dbl_kind, 0.17349_dbl_kind, 0.06613_dbl_kind, & + 0.51731_dbl_kind, 0.41609_dbl_kind, 0.21324_dbl_kind, & + 0.84467_dbl_kind, 0.94216_dbl_kind, 0.95666_dbl_kind, & + 0.97764_dbl_kind, 0.99402_dbl_kind, 0.98552_dbl_kind, & + 0.94146_dbl_kind, 0.98527_dbl_kind, 0.99093_dbl_kind, & + 0.90034_dbl_kind, 0.96543_dbl_kind, 0.97678_dbl_kind/), & (/icepack_nspint,icepack_max_aero/)) gaer_tab = reshape((/ & ! aerosol asymmetry parameter (cos(theta)) - 0.35445, 0.19838, 0.08857, & - 0.52581, 0.32384, 0.14970, & - 0.83162, 0.78306, 0.74375, & - 0.68861, 0.70836, 0.54171, & - 0.70239, 0.66115, 0.71983, & - 0.78734, 0.73580, 0.64411/), & +! 0.35445, 0.19838, 0.08857, & +! 0.52581, 0.32384, 0.14970, & +! 0.83162, 0.78306, 0.74375, & +! 0.68861, 0.70836, 0.54171, & +! 0.70239, 0.66115, 0.71983, & +! 0.78734, 0.73580, 0.64411/), & + 0.35445_dbl_kind, 0.19838_dbl_kind, 0.08857_dbl_kind, & + 0.52581_dbl_kind, 0.32384_dbl_kind, 0.14970_dbl_kind, & + 0.83162_dbl_kind, 0.78306_dbl_kind, 0.74375_dbl_kind, & + 0.68861_dbl_kind, 0.70836_dbl_kind, 0.54171_dbl_kind, & + 0.70239_dbl_kind, 0.66115_dbl_kind, 0.71983_dbl_kind, & + 0.78734_dbl_kind, 0.73580_dbl_kind, 0.64411_dbl_kind/), & (/icepack_nspint,icepack_max_aero/)) ! this data is used in MODAL AEROSOL treatment in dEdd radiation kaer_bc_tab = reshape((/ & ! 
aerosol mass extinction cross section (m2/kg) - 12955.44732, 5946.89461, 2772.33366, & - 12085.30664, 7438.83131, 3657.13084, & - 9753.99698, 7342.87139, 4187.79304, & - 7815.74879, 6659.65096, 4337.98863, & - 6381.28194, 5876.78408, 4254.65054, & - 5326.93163, 5156.74532, 4053.66581, & - 4538.09763, 4538.60875, 3804.10884, & - 3934.17604, 4020.20799, 3543.27199, & - 3461.20656, 3587.80962, 3289.98060, & - 3083.03396, 3226.27231, 3052.91441/), & +! 12955.44732, 5946.89461, 2772.33366, & +! 12085.30664, 7438.83131, 3657.13084, & +! 9753.99698, 7342.87139, 4187.79304, & +! 7815.74879, 6659.65096, 4337.98863, & +! 6381.28194, 5876.78408, 4254.65054, & +! 5326.93163, 5156.74532, 4053.66581, & +! 4538.09763, 4538.60875, 3804.10884, & +! 3934.17604, 4020.20799, 3543.27199, & +! 3461.20656, 3587.80962, 3289.98060, & +! 3083.03396, 3226.27231, 3052.91441/), & + 12955.4473151973_dbl_kind, 5946.89461205564_dbl_kind, 2772.33366387720_dbl_kind, & + 12085.3066388712_dbl_kind, 7438.83131367992_dbl_kind, 3657.13084442081_dbl_kind, & + 9753.99697536893_dbl_kind, 7342.87139082553_dbl_kind, 4187.79303607928_dbl_kind, & + 7815.74879345131_dbl_kind, 6659.65096365965_dbl_kind, 4337.98863414228_dbl_kind, & + 6381.28194381772_dbl_kind, 5876.78408231865_dbl_kind, 4254.65053724305_dbl_kind, & + 5326.93163497508_dbl_kind, 5156.74531505734_dbl_kind, 4053.66581550147_dbl_kind, & + 4538.09762614960_dbl_kind, 4538.60874501597_dbl_kind, 3804.10884202567_dbl_kind, & + 3934.17604000777_dbl_kind, 4020.20798667897_dbl_kind, 3543.27199302277_dbl_kind, & + 3461.20655708248_dbl_kind, 3587.80961820605_dbl_kind, 3289.98060303894_dbl_kind, & + 3083.03396032095_dbl_kind, 3226.27231329114_dbl_kind, 3052.91440681137_dbl_kind/), & (/icepack_nspint,10/)) waer_bc_tab = reshape((/ & ! aerosol single scatter albedo (fraction) - 0.26107, 0.15861, 0.06535, & - 0.37559, 0.30318, 0.19483, & - 0.42224, 0.36913, 0.27875, & - 0.44777, 0.40503, 0.33026, & - 0.46444, 0.42744, 0.36426, & - 0.47667, 0.44285, 0.38827, & - 0.48635, 0.45428, 0.40617, & - 0.49440, 0.46328, 0.42008, & - 0.50131, 0.47070, 0.43128, & - 0.50736, 0.47704, 0.44056/), & +! 0.26107, 0.15861, 0.06535, & +! 0.37559, 0.30318, 0.19483, & +! 0.42224, 0.36913, 0.27875, & +! 0.44777, 0.40503, 0.33026, & +! 0.46444, 0.42744, 0.36426, & +! 0.47667, 0.44285, 0.38827, & +! 0.48635, 0.45428, 0.40617, & +! 0.49440, 0.46328, 0.42008, & +! 0.50131, 0.47070, 0.43128, & +! 0.50736, 0.47704, 0.44056/), & + 0.261071919959011_dbl_kind, 0.158608047940651_dbl_kind, 0.0653546447770291_dbl_kind, & + 0.375593873543050_dbl_kind, 0.303181671502553_dbl_kind, 0.194832290545495_dbl_kind, & + 0.422240383488477_dbl_kind, 0.369134186611324_dbl_kind, 0.278752556671685_dbl_kind, & + 0.447772153910671_dbl_kind, 0.405033725319593_dbl_kind, 0.330260831965086_dbl_kind, & + 0.464443094570456_dbl_kind, 0.427439117980081_dbl_kind, 0.364256689383418_dbl_kind, & + 0.476668995985241_dbl_kind, 0.442854173154887_dbl_kind, 0.388270470928338_dbl_kind, & + 0.486347881475941_dbl_kind, 0.454284736567521_dbl_kind, 0.406167596922937_dbl_kind, & + 0.494397834153785_dbl_kind, 0.463279526357470_dbl_kind, 0.420084410794128_dbl_kind, & + 0.501307856563459_dbl_kind, 0.470696914968199_dbl_kind, 0.431284889617716_dbl_kind, & + 0.507362336297419_dbl_kind, 0.477038272961243_dbl_kind, 0.440559363958571_dbl_kind/), & (/icepack_nspint,10/)) gaer_bc_tab = reshape((/ & ! 
aerosol asymmetry parameter (cos(theta)) - 0.28328, 0.19644, 0.10498, & - 0.44488, 0.32615, 0.19612, & - 0.54724, 0.41611, 0.26390, & - 0.61711, 0.48475, 0.31922, & - 0.66673, 0.53923, 0.36632, & - 0.70296, 0.58337, 0.40732, & - 0.73002, 0.61960, 0.44344, & - 0.75064, 0.64959, 0.47551, & - 0.76663, 0.67461, 0.50415, & - 0.77926, 0.69561, 0.52981/),& +! 0.28328, 0.19644, 0.10498, & +! 0.44488, 0.32615, 0.19612, & +! 0.54724, 0.41611, 0.26390, & +! 0.61711, 0.48475, 0.31922, & +! 0.66673, 0.53923, 0.36632, & +! 0.70296, 0.58337, 0.40732, & +! 0.73002, 0.61960, 0.44344, & +! 0.75064, 0.64959, 0.47551, & +! 0.76663, 0.67461, 0.50415, & +! 0.77926, 0.69561, 0.52981/),& + 0.283282988564031_dbl_kind, 0.196444209821980_dbl_kind, 0.104976473902976_dbl_kind, & + 0.444877326083453_dbl_kind, 0.326147707342261_dbl_kind, 0.196121968923488_dbl_kind, & + 0.547243414035631_dbl_kind, 0.416106187964493_dbl_kind, 0.263903486903711_dbl_kind, & + 0.617111563012282_dbl_kind, 0.484745531707601_dbl_kind, 0.319218974395050_dbl_kind, & + 0.666728525631754_dbl_kind, 0.539228555802301_dbl_kind, 0.366323180358996_dbl_kind, & + 0.702956870835387_dbl_kind, 0.583372441336763_dbl_kind, 0.407316408184865_dbl_kind, & + 0.730016668453191_dbl_kind, 0.619595539349710_dbl_kind, 0.443436944107423_dbl_kind, & + 0.750635997128011_dbl_kind, 0.649589805870541_dbl_kind, 0.475512089138887_dbl_kind, & + 0.766634959089444_dbl_kind, 0.674609076223658_dbl_kind, 0.504145461809103_dbl_kind, & + 0.779256641759228_dbl_kind, 0.695614224933709_dbl_kind, 0.529805346632687_dbl_kind/), & (/icepack_nspint,10/)) bcenh(:,:,:) = c0 diff --git a/cicecore/cicedyn/general/ice_state.F90 b/cicecore/cicedyn/general/ice_state.F90 index 33fef2f01..862f0a8bc 100644 --- a/cicecore/cicedyn/general/ice_state.F90 +++ b/cicecore/cicedyn/general/ice_state.F90 @@ -54,7 +54,8 @@ module ice_state real (kind=dbl_kind), dimension(:,:,:), allocatable, & public :: & - aice , & ! concentration of ice + aice , & ! concentration of ice on T grid + aiU , & ! concentration of ice on U grid vice , & ! volume per unit area of ice (m) vsno ! volume per unit area of snow (m) @@ -149,7 +150,8 @@ subroutine alloc_state file=__FILE__, line=__LINE__) allocate ( & - aice (nx_block,ny_block,max_blocks) , & ! concentration of ice + aice (nx_block,ny_block,max_blocks) , & ! concentration of ice T grid + aiU (nx_block,ny_block,max_blocks) , & ! concentration of ice U grid vice (nx_block,ny_block,max_blocks) , & ! volume per unit area of ice (m) vsno (nx_block,ny_block,max_blocks) , & ! volume per unit area of snow (m) aice0 (nx_block,ny_block,max_blocks) , & ! 
concentration of open water diff --git a/cicecore/cicedyn/general/ice_step_mod.F90 b/cicecore/cicedyn/general/ice_step_mod.F90 index b6f8741c0..8dd6fe49a 100644 --- a/cicecore/cicedyn/general/ice_step_mod.F90 +++ b/cicecore/cicedyn/general/ice_step_mod.F90 @@ -12,7 +12,14 @@ module ice_step_mod use ice_kinds_mod + use ice_blocks, only: block, get_block + use ice_blocks, only: nx_block, ny_block use ice_constants, only: c0, c1, c1000, c4, p25 + use ice_constants, only: field_loc_center, field_loc_NEcorner, & + field_loc_Nface, field_loc_Eface, & + field_type_scalar, field_type_vector + use ice_domain, only: halo_info, nblocks, blocks_ice + use ice_domain_size, only: max_blocks use ice_exit, only: abort_ice use ice_fileunits, only: nu_diag use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted @@ -39,6 +46,10 @@ module ice_step_mod step_snow, prep_radiation, step_radiation, ocean_mixed_layer, & update_state, biogeochemistry, step_dyn_wave, step_prep + real (kind=dbl_kind), dimension (:,:,:), allocatable :: & + uvelT_icep, & ! uvel for wind stress computation in icepack + vvelT_icep ! vvel for wind stress computation in icepack + !======================================================================= contains @@ -73,6 +84,13 @@ subroutine step_prep use ice_flux, only: uatm, vatm, uatmT, vatmT use ice_grid, only: grid_atm_dynu, grid_atm_dynv, grid_average_X2Y + use ice_state, only: uvel, vvel + + logical (kind=log_kind) :: & + highfreq ! highfreq flag + + logical (kind=log_kind), save :: & + first_call = .true. ! first call flag character(len=*), parameter :: subname = '(step_prep)' @@ -85,6 +103,26 @@ subroutine step_prep call grid_average_X2Y('S',uatm,grid_atm_dynu,uatmT,'T') call grid_average_X2Y('S',vatm,grid_atm_dynv,vatmT,'T') + !----------------------------------------------------------------- + ! Compute uvelT_icep, vvelT_icep + !----------------------------------------------------------------- + + if (first_call) then + allocate(uvelT_icep(nx_block,ny_block,max_blocks)) + allocate(vvelT_icep(nx_block,ny_block,max_blocks)) + uvelT_icep = c0 + vvelT_icep = c0 + endif + + call icepack_query_parameters(highfreq_out=highfreq) + + if (highfreq) then + call grid_average_X2Y('A', uvel, 'U', uvelT_icep, 'T') + call grid_average_X2Y('A', vvel, 'U', vvelT_icep, 'T') + endif + + first_call = .false. 
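+
+      ! uvelT_icep and vvelT_icep are module-level (saved) arrays:
+      ! allocated once on the first call, then refreshed every step when
+      ! highfreq is set, so icepack sees T-point ice velocities in its
+      ! wind-stress calculation; when highfreq is false they remain zero
+      ! and icepack does not use them.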
+ end subroutine step_prep !======================================================================= @@ -95,8 +133,6 @@ end subroutine step_prep subroutine prep_radiation (iblk) - use ice_blocks, only: block, get_block - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & alvdr_ai, alvdf_ai, alidr_ai, alidf_ai, & @@ -188,14 +224,9 @@ subroutine step_therm1 (dt, iblk) hfreebd, hdraft, hridge, distrdg, hkeel, dkeel, lfloe, dfloe, & fswsfcn, fswintn, Sswabsn, Iswabsn, meltsliqn, meltsliq, & fswthrun, fswthrun_vdr, fswthrun_vdf, fswthrun_idr, fswthrun_idf - use ice_blocks, only: block, get_block -#ifdef CICE_IN_NEMO - use ice_blocks, only: nx_block, ny_block -#endif use ice_calendar, only: yday - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, n_iso, n_aero - use ice_flux, only: frzmlt, sst, Tf, strocnxT, strocnyT, rside, fbot, Tbot, Tsnice, & + use ice_flux, only: frzmlt, sst, Tf, strocnxT_iavg, strocnyT_iavg, rside, fbot, Tbot, Tsnice, & meltsn, melttn, meltbn, congeln, snoicen, uatmT, vatmT, fside, & wind, rhoa, potT, Qa, zlvl, zlvs, strax, stray, flatn, fsensn, fsurfn, fcondtopn, & flw, fsnow, fpond, sss, mlt_onset, frz_onset, fcondbotn, fcondbot, fsloss, & @@ -209,7 +240,7 @@ subroutine step_therm1 (dt, iblk) Qa_iso, Qref_iso, fiso_evap, HDO_ocn, H2_16O_ocn, H2_18O_ocn use ice_grid, only: lmask_n, lmask_s, tmask use ice_state, only: aice, aicen, aicen_init, vicen_init, & - vice, vicen, vsno, vsnon, trcrn, uvel, vvel, vsnon_init + vice, vicen, vsno, vsnon, trcrn, vsnon_init #ifdef CICE_IN_NEMO use ice_state, only: aice_init #endif @@ -243,16 +274,10 @@ subroutine step_therm1 (dt, iblk) nt_isosno, nt_isoice, nt_rsnw, nt_smice, nt_smliq logical (kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_iso, tr_aero, tr_pond, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_iso, tr_aero, tr_pond, & -#endif tr_pond_lvl, tr_pond_topo, calc_Tsfc, highfreq, tr_snow real (kind=dbl_kind) :: & - uvel_center, & ! cell-centered velocity, x component (m/s) - vvel_center, & ! cell-centered velocity, y component (m/s) puny ! a very small number real (kind=dbl_kind), dimension(n_aero,2,ncat) :: & @@ -275,11 +300,7 @@ subroutine step_therm1 (dt, iblk) call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_iso_out=tr_iso, & -#ifdef UNDEPRECATE_CESMPONDS - tr_aero_out=tr_aero, tr_pond_out=tr_pond, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_aero_out=tr_aero, tr_pond_out=tr_pond, & -#endif tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo, & tr_snow_out=tr_snow) call icepack_query_tracer_indices( & @@ -336,16 +357,6 @@ subroutine step_therm1 (dt, iblk) do j = jlo, jhi do i = ilo, ihi - if (highfreq) then ! include ice velocity in calculation of wind stress - uvel_center = p25*(uvel(i,j ,iblk) + uvel(i-1,j ,iblk) & ! cell-centered velocity - + uvel(i,j-1,iblk) + uvel(i-1,j-1,iblk)) ! assumes wind components - vvel_center = p25*(vvel(i,j ,iblk) + vvel(i-1,j ,iblk) & ! are also cell-centered - + vvel(i,j-1,iblk) + vvel(i-1,j-1,iblk)) - else - uvel_center = c0 ! not used - vvel_center = c0 - endif ! 
highfreq - if (tr_snow) then do n = 1, ncat do k = 1, nslyr @@ -391,8 +402,8 @@ subroutine step_therm1 (dt, iblk) vicen = vicen (i,j,:,iblk), & vsno = vsno (i,j, iblk), & vsnon = vsnon (i,j,:,iblk), & - uvel = uvel_center , & - vvel = vvel_center , & + uvel = uvelT_icep (i,j, iblk), & + vvel = vvelT_icep (i,j, iblk), & Tsfc = trcrn (i,j,nt_Tsfc,:,iblk), & zqsn = trcrn (i,j,nt_qsno:nt_qsno+nslyr-1,:,iblk), & zqin = trcrn (i,j,nt_qice:nt_qice+nilyr-1,:,iblk), & @@ -450,8 +461,8 @@ subroutine step_therm1 (dt, iblk) sst = sst (i,j, iblk), & sss = sss (i,j, iblk), & Tf = Tf (i,j, iblk), & - strocnxT = strocnxT (i,j, iblk), & - strocnyT = strocnyT (i,j, iblk), & + strocnxT = strocnxT_iavg(i,j, iblk), & + strocnyT = strocnyT_iavg(i,j, iblk), & fbot = fbot (i,j, iblk), & Tbot = Tbot (i,j, iblk), & Tsnice = Tsnice (i,j, iblk), & @@ -604,9 +615,7 @@ subroutine step_therm2 (dt, iblk) wave_spectrum, wavefreq, dwavefreq, & first_ice, bgrid, cgrid, igrid, floe_rad_c, floe_binwidth, & d_afsd_latg, d_afsd_newi, d_afsd_latm, d_afsd_weld - use ice_blocks, only: block, get_block use ice_calendar, only: yday - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, nblyr, nfsd use ice_flux, only: fresh, frain, fpond, frzmlt, frazil, frz_onset, & update_ocn_f, fsalt, Tf, sss, salinz, fhocn, rside, fside, & @@ -746,8 +755,6 @@ end subroutine step_therm2 subroutine update_state (dt, daidt, dvidt, dagedt, offset) - use ice_blocks, only: nx_block, ny_block - use ice_domain, only: nblocks use ice_domain_size, only: ncat ! use ice_grid, only: tmask use ice_state, only: aicen, trcrn, vicen, vsnon, & @@ -864,8 +871,6 @@ subroutine step_dyn_wave (dt) use ice_arrays_column, only: wave_spectrum, & d_afsd_wave, floe_rad_l, floe_rad_c, wavefreq, dwavefreq - use ice_blocks, only: block, get_block - use ice_domain, only: blocks_ice, nblocks use ice_domain_size, only: ncat, nfsd, nfreq use ice_state, only: trcrn, aicen, aice, vice use ice_timers, only: ice_timer_start, ice_timer_stop, timer_column, & @@ -939,16 +944,34 @@ end subroutine step_dyn_wave subroutine step_dyn_horiz (dt) + use ice_boundary, only: ice_HaloUpdate use ice_dyn_evp, only: evp use ice_dyn_eap, only: eap use ice_dyn_vp, only: implicit_solver use ice_dyn_shared, only: kdyn + use ice_flux, only: strocnxU, strocnyU, strocnxT_iavg, strocnyT_iavg use ice_flux, only: init_history_dyn + use ice_grid, only: grid_average_X2Y + use ice_state, only: aiU use ice_transport_driver, only: advection, transport_upwind, transport_remap real (kind=dbl_kind), intent(in) :: & dt ! dynamics time step + ! local variables + + type (block) :: & + this_block ! block information for current block + + integer (kind=int_kind) :: & + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + iblk, & ! block index + i, j ! horizontal indices + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1, & ! temporary + work2 ! temporary + character(len=*), parameter :: subname = '(step_dyn_horiz)' call init_history_dyn ! initialize dynamic history variables @@ -961,6 +984,38 @@ subroutine step_dyn_horiz (dt) if (kdyn == 2) call eap (dt) if (kdyn == 3) call implicit_solver (dt) + !----------------------------------------------------------------- + ! Compute strocnxT_iavg, strocnyT_iavg for thermo and coupling + !----------------------------------------------------------------- + + ! strocn computed on U, N, E as needed. Map strocn U divided by aiU to T + ! 
conservation requires aiU be divided before averaging + work1 = c0 + work2 = c0 + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk), iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + if (aiU(i,j,iblk) /= c0) then + work1(i,j,iblk) = strocnxU(i,j,iblk)/aiU(i,j,iblk) + work2(i,j,iblk) = strocnyU(i,j,iblk)/aiU(i,j,iblk) + endif + enddo + enddo + enddo + !$OMP END PARALLEL DO + call ice_HaloUpdate (work1, halo_info, & + field_loc_NEcorner, field_type_vector) + call ice_HaloUpdate (work2, halo_info, & + field_loc_NEcorner, field_type_vector) + call grid_average_X2Y('F', work1, 'U', strocnxT_iavg, 'T') ! shift + call grid_average_X2Y('F', work2, 'U', strocnyT_iavg, 'T') + !----------------------------------------------------------------- ! Horizontal ice transport !----------------------------------------------------------------- @@ -983,8 +1038,6 @@ end subroutine step_dyn_horiz subroutine step_dyn_ridge (dt, ndtd, iblk) use ice_arrays_column, only: hin_max, fzsal, first_ice - use ice_blocks, only: block, get_block - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, n_aero, nblyr use ice_flux, only: & rdg_conv, rdg_shear, dardg1dt, dardg2dt, & @@ -1107,9 +1160,7 @@ end subroutine step_dyn_ridge subroutine step_snow (dt, iblk) - use ice_blocks, only: block, get_block use ice_calendar, only: nstreams - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nslyr, nilyr use ice_flux, only: snwcnt, wind, fresh, fhocn, fsloss, fsnow use ice_state, only: trcrn, vsno, vsnon, vicen, aicen, aice @@ -1225,9 +1276,7 @@ subroutine step_radiation (dt, iblk) alvdrn, alidrn, alvdfn, alidfn, apeffn, trcrn_sw, snowfracn, & kaer_tab, waer_tab, gaer_tab, kaer_bc_tab, waer_bc_tab, & gaer_bc_tab, bcenh, swgrid, igrid - use ice_blocks, only: block, get_block use ice_calendar, only: calendar_type, days_per_year, nextsw_cday, yday, msec - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, n_aero, nilyr, nslyr, n_zaero, n_algae, nblyr use ice_flux, only: swvdr, swvdf, swidr, swidf, coszen, fsnow use ice_grid, only: TLAT, TLON, tmask @@ -1415,7 +1464,6 @@ end subroutine step_radiation subroutine ocean_mixed_layer (dt, iblk) use ice_arrays_column, only: Cdn_atm, Cdn_atm_ratio - use ice_blocks, only: nx_block, ny_block use ice_flux, only: sst, Tf, Qa, uatmT, vatmT, wind, potT, rhoa, zlvl, & frzmlt, fhocn, fswthru, flw, flwout_ocn, fsens_ocn, flat_ocn, evap_ocn, & alvdr_ocn, alidr_ocn, alvdf_ocn, alidf_ocn, swidf, swvdf, swidr, swvdr, & @@ -1572,8 +1620,6 @@ subroutine biogeochemistry (dt, iblk) snow_bio_net, fswthrun, Rayleigh_criteria, & ocean_bio_all, sice_rho, fzsal, fzsal_g, & bgrid, igrid, icgrid, cgrid - use ice_blocks, only: block, get_block - use ice_domain, only: blocks_ice use ice_domain_size, only: nblyr, nilyr, nslyr, n_algae, n_zaero, ncat, & n_doc, n_dic, n_don, n_fed, n_fep use ice_flux, only: meltbn, melttn, congeln, snoicen, & diff --git a/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 b/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 index 284d74818..fe8432a2d 100644 --- a/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 +++ b/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 @@ -81,6 +81,7 @@ module ice_boundary module procedure ice_HaloUpdate2DR8, & ice_HaloUpdate2DR4, & ice_HaloUpdate2DI4, & + ice_HaloUpdate2DL1, & ice_HaloUpdate3DR8, & 
ice_HaloUpdate3DR4, & ice_HaloUpdate3DI4, & @@ -2349,6 +2350,69 @@ subroutine ice_HaloUpdate2DI4(array, halo, & end subroutine ice_HaloUpdate2DI4 +!*********************************************************************** + + subroutine ice_HaloUpdate2DL1(array, halo, & + fieldLoc, fieldKind, & + fillValue) + +! This routine updates ghost cells for an input array and is a +! member of a group of routines under the generic interface +! ice\_HaloUpdate. This routine is the specific interface +! for 2d horizontal logical arrays. + + type (ice_halo), intent(in) :: & + halo ! precomputed halo structure containing all + ! information needed for halo update + + integer (int_kind), intent(in) :: & + fieldKind, &! id for type of field (scalar, vector, angle) + fieldLoc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + integer (int_kind), intent(in), optional :: & + fillValue ! optional value to put in ghost cells + ! where neighbor points are unknown + ! (e.g. eliminated land blocks or + ! closed boundaries) + + logical (log_kind), dimension(:,:,:), intent(inout) :: & + array ! array containing field for which halo + ! needs to be updated + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind), dimension(:,:,:), allocatable :: & + iarray ! integer array for logical + + character(len=*), parameter :: subname = '(ice_HaloUpdate2DL1)' + +!----------------------------------------------------------------------- +! +! copy logical into integer array and call haloupdate on integer array +! +!----------------------------------------------------------------------- + + allocate(iarray(size(array,dim=1),size(array,dim=2),size(array,dim=3))) + iarray(:,:,:) = 0 + where (array) iarray = 1 + + call ice_HaloUpdate(iarray, halo, & + fieldLoc, fieldKind, & + fillValue) + + array = .false. + where (iarray /= 0) array = .true. + deallocate(iarray) + +!----------------------------------------------------------------------- + + end subroutine ice_HaloUpdate2DL1 + !*********************************************************************** subroutine ice_HaloUpdate3DR8(array, halo, & diff --git a/cicecore/cicedyn/infrastructure/comm/mpi/ice_global_reductions.F90 b/cicecore/cicedyn/infrastructure/comm/mpi/ice_global_reductions.F90 index fc7f59ad9..1f7592749 100644 --- a/cicecore/cicedyn/infrastructure/comm/mpi/ice_global_reductions.F90 +++ b/cicecore/cicedyn/infrastructure/comm/mpi/ice_global_reductions.F90 @@ -36,7 +36,6 @@ module ice_global_reductions private public :: global_sum, & - global_allreduce_sum, & global_sum_prod, & global_maxval, & global_minval @@ -65,12 +64,6 @@ module ice_global_reductions global_sum_scalar_int end interface - interface global_allreduce_sum - module procedure global_allreduce_sum_vector_dbl!, & - ! module procedure global_allreduce_sum_vector_real, & ! not yet implemented - ! module procedure global_allreduce_sum_vector_int ! not yet implemented - end interface - interface global_sum_prod module procedure global_sum_prod_dbl, & global_sum_prod_real, & @@ -756,68 +749,6 @@ function global_sum_scalar_int(scalar, dist) & end function global_sum_scalar_int -!*********************************************************************** - - function global_allreduce_sum_vector_dbl(vector, dist) & - result(globalSums) - -! Computes the global sums of sets of scalars (elements of 'vector') -! distributed across a parallel machine. -! -! 
This is actually the specific interface for the generic global_allreduce_sum -! function corresponding to double precision vectors. The generic -! interface is identical but will handle real and integer vectors. - - real (dbl_kind), dimension(:), intent(in) :: & - vector ! vector whose components are to be summed - - type (distrb), intent(in) :: & - dist ! block distribution - - real (dbl_kind), dimension(size(vector)) :: & - globalSums ! resulting array of global sums - -!----------------------------------------------------------------------- -! -! local variables -! -!----------------------------------------------------------------------- - - integer (int_kind) :: & - numProcs, &! number of processor participating - numBlocks, &! number of local blocks - communicator, &! communicator for this distribution - numElem ! number of elements in vector - - real (dbl_kind), dimension(:,:), allocatable :: & - work ! temporary local array - - character(len=*), parameter :: subname = '(global_allreduce_sum_vector_dbl)' - -!----------------------------------------------------------------------- -! -! get communicator for MPI calls -! -!----------------------------------------------------------------------- - - call ice_distributionGet(dist, & - numLocalBlocks = numBlocks, & - nprocs = numProcs, & - communicator = communicator) - - numElem = size(vector) - allocate(work(1,numElem)) - work(1,:) = vector - globalSums = c0 - - call compute_sums_dbl(work,globalSums,communicator,numProcs) - - deallocate(work) - -!----------------------------------------------------------------------- - - end function global_allreduce_sum_vector_dbl - !*********************************************************************** function global_sum_prod_dbl (array1, array2, dist, field_loc, & diff --git a/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 b/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 index 89a2b04f8..cca7ea849 100644 --- a/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 +++ b/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 @@ -68,6 +68,7 @@ module ice_boundary module procedure ice_HaloUpdate2DR8, & ice_HaloUpdate2DR4, & ice_HaloUpdate2DI4, & + ice_HaloUpdate2DL1, & ice_HaloUpdate3DR8, & ice_HaloUpdate3DR4, & ice_HaloUpdate3DI4, & @@ -1515,6 +1516,69 @@ subroutine ice_HaloUpdate2DI4(array, halo, & end subroutine ice_HaloUpdate2DI4 +!*********************************************************************** + + subroutine ice_HaloUpdate2DL1(array, halo, & + fieldLoc, fieldKind, & + fillValue) + +! This routine updates ghost cells for an input array and is a +! member of a group of routines under the generic interface +! ice\_HaloUpdate. This routine is the specific interface +! for 2d horizontal logical arrays. + + type (ice_halo), intent(in) :: & + halo ! precomputed halo structure containing all + ! information needed for halo update + + integer (int_kind), intent(in) :: & + fieldKind, &! id for type of field (scalar, vector, angle) + fieldLoc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + integer (int_kind), intent(in), optional :: & + fillValue ! optional value to put in ghost cells + ! where neighbor points are unknown + ! (e.g. eliminated land blocks or + ! closed boundaries) + + logical (log_kind), dimension(:,:,:), intent(inout) :: & + array ! array containing field for which halo + ! needs to be updated + +!----------------------------------------------------------------------- +! +! local variables +! 
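+! (iarray mirrors the logical field: the underlying halo machinery only
+!  moves numeric types, so the logical array is round-tripped through a
+!  0/1 integer copy and restored after the update)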
+!----------------------------------------------------------------------- + + integer (int_kind), dimension(:,:,:), allocatable :: & + iarray ! integer array for logical + + character(len=*), parameter :: subname = '(ice_HaloUpdate2DL1)' + +!----------------------------------------------------------------------- +! +! copy logical into integer array and call haloupdate on integer array +! +!----------------------------------------------------------------------- + + allocate(iarray(size(array,dim=1),size(array,dim=2),size(array,dim=3))) + iarray(:,:,:) = 0 + where (array) iarray = 1 + + call ice_HaloUpdate(iarray, halo, & + fieldLoc, fieldKind, & + fillValue) + + array = .false. + where (iarray /= 0) array = .true. + deallocate(iarray) + +!----------------------------------------------------------------------- + + end subroutine ice_HaloUpdate2DL1 + !*********************************************************************** subroutine ice_HaloUpdate3DR8(array, halo, & diff --git a/cicecore/cicedyn/infrastructure/comm/serial/ice_global_reductions.F90 b/cicecore/cicedyn/infrastructure/comm/serial/ice_global_reductions.F90 index 63431308a..e4eb95b56 100644 --- a/cicecore/cicedyn/infrastructure/comm/serial/ice_global_reductions.F90 +++ b/cicecore/cicedyn/infrastructure/comm/serial/ice_global_reductions.F90 @@ -37,7 +37,6 @@ module ice_global_reductions private public :: global_sum, & - global_allreduce_sum, & global_sum_prod, & global_maxval, & global_minval @@ -66,12 +65,6 @@ module ice_global_reductions global_sum_scalar_int end interface - interface global_allreduce_sum - module procedure global_allreduce_sum_vector_dbl!, & - ! module procedure global_allreduce_sum_vector_real, & ! not yet implemented - ! module procedure global_allreduce_sum_vector_int ! not yet implemented - end interface - interface global_sum_prod module procedure global_sum_prod_dbl, & global_sum_prod_real, & @@ -757,68 +750,6 @@ function global_sum_scalar_int(scalar, dist) & end function global_sum_scalar_int -!*********************************************************************** - - function global_allreduce_sum_vector_dbl(vector, dist) & - result(globalSums) - -! Computes the global sums of sets of scalars (elements of 'vector') -! distributed across a parallel machine. -! -! This is actually the specific interface for the generic global_allreduce_sum -! function corresponding to double precision vectors. The generic -! interface is identical but will handle real and integer vectors. - - real (dbl_kind), dimension(:), intent(in) :: & - vector ! vector whose components are to be summed - - type (distrb), intent(in) :: & - dist ! block distribution - - real (dbl_kind), dimension(size(vector)) :: & - globalSums ! resulting array of global sums - -!----------------------------------------------------------------------- -! -! local variables -! -!----------------------------------------------------------------------- - - integer (int_kind) :: & - numProcs, &! number of processor participating - numBlocks, &! number of local blocks - communicator, &! communicator for this distribution - numElem ! number of elements in vector - - real (dbl_kind), dimension(:,:), allocatable :: & - work ! temporary local array - - character(len=*), parameter :: subname = '(global_allreduce_sum_vector_dbl)' - -!----------------------------------------------------------------------- -! -! get communicator for MPI calls -! 
-!----------------------------------------------------------------------- - - call ice_distributionGet(dist, & - numLocalBlocks = numBlocks, & - nprocs = numProcs, & - communicator = communicator) - - numElem = size(vector) - allocate(work(1,numElem)) - work(1,:) = vector - globalSums = c0 - - call compute_sums_dbl(work,globalSums,communicator,numProcs) - - deallocate(work) - -!----------------------------------------------------------------------- - - end function global_allreduce_sum_vector_dbl - !*********************************************************************** function global_sum_prod_dbl (array1, array2, dist, field_loc, & diff --git a/cicecore/cicedyn/infrastructure/ice_domain.F90 b/cicecore/cicedyn/infrastructure/ice_domain.F90 index ac56356e5..10254aa93 100644 --- a/cicecore/cicedyn/infrastructure/ice_domain.F90 +++ b/cicecore/cicedyn/infrastructure/ice_domain.F90 @@ -234,6 +234,7 @@ subroutine init_domain_blocks #if (defined CESMCOUPLED) nprocs = get_num_procs() #else + write(nu_diag,*) subname,'ERROR: nprocs, get_num_procs = ',nprocs,get_num_procs() call abort_ice(subname//'ERROR: Input nprocs not same as system request') #endif else if (nghost < 1) then diff --git a/cicecore/cicedyn/infrastructure/ice_restart_driver.F90 b/cicecore/cicedyn/infrastructure/ice_restart_driver.F90 index 64b8d2101..bd5a49eaf 100644 --- a/cicecore/cicedyn/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedyn/infrastructure/ice_restart_driver.F90 @@ -55,15 +55,16 @@ subroutine dumpfile(filename_spec) use ice_blocks, only: nx_block, ny_block use ice_domain, only: nblocks use ice_domain_size, only: nilyr, nslyr, ncat, max_blocks + use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & - strocnxT, strocnyT, sst, frzmlt, & + strocnxT_iavg, strocnyT_iavg, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4, & stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_flux, only: coszen - use ice_grid, only: grid_ice, tmask, iceumask, iceemask, icenmask + use ice_grid, only: grid_ice, tmask use ice_state, only: aicen, vicen, vsnon, trcrn, uvel, vvel, & uvelE, vvelE, uvelN, vvelN @@ -175,8 +176,8 @@ subroutine dumpfile(filename_spec) !----------------------------------------------------------------- ! ocean stress (for bottom heat flux in thermo) !----------------------------------------------------------------- - call write_restart_field(nu_dump,0,strocnxT,'ruf8','strocnxT',1,diag) - call write_restart_field(nu_dump,0,strocnyT,'ruf8','strocnyT',1,diag) + call write_restart_field(nu_dump,0,strocnxT_iavg,'ruf8','strocnxT',1,diag) + call write_restart_field(nu_dump,0,strocnyT_iavg,'ruf8','strocnyT',1,diag) !----------------------------------------------------------------- ! 
internal stress @@ -220,7 +221,7 @@ subroutine dumpfile(filename_spec) do j = 1, ny_block do i = 1, nx_block work1(i,j,iblk) = c0 - if (iceumask(i,j,iblk)) work1(i,j,iblk) = c1 + if (iceUmask(i,j,iblk)) work1(i,j,iblk) = c1 enddo enddo enddo @@ -234,7 +235,7 @@ subroutine dumpfile(filename_spec) do j = 1, ny_block do i = 1, nx_block work1(i,j,iblk) = c0 - if (icenmask(i,j,iblk)) work1(i,j,iblk) = c1 + if (iceNmask(i,j,iblk)) work1(i,j,iblk) = c1 enddo enddo enddo @@ -246,7 +247,7 @@ subroutine dumpfile(filename_spec) do j = 1, ny_block do i = 1, nx_block work1(i,j,iblk) = c0 - if (iceemask(i,j,iblk)) work1(i,j,iblk) = c1 + if (iceEmask(i,j,iblk)) work1(i,j,iblk) = c1 enddo enddo enddo @@ -276,16 +277,16 @@ subroutine restartfile (ice_ic) use ice_domain, only: nblocks, halo_info use ice_domain_size, only: nilyr, nslyr, ncat, & max_blocks + use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & - strocnxT, strocnyT, sst, frzmlt, & + strocnxT_iavg, strocnyT_iavg, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4, & stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_flux, only: coszen - use ice_grid, only: tmask, grid_type, grid_ice, & - iceumask, iceemask, icenmask + use ice_grid, only: tmask, grid_type, grid_ice, grid_average_X2Y use ice_state, only: trcr_depend, aice, vice, vsno, trcr, & aice0, aicen, vicen, vsnon, trcrn, aice_init, uvel, vvel, & uvelE, vvelE, uvelN, vvelN, & @@ -428,9 +429,9 @@ subroutine restartfile (ice_ic) if (my_task == master_task) & write(nu_diag,*) 'min/max ocean stress components' - call read_restart_field(nu_restart,0,strocnxT,'ruf8', & + call read_restart_field(nu_restart,0,strocnxT_iavg,'ruf8', & 'strocnxT',1,diag,field_loc_center, field_type_vector) - call read_restart_field(nu_restart,0,strocnyT,'ruf8', & + call read_restart_field(nu_restart,0,strocnyT_iavg,'ruf8', & 'strocnyT',1,diag,field_loc_center, field_type_vector) !----------------------------------------------------------------- @@ -529,12 +530,12 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,work1,'ruf8', & 'iceumask',1,diag,field_loc_center, field_type_scalar) - iceumask(:,:,:) = .false. + iceUmask(:,:,:) = .false. !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - if (work1(i,j,iblk) > p5) iceumask(i,j,iblk) = .true. + if (work1(i,j,iblk) > p5) iceUmask(i,j,iblk) = .true. enddo enddo enddo @@ -546,12 +547,12 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,work1,'ruf8', & 'icenmask',1,diag,field_loc_center, field_type_scalar) - icenmask(:,:,:) = .false. + iceNmask(:,:,:) = .false. !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - if (work1(i,j,iblk) > p5) icenmask(i,j,iblk) = .true. + if (work1(i,j,iblk) > p5) iceNmask(i,j,iblk) = .true. enddo enddo enddo @@ -562,12 +563,12 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,work1,'ruf8', & 'iceemask',1,diag,field_loc_center, field_type_scalar) - iceemask(:,:,:) = .false. + iceEmask(:,:,:) = .false. !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - if (work1(i,j,iblk) > p5) iceemask(i,j,iblk) = .true. + if (work1(i,j,iblk) > p5) iceEmask(i,j,iblk) = .true. 
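+               ! the mask is stored in the restart file as a 0/1 real
+               ! field, so any value above p5 restores to .true.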
enddo enddo enddo @@ -707,13 +708,14 @@ subroutine restartfile_v4 (ice_ic) use ice_domain, only: nblocks, distrb_info use ice_domain_size, only: nilyr, nslyr, ncat, nx_global, ny_global, & max_blocks + use ice_dyn_shared, only: iceUmask use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & - strocnxT, strocnyT, sst, frzmlt, & + strocnxT_iavg, strocnyT_iavg, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_gather_scatter, only: scatter_global_stress - use ice_grid, only: tmask, iceumask + use ice_grid, only: tmask use ice_read_write, only: ice_open, ice_read, ice_read_global use ice_state, only: trcr_depend, aice, vice, vsno, trcr, & aice0, aicen, vicen, vsnon, trcrn, aice_init, uvel, vvel, & @@ -873,9 +875,9 @@ subroutine restartfile_v4 (ice_ic) if (my_task == master_task) & write(nu_diag,*) 'min/max ocean stress components' - call ice_read(nu_restart,0,strocnxT,'ruf8',diag, & + call ice_read(nu_restart,0,strocnxT_iavg,'ruf8',diag, & field_loc_center, field_type_vector) - call ice_read(nu_restart,0,strocnyT,'ruf8',diag, & + call ice_read(nu_restart,0,strocnyT_iavg,'ruf8',diag, & field_loc_center, field_type_vector) !----------------------------------------------------------------- @@ -942,12 +944,12 @@ subroutine restartfile_v4 (ice_ic) call ice_read(nu_restart,0,work1,'ruf8',diag, & field_loc_center, field_type_scalar) - iceumask(:,:,:) = .false. + iceUmask(:,:,:) = .false. !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - if (work1(i,j,iblk) > p5) iceumask(i,j,iblk) = .true. + if (work1(i,j,iblk) > p5) iceUmask(i,j,iblk) = .true. enddo enddo enddo diff --git a/cicecore/cicedyn/infrastructure/io/io_binary/ice_restart.F90 b/cicecore/cicedyn/infrastructure/io/io_binary/ice_restart.F90 index d21a3385e..081d47557 100644 --- a/cicecore/cicedyn/infrastructure/io/io_binary/ice_restart.F90 +++ b/cicecore/cicedyn/infrastructure/io/io_binary/ice_restart.F90 @@ -57,11 +57,7 @@ subroutine init_restart_read(ice_ic) logical (kind=log_kind) :: & solve_zsal, tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & -#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow character(len=char_len_long) :: & @@ -86,11 +82,7 @@ subroutine init_restart_read(ice_ic) nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, tr_fsd_out=tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine) call icepack_warnings_flush(nu_diag) @@ -235,28 +227,6 @@ subroutine init_restart_read(ice_ic) endif endif -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - if (my_task == master_task) then - n = index(filename0,trim(restart_file)) - if (n == 0) call abort_ice(subname//'ERROR: pond_cesm restart: filename discrepancy') - string1 = trim(filename0(1:n-1)) - string2 = trim(filename0(n+lenstr(restart_file):lenstr(filename0))) - write(filename,'(a,a,a,a)') & - string1(1:lenstr(string1)), & - restart_file(1:lenstr(restart_file)),'.pond_cesm', & - string2(1:lenstr(string2)) - if (restart_ext) then - call ice_open_ext(nu_restart_pond,filename,0) - else - call ice_open(nu_restart_pond,filename,0) - endif - 
read (nu_restart_pond) iignore,rignore,rignore - write(nu_diag,*) 'Reading ',filename(1:lenstr(filename)) - endif - endif -#endif - if (tr_pond_lvl) then if (my_task == master_task) then n = index(filename0,trim(restart_file)) @@ -423,11 +393,7 @@ subroutine init_restart_write(filename_spec) logical (kind=log_kind) :: & solve_zsal, tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & -#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow integer (kind=int_kind) :: & @@ -443,11 +409,7 @@ subroutine init_restart_write(filename_spec) nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, tr_fsd_out=tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine) call icepack_warnings_flush(nu_diag) @@ -580,28 +542,6 @@ subroutine init_restart_write(filename_spec) endif -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - - write(filename,'(a,a,a,i4.4,a,i2.2,a,i2.2,a,i5.5)') & - restart_dir(1:lenstr(restart_dir)), & - restart_file(1:lenstr(restart_file)),'.pond_cesm.', & - myear,'-',mmonth,'-',mday,'-',msec - - if (restart_ext) then - call ice_open_ext(nu_dump_pond,filename,0) - else - call ice_open(nu_dump_pond,filename,0) - endif - - if (my_task == master_task) then - write(nu_dump_pond) istep1,timesecs,time_forc - write(nu_diag,*) 'Writing ',filename(1:lenstr(filename)) - endif - - endif -#endif - if (tr_pond_lvl) then write(filename,'(a,a,a,i4.4,a,i2.2,a,i2.2,a,i5.5)') & @@ -870,11 +810,7 @@ subroutine final_restart() logical (kind=log_kind) :: & solve_zsal, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & -#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow integer (kind=int_kind) :: & @@ -888,11 +824,7 @@ subroutine final_restart() nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine) call icepack_warnings_flush(nu_diag) @@ -907,9 +839,6 @@ subroutine final_restart() if (tr_iage) close(nu_dump_age) if (tr_FY) close(nu_dump_FY) if (tr_lvl) close(nu_dump_lvl) -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) close(nu_dump_pond) -#endif if (tr_pond_lvl) close(nu_dump_pond) if (tr_pond_topo) close(nu_dump_pond) if (tr_snow) close(nu_dump_snow) diff --git a/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_restart.F90 b/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_restart.F90 index f647bd96b..ed49a48f5 100644 --- a/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_restart.F90 +++ b/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_restart.F90 @@ -145,11 +145,7 @@ subroutine init_restart_write(filename_spec) logical (kind=log_kind) :: & solve_zsal, skl_bgc, z_tracers, tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & -#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow, & tr_bgc_N, tr_bgc_C, tr_bgc_Nit, & tr_bgc_Sil, 
tr_bgc_DMS, & @@ -185,11 +181,7 @@ subroutine init_restart_write(filename_spec) nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, tr_fsd_out=tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine, & tr_bgc_N_out=tr_bgc_N, tr_bgc_C_out=tr_bgc_C, tr_bgc_Nit_out=tr_bgc_Nit, & @@ -416,13 +408,6 @@ subroutine init_restart_write(filename_spec) call define_rest_field(ncid,'vlvl',dims) end if -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - call define_rest_field(ncid,'apnd',dims) - call define_rest_field(ncid,'hpnd',dims) - end if -#endif - if (tr_pond_topo) then call define_rest_field(ncid,'apnd',dims) call define_rest_field(ncid,'hpnd',dims) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index c2060285a..8eab5e260 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -85,7 +85,7 @@ subroutine evp (dt) use ice_flux, only: rdg_conv, rdg_shear, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & - strocnxT, strocnyT, strax, stray, & + strax, stray, & TbU, hwater, & strairxN, strairyN, fmN, & strtltxN, strtltyN, strocnxN, strocnyN, strintxN, strintyN, taubxN, taubyN, & @@ -99,14 +99,13 @@ subroutine evp (dt) stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_grid, only: tmask, umask, umaskCD, nmask, emask, uvm, epm, npm, & - iceumask, iceemask, icenmask, & dxE, dxN, dxT, dxU, dyE, dyN, dyT, dyU, & ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & dxhy, dyhx, cxp, cyp, cxm, cym, & tarear, uarear, earear, narear, grid_average_X2Y, uarea, & grid_type, grid_ice, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv - use ice_state, only: aice, vice, vsno, uvel, vvel, uvelN, vvelN, & + use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, uvelN, vvelN, & uvelE, vvelE, divu, shear, & aice_init, aice0, aicen, vicen, strength use ice_timers, only: timer_dynamics, timer_bound, & @@ -116,6 +115,7 @@ subroutine evp (dt) use ice_dyn_shared, only: evp_algorithm, stack_fields, unstack_fields, & DminTarea, visc_method, deformations, deformationsC_T, deformationsCD_T, & strain_rates_U, & + iceTmask, iceUmask, iceEmask, iceNmask, & dyn_haloUpdate real (kind=dbl_kind), intent(in) :: & @@ -130,32 +130,32 @@ subroutine evp (dt) i, j, ij ! local indices integer (kind=int_kind), dimension(max_blocks) :: & - icellt , & ! no. of cells where icetmask = 1 - icelln , & ! no. of cells where icenmask = .true. - icelle , & ! no. of cells where iceemask = .true. - icellu ! no. of cells where iceumask = .true. + icellT , & ! no. of cells where iceTmask = .true. + icellN , & ! no. of cells where iceNmask = .true. + icellE , & ! no. of cells where iceEmask = .true. + icellU ! no. of cells where iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks) :: & - indxti , & ! compressed index in i-direction - indxtj , & ! compressed index in j-direction - indxei , & ! compressed index in i-direction - indxej , & ! compressed index in j-direction - indxni , & ! compressed index in i-direction - indxnj , & ! compressed index in j-direction - indxui , & ! 
compressed index in i-direction - indxuj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj , & ! compressed index in j-direction + indxEi , & ! compressed index in i-direction + indxEj , & ! compressed index in j-direction + indxNi , & ! compressed index in i-direction + indxNj , & ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & uocnU , & ! i ocean current (m/s) vocnU , & ! j ocean current (m/s) ss_tltxU , & ! sea surface slope, x-direction (m/m) ss_tltyU , & ! sea surface slope, y-direction (m/m) + cdn_ocnU , & ! ocn drag coefficient tmass , & ! total mass of ice and snow (kg/m^2) waterxU , & ! for ocean stress calculation, x (m/s) wateryU , & ! for ocean stress calculation, y (m/s) forcexU , & ! work array: combined atm stress and ocn tilt, x forceyU , & ! work array: combined atm stress and ocn tilt, y - aiU , & ! ice fraction on u-grid umass , & ! total mass of ice and snow (u grid) umassdti ! mass of U-cell/dte (kg/m^2 s) @@ -164,6 +164,7 @@ subroutine evp (dt) vocnN , & ! j ocean current (m/s) ss_tltxN , & ! sea surface slope, x-direction (m/m) ss_tltyN , & ! sea surface slope, y-direction (m/m) + cdn_ocnN , & ! ocn drag coefficient waterxN , & ! for ocean stress calculation, x (m/s) wateryN , & ! for ocean stress calculation, y (m/s) forcexN , & ! work array: combined atm stress and ocn tilt, x @@ -177,6 +178,7 @@ subroutine evp (dt) vocnE , & ! j ocean current (m/s) ss_tltxE , & ! sea surface slope, x-direction (m/m) ss_tltyE , & ! sea surface slope, y-direction (m/m) + cdn_ocnE , & ! ocn drag coefficient waterxE , & ! for ocean stress calculation, x (m/s) wateryE , & ! for ocean stress calculation, y (m/s) forcexE , & ! work array: combined atm stress and ocn tilt, x @@ -186,9 +188,9 @@ subroutine evp (dt) emassdti ! mass of E-cell/dte (kg/m^2 s) real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:) , & ! 2 bundled fields - fld3(:,:,:,:) , & ! 3 bundled fields - fld4(:,:,:,:) ! 4 bundled fields + fld2(:,:,:,:), & ! 2 bundled fields + fld3(:,:,:,:), & ! 3 bundled fields + fld4(:,:,:,:) ! 4 bundled fields real (kind=dbl_kind), allocatable :: & strengthU(:,:,:), & ! strength averaged to U points @@ -208,7 +210,6 @@ subroutine evp (dt) calc_strair ! calculate air/ice stress integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & - icetmask, & ! ice extent mask (T-cell) halomask ! generic halo mask type (ice_halo) :: & @@ -217,10 +218,6 @@ subroutine evp (dt) type (block) :: & this_block ! block information for current block - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1, & ! temporary - work2 ! temporary - logical (kind=log_kind), save :: & first_time = .true. ! first time logical @@ -303,13 +300,13 @@ subroutine evp (dt) ilo, ihi, jlo, jhi, & aice (:,:,iblk), vice (:,:,iblk), & vsno (:,:,iblk), tmask (:,:,iblk), & - tmass (:,:,iblk), icetmask(:,:,iblk)) + tmass (:,:,iblk), iceTmask(:,:,iblk)) enddo ! iblk !$OMP END PARALLEL DO call ice_timer_start(timer_bound) - call ice_HaloUpdate (icetmask, halo_info, & + call ice_HaloUpdate (iceTmask, halo_info, & field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) @@ -317,22 +314,34 @@ subroutine evp (dt) ! 
convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('F', tmass , 'T' , umass , 'U') - call grid_average_X2Y('F', aice_init, 'T' , aiU , 'U') + call stack_fields(tmass, aice_init, cdn_ocn, fld3) + call ice_HaloUpdate (fld3, halo_info, & + field_loc_center, field_type_scalar) + call stack_fields(uocn, vocn, ss_tltx, ss_tlty, fld4) + call ice_HaloUpdate (fld4, halo_info, & + field_loc_center, field_type_vector) + call unstack_fields(fld3, tmass, aice_init, cdn_ocn) + call unstack_fields(fld4, uocn, vocn, ss_tltx, ss_tlty) + + call grid_average_X2Y('S', tmass , 'T' , umass , 'U') + call grid_average_X2Y('S', aice_init, 'T' , aiU , 'U') + call grid_average_X2Y('S', cdn_ocn , 'T' , cdn_ocnU, 'U') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnU , 'U') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnU , 'U') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxU, 'U') call grid_average_X2Y('S', ss_tlty , grid_ocn_dynv, ss_tltyU, 'U') if (grid_ice == 'CD' .or. grid_ice == 'C') then - call grid_average_X2Y('F', tmass , 'T' , emass , 'E') - call grid_average_X2Y('F', aice_init, 'T' , aie , 'E') + call grid_average_X2Y('S', tmass , 'T' , emass , 'E') + call grid_average_X2Y('S', aice_init, 'T' , aie , 'E') + call grid_average_X2Y('S', cdn_ocn , 'T' , cdn_ocnE, 'E') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnE , 'E') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnE , 'E') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxE, 'E') call grid_average_X2Y('S', ss_tlty , grid_ocn_dynv, ss_tltyE, 'E') - call grid_average_X2Y('F', tmass , 'T' , nmass , 'N') - call grid_average_X2Y('F', aice_init, 'T' , ain , 'N') + call grid_average_X2Y('S', tmass , 'T' , nmass , 'N') + call grid_average_X2Y('S', aice_init, 'T' , ain , 'N') + call grid_average_X2Y('S', cdn_ocn , 'T' , cdn_ocnN, 'N') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnN , 'N') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnN , 'N') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxN, 'N') @@ -390,16 +399,16 @@ subroutine evp (dt) if (trim(grid_ice) == 'B') then call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt (iblk), icellu (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellT (iblk), icellU (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & - icetmask (:,:,iblk), iceumask (:,:,iblk), & + iceTmask (:,:,iblk), iceUmask (:,:,iblk), & fmU (:,:,iblk), dt, & strtltxU (:,:,iblk), strtltyU (:,:,iblk), & strocnxU (:,:,iblk), strocnyU (:,:,iblk), & @@ -420,16 +429,16 @@ subroutine evp (dt) elseif (trim(grid_ice) == 'CD' .or. 
grid_ice == 'C') then call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt (iblk), icellu (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellT (iblk), icellU (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umaskCD (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & - icetmask (:,:,iblk), iceumask (:,:,iblk), & + iceTmask (:,:,iblk), iceUmask (:,:,iblk), & fmU (:,:,iblk), dt, & strtltxU (:,:,iblk), strtltyU (:,:,iblk), & strocnxU (:,:,iblk), strocnyU (:,:,iblk), & @@ -453,9 +462,9 @@ subroutine evp (dt) !----------------------------------------------------------------- strength(:,:,iblk) = c0 ! initialize - do ij = 1, icellt(iblk) - i = indxti(ij, iblk) - j = indxtj(ij, iblk) + do ij = 1, icellT(iblk) + i = indxTi(ij, iblk) + j = indxTj(ij, iblk) call icepack_ice_strength(ncat = ncat, & aice = aice (i,j, iblk), & vice = vice (i,j, iblk), & @@ -485,16 +494,16 @@ subroutine evp (dt) call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt (iblk), icelln (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellT (iblk), icellN (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxNi (:,iblk), indxNj (:,iblk), & aiN (:,:,iblk), nmass (:,:,iblk), & nmassdti (:,:,iblk), fcorN_blk (:,:,iblk), & nmask (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & strairxN (:,:,iblk), strairyN (:,:,iblk), & ss_tltxN (:,:,iblk), ss_tltyN (:,:,iblk), & - icetmask (:,:,iblk), icenmask (:,:,iblk), & + iceTmask (:,:,iblk), iceNmask (:,:,iblk), & fmN (:,:,iblk), dt, & strtltxN (:,:,iblk), strtltyN (:,:,iblk), & strocnxN (:,:,iblk), strocnyN (:,:,iblk), & @@ -518,16 +527,16 @@ subroutine evp (dt) call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt (iblk), icelle (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellT (iblk), icellE (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxEi (:,iblk), indxEj (:,iblk), & aiE (:,:,iblk), emass (:,:,iblk), & emassdti (:,:,iblk), fcorE_blk (:,:,iblk), & emask (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & strairxE (:,:,iblk), strairyE (:,:,iblk), & ss_tltxE (:,:,iblk), ss_tltyE (:,:,iblk), & - icetmask (:,:,iblk), iceemask (:,:,iblk), & + iceTmask (:,:,iblk), iceEmask (:,:,iblk), & fmE (:,:,iblk), dt, & strtltxE (:,:,iblk), strtltyE (:,:,iblk), & strocnxE (:,:,iblk), strocnyE (:,:,iblk), & @@ -548,12 +557,12 @@ subroutine evp (dt) do i=1,nx_block do j=1,ny_block - if (.not.iceumask(i,j,iblk)) then + if (.not.iceUmask(i,j,iblk)) then stresspU (i,j,iblk) = c0 stressmU (i,j,iblk) = c0 stress12U(i,j,iblk) = c0 endif - if (icetmask(i,j,iblk) == 0) then + if (.not.iceTmask(i,j,iblk)) then stresspT (i,j,iblk) = c0 stressmT (i,j,iblk) = c0 stress12T(i,j,iblk) = c0 @@ -612,7 +621,7 @@ subroutine evp (dt) if (maskhalo_dyn) then halomask = 0 if (grid_ice == 'B') then - where (iceumask) halomask = 1 + where (iceUmask) halomask = 1 elseif (grid_ice == 'C' .or. grid_ice == 'CD') then !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,i,j) SCHEDULE(runtime) do iblk = 1, nblocks @@ -623,11 +632,11 @@ subroutine evp (dt) jhi = this_block%jhi do j = jlo,jhi do i = ilo,ihi - if (icetmask(i ,j ,iblk) /= 0 .or. & - icetmask(i-1,j ,iblk) /= 0 .or. & - icetmask(i+1,j ,iblk) /= 0 .or. & - icetmask(i ,j-1,iblk) /= 0 .or. 
& - icetmask(i ,j+1,iblk) /= 0) then + if (iceTmask(i ,j ,iblk) .or. & + iceTmask(i-1,j ,iblk) .or. & + iceTmask(i+1,j ,iblk) .or. & + iceTmask(i ,j-1,iblk) .or. & + iceTmask(i ,j+1,iblk)) then halomask(i,j,iblk) = 1 endif enddo @@ -654,8 +663,8 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_LKD (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj(:,iblk), & vice (:,:,iblk), aice(:,:,iblk), & hwater(:,:,iblk), TbU (:,:,iblk)) enddo @@ -665,8 +674,8 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_prob (nx_block , ny_block , & - icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & - icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + icellT(iblk), indxTi(:,iblk), indxTj(:,iblk), & + icellU(iblk), indxUi(:,iblk), indxUj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk) , & hwater (:,:,iblk), TbU (:,:,iblk)) enddo @@ -679,13 +688,13 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_LKD (nx_block , ny_block, & - icelle (iblk), & - indxei (:,iblk), indxej(:,iblk), & + icellE (iblk), & + indxEi (:,iblk), indxEj(:,iblk), & vice (:,:,iblk), aice(:,:,iblk), & hwater(:,:,iblk), TbE (:,:,iblk)) call seabed_stress_factor_LKD (nx_block , ny_block, & - icelln (iblk), & - indxni (:,iblk), indxnj(:,iblk), & + icellN (iblk), & + indxNi (:,iblk), indxNj(:,iblk), & vice (:,:,iblk), aice(:,:,iblk), & hwater(:,:,iblk), TbN (:,:,iblk)) enddo @@ -695,13 +704,13 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_prob (nx_block , ny_block , & - icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & - icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + icellT(iblk), indxTi(:,iblk), indxTj(:,iblk), & + icellU(iblk), indxUi(:,iblk), indxUj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk) , & hwater (:,:,iblk), TbU (:,:,iblk) , & TbE (:,:,iblk), TbN (:,:,iblk) , & - icelle(iblk), indxei(:,iblk), indxej(:,iblk), & - icelln(iblk), indxni(:,iblk), indxnj(:,iblk) ) + icellE(iblk), indxEi(:,iblk), indxEj(:,iblk), & + icellN(iblk), indxNi(:,iblk), indxNj(:,iblk) ) enddo !$OMP END PARALLEL DO endif @@ -724,8 +733,8 @@ subroutine evp (dt) call ice_timer_start(timer_evp_1d) call ice_dyn_evp_1d_copyin( & nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost, & - icetmask, iceumask, & - cdn_ocn,aiU,uocnU,vocnU,forcexU,forceyU,TbU, & + iceTmask, iceUmask, & + cdn_ocnU,aiU,uocnU,vocnU,forcexU,forceyU,TbU, & umassdti,fmU,uarear,tarear,strintxU,strintyU,uvel_init,vvel_init,& strength,uvel,vvel,dxT,dyT, & stressp_1 ,stressp_2, stressp_3, stressp_4, & @@ -745,9 +754,10 @@ subroutine evp (dt) else ! evp_algorithm == standard_2d (Standard CICE) call ice_timer_start(timer_evp_2d) - do ksub = 1,ndte ! subcycling - if (grid_ice == "B") then + if (grid_ice == "B") then + + do ksub = 1,ndte ! subcycling !$OMP PARALLEL DO PRIVATE(iblk,strtmp) SCHEDULE(runtime) do iblk = 1, nblocks @@ -756,8 +766,8 @@ subroutine evp (dt) ! 
stress tensor equation, total surface stress !----------------------------------------------------------------- call stress (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & dxhy (:,:,iblk), dyhx (:,:,iblk), & @@ -773,28 +783,12 @@ subroutine evp (dt) stress12_3(:,:,iblk), stress12_4(:,:,iblk), & strtmp (:,:,:) ) - !----------------------------------------------------------------- - ! on last subcycle, save quantities for mechanical redistribution - !----------------------------------------------------------------- - if (ksub == ndte) then - call deformations (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - cxp (:,:,iblk), cyp (:,:,iblk), & - cxm (:,:,iblk), cym (:,:,iblk), & - tarear (:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv(:,:,iblk), rdg_shear(:,:,iblk) ) - endif - !----------------------------------------------------------------- ! momentum equation !----------------------------------------------------------------- call stepu (nx_block , ny_block , & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocnU(:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), strtmp (:,:,:), & uocnU (:,:,iblk), vocnU (:,:,iblk), & waterxU (:,:,iblk), wateryU (:,:,iblk), & @@ -810,7 +804,37 @@ subroutine evp (dt) enddo ! iblk !$OMP END PARALLEL DO - elseif (grid_ice == "C") then + ! U fields at NE corner + ! calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner, field_type_vector, & + uvel, vvel) + + enddo ! sub cycling + + !----------------------------------------------------------------- + ! save quantities for mechanical redistribution + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + call deformations (nx_block , ny_block , & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + cxp (:,:,iblk), cyp (:,:,iblk), & + cxm (:,:,iblk), cym (:,:,iblk), & + tarear (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk) ) + enddo + !$OMP END PARALLEL DO + + + elseif (grid_ice == "C") then + + do ksub = 1,ndte ! subcycling !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -820,8 +844,8 @@ subroutine evp (dt) ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- call strain_rates_U (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & @@ -837,15 +861,15 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! 
calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_NEcorner, field_type_scalar, & shearU) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call stressC_T (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & dxN (:,:,iblk), dyE (:,:,iblk), & @@ -855,29 +879,11 @@ subroutine evp (dt) zetax2T (:,:,iblk), etax2T (:,:,iblk), & stresspT (:,:,iblk), stressmT (:,:,iblk)) - !----------------------------------------------------------------- - ! on last subcycle, save quantities for mechanical redistribution - !----------------------------------------------------------------- - if (ksub == ndte) then - - call deformationsC_T (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - dxN (:,:,iblk), dyE (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - tarear (:,:,iblk), uarea (:,:,iblk), & - shearU (:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) - - endif enddo !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_center, field_type_scalar, & zetax2T, etax2T, stresspT, stressmT) @@ -890,8 +896,8 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call stressC_U (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uarea (:,:,iblk), & etax2U (:,:,iblk), deltaU (:,:,iblk), & strengthU (:,:,iblk), shearU (:,:,iblk), & @@ -900,7 +906,7 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info , halo_info_mask, & + call dyn_haloUpdate (halo_info , halo_info_mask, & field_loc_NEcorner, field_type_scalar, & stress12U) @@ -908,8 +914,8 @@ subroutine evp (dt) do iblk = 1, nblocks call div_stress_Ex (nx_block , ny_block , & - icelle (iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), & + indxEi (:,iblk), indxEj (:,iblk), & dxE (:,:,iblk), dyE (:,:,iblk), & dxU (:,:,iblk), dyT (:,:,iblk), & earear (:,:,iblk) , & @@ -917,8 +923,8 @@ subroutine evp (dt) stress12U (:,:,iblk), strintxE (:,:,iblk) ) call div_stress_Ny (nx_block , ny_block , & - icelln (iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), & + indxNi (:,iblk), indxNj (:,iblk), & dxN (:,:,iblk), dyN (:,:,iblk), & dxT (:,:,iblk), dyU (:,:,iblk), & narear (:,:,iblk) , & @@ -932,8 +938,8 @@ subroutine evp (dt) do iblk = 1, nblocks call stepu_C (nx_block , ny_block , & ! u, E point - icelle (iblk), Cdn_ocn (:,:,iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), Cdn_ocnE (:,:,iblk), & + indxEi (:,iblk), indxEj (:,iblk), & aiE (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & waterxE (:,:,iblk), forcexE (:,:,iblk), & @@ -944,8 +950,8 @@ subroutine evp (dt) TbE (:,:,iblk)) call stepv_C (nx_block, ny_block, & ! 
v, N point - icelln (iblk), Cdn_ocn (:,:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), Cdn_ocnN (:,:,iblk), & + indxNi (:,iblk), indxNj (:,iblk), & aiN (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & wateryN (:,:,iblk), forceyN (:,:,iblk), & @@ -958,10 +964,10 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Eface, field_type_vector, & uvelE) - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Nface, field_type_vector, & vvelN) @@ -971,10 +977,10 @@ subroutine evp (dt) vvelE(:,:,:) = vvelE(:,:,:)*epm(:,:,:) ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Nface, field_type_vector, & uvelN) - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Eface, field_type_vector, & vvelE) @@ -983,14 +989,43 @@ subroutine evp (dt) uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) + ! U fields at NE corner + ! calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner, field_type_vector, & + uvel, vvel) + + enddo ! subcycling - elseif (grid_ice == "CD") then + !----------------------------------------------------------------- + ! save quantities for mechanical redistribution + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + call deformationsC_T (nx_block , ny_block , & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + tarear (:,:,iblk), uarea (:,:,iblk), & + shearU (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) + enddo + !$OMP END PARALLEL DO + + elseif (grid_ice == "CD") then + + do ksub = 1,ndte ! subcycling !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call stressCD_T (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & dxN (:,:,iblk), dyE (:,:,iblk), & @@ -1001,26 +1036,11 @@ subroutine evp (dt) stresspT (:,:,iblk), stressmT (:,:,iblk), & stress12T (:,:,iblk) ) - !----------------------------------------------------------------- - ! on last subcycle, save quantities for mechanical redistribution - !----------------------------------------------------------------- - if (ksub == ndte) then - call deformationsCD_T (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - dxN (:,:,iblk), dyE (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - tarear (:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) - endif enddo !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_center, field_type_scalar, & zetax2T, etax2T) @@ -1038,8 +1058,8 @@ subroutine evp (dt) ! 
NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- call strain_rates_U (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & @@ -1052,8 +1072,8 @@ subroutine evp (dt) shearU (:,:,iblk), DeltaU (:,:,iblk) ) call stressCD_U (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uarea (:,:,iblk), & zetax2U (:,:,iblk), etax2U (:,:,iblk), & strengthU(:,:,iblk), & @@ -1065,10 +1085,10 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_center, field_type_scalar, & stresspT, stressmT, stress12T) - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_NEcorner,field_type_scalar, & stresspU, stressmU, stress12U) @@ -1076,8 +1096,8 @@ subroutine evp (dt) do iblk = 1, nblocks call div_stress_Ex (nx_block , ny_block , & - icelle (iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), & + indxEi (:,iblk), indxEj (:,iblk), & dxE (:,:,iblk), dyE (:,:,iblk), & dxU (:,:,iblk), dyT (:,:,iblk), & earear (:,:,iblk) , & @@ -1085,8 +1105,8 @@ subroutine evp (dt) stress12U (:,:,iblk), strintxE (:,:,iblk) ) call div_stress_Ey (nx_block , ny_block , & - icelle (iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), & + indxEi (:,iblk), indxEj (:,iblk), & dxE (:,:,iblk), dyE (:,:,iblk), & dxU (:,:,iblk), dyT (:,:,iblk), & earear (:,:,iblk) , & @@ -1094,8 +1114,8 @@ subroutine evp (dt) stress12T (:,:,iblk), strintyE (:,:,iblk) ) call div_stress_Nx (nx_block , ny_block , & - icelln (iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), & + indxNi (:,iblk), indxNj (:,iblk), & dxN (:,:,iblk), dyN (:,:,iblk), & dxT (:,:,iblk), dyU (:,:,iblk), & narear (:,:,iblk) , & @@ -1103,8 +1123,8 @@ subroutine evp (dt) stress12T (:,:,iblk), strintxN (:,:,iblk) ) call div_stress_Ny (nx_block , ny_block , & - icelln (iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), & + indxNi (:,iblk), indxNj (:,iblk), & dxN (:,:,iblk), dyN (:,:,iblk), & dxT (:,:,iblk), dyU (:,:,iblk), & narear (:,:,iblk) , & @@ -1118,8 +1138,8 @@ subroutine evp (dt) do iblk = 1, nblocks call stepuv_CD (nx_block , ny_block , & ! E point - icelle (iblk), Cdn_ocn (:,:,iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), Cdn_ocnE (:,:,iblk), & + indxEi (:,iblk), indxEj (:,iblk), & aiE (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & waterxE (:,:,iblk), wateryE (:,:,iblk), & @@ -1132,8 +1152,8 @@ subroutine evp (dt) TbE (:,:,iblk)) call stepuv_CD (nx_block , ny_block , & ! N point - icelln (iblk), Cdn_ocn (:,:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), Cdn_ocnN (:,:,iblk), & + indxNi (:,iblk), indxNj (:,iblk), & aiN (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & waterxN (:,:,iblk), wateryN (:,:,iblk), & @@ -1148,10 +1168,10 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! 
calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Eface, field_type_vector, & uvelE, vvelE) - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Nface, field_type_vector, & uvelN, vvelN) @@ -1160,16 +1180,34 @@ subroutine evp (dt) uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) + ! U fields at NE corner + ! calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner, field_type_vector, & + uvel, vvel) + + enddo ! subcycling - endif ! grid_ice + !----------------------------------------------------------------- + ! save quantities for mechanical redistribution + !----------------------------------------------------------------- - ! U fields at NE corner - ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & - field_loc_NEcorner, field_type_vector, & - uvel, vvel) + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + call deformationsCD_T (nx_block , ny_block , & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + tarear (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) + enddo + !$OMP END PARALLEL DO + endif ! grid_ice - enddo ! subcycling call ice_timer_stop(timer_evp_2d) endif ! evp_algorithm @@ -1260,8 +1298,8 @@ subroutine evp (dt) do iblk = 1, nblocks call dyn_finish & (nx_block , ny_block , & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocnU(:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & aiU (:,:,iblk), fmU (:,:,iblk), & @@ -1276,8 +1314,8 @@ subroutine evp (dt) call dyn_finish & (nx_block , ny_block , & - icelln (iblk), Cdn_ocn (:,:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), Cdn_ocnN(:,:,iblk), & + indxNi (:,iblk), indxNj (:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & aiN (:,:,iblk), fmN (:,:,iblk), & @@ -1285,8 +1323,8 @@ subroutine evp (dt) call dyn_finish & (nx_block , ny_block , & - icelle (iblk), Cdn_ocn (:,:,iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), Cdn_ocnE(:,:,iblk), & + indxEi (:,iblk), indxEj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & aiE (:,:,iblk), fmE (:,:,iblk), & @@ -1297,30 +1335,12 @@ subroutine evp (dt) endif - ! strocn computed on U, N, E as needed. Map strocn U divided by aiU to T - ! TODO: This should be done elsewhere as part of generalization? - ! TODO: Rename strocn[x,y]T since it's different than strocn[x,y][U,N,E] - ! 
conservation requires aiU be divided before averaging - work1 = c0 - work2 = c0 - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) SCHEDULE(runtime) - do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij,iblk) - j = indxuj(ij,iblk) - work1(i,j,iblk) = strocnxU(i,j,iblk)/aiU(i,j,iblk) - work2(i,j,iblk) = strocnyU(i,j,iblk)/aiU(i,j,iblk) - enddo - enddo - !$OMP END PARALLEL DO - call ice_HaloUpdate (work1, halo_info, & - field_loc_NEcorner, field_type_vector) - call ice_HaloUpdate (work2, halo_info, & - field_loc_NEcorner, field_type_vector) - call grid_average_X2Y('F', work1, 'U', strocnxT, 'T') ! shift - call grid_average_X2Y('F', work2, 'U', strocnyT, 'T') - if (grid_ice == 'CD' .or. grid_ice == 'C') then + call ice_HaloUpdate (strintxE, halo_info, & + field_loc_Eface, field_type_vector) + call ice_HaloUpdate (strintyN, halo_info, & + field_loc_Nface, field_type_vector) + call ice_timer_stop(timer_bound) call grid_average_X2Y('S', strintxE, 'E', strintxU, 'U') ! diagnostic call grid_average_X2Y('S', strintyN, 'N', strintyU, 'U') ! diagnostic endif @@ -1337,8 +1357,8 @@ end subroutine evp ! author: Elizabeth C. Hunke, LANL subroutine stress (nx_block, ny_block, & - icellt, & - indxti, indxtj, & + icellT, & + indxTi, indxTj, & uvel, vvel, & dxT, dyT, & dxhy, dyhx, & @@ -1358,11 +1378,11 @@ subroutine stress (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & strength , & ! ice strength (N/m) @@ -1418,9 +1438,9 @@ subroutine stress (nx_block, ny_block, & str(:,:,:) = c0 - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! strain rates @@ -1639,8 +1659,8 @@ end subroutine stress ! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. subroutine stressC_T (nx_block, ny_block , & - icellt , & - indxti , indxtj , & + icellT , & + indxTi , indxTj , & uvelE , vvelE , & uvelN , vvelN , & dxN , dyE , & @@ -1655,11 +1675,11 @@ subroutine stressC_T (nx_block, ny_block , & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvelE , & ! 
x-component of velocity (m/s) at the E point @@ -1702,17 +1722,17 @@ subroutine stressC_T (nx_block, ny_block , & !----------------------------------------------------------------- call strain_rates_T (nx_block , ny_block , & - icellt , & - indxti(:) , indxtj (:) , & + icellT , & + indxTi(:) , indxTj (:) , & uvelE (:,:), vvelE (:,:), & uvelN (:,:), vvelN (:,:), & dxN (:,:), dyE (:,:), & dxT (:,:), dyT (:,:), & divT (:,:), tensionT(:,:) ) - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! Square of shear strain rate at T obtained from interpolation of @@ -1764,8 +1784,8 @@ end subroutine stressC_T ! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. subroutine stressC_U (nx_block , ny_block, & - icellu, & - indxui , indxuj, & + icellU, & + indxUi , indxUj, & uarea , & etax2U , deltaU, & strengthU, shearU, & @@ -1776,11 +1796,11 @@ subroutine stressC_U (nx_block , ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! no. of cells where iceumask = 1 + icellU ! no. of cells where iceUmask = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uarea , & ! area of U point @@ -1813,17 +1833,17 @@ subroutine stressC_U (nx_block , ny_block, & !----------------------------------------------------------------- if (visc_method == 'avg_zeta') then - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) & + arlx1i*p5*etax2U(i,j)*shearU(i,j)) * denom1 enddo elseif (visc_method == 'avg_strength') then - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) DminUarea = deltaminEVP*uarea(i,j) ! only need etax2U here, but other terms are calculated with etax2U ! minimal extra calculations here even though it seems like there is @@ -1844,8 +1864,8 @@ end subroutine stressC_U ! Nov 2021 subroutine stressCD_T (nx_block, ny_block, & - icellt, & - indxti, indxtj, & + icellT, & + indxTi, indxTj, & uvelE, vvelE, & uvelN, vvelN, & dxN, dyE, & @@ -1861,11 +1881,11 @@ subroutine stressCD_T (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvelE , & ! 
x-component of velocity (m/s) at the E point @@ -1908,8 +1928,8 @@ subroutine stressCD_T (nx_block, ny_block, & !----------------------------------------------------------------- call strain_rates_T (nx_block , ny_block , & - icellt , & - indxti(:) , indxtj (:) , & + icellT , & + indxTi(:) , indxTj (:) , & uvelE (:,:), vvelE (:,:), & uvelN (:,:), vvelN (:,:), & dxN (:,:), dyE (:,:), & @@ -1917,9 +1937,9 @@ subroutine stressCD_T (nx_block, ny_block, & divT (:,:), tensionT(:,:), & shearT(:,:), DeltaT (:,:) ) - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! viscosities and replacement pressure at T point @@ -1954,8 +1974,8 @@ end subroutine stressCD_T ! Nov 2021 subroutine stressCD_U (nx_block, ny_block, & - icellu, & - indxui, indxuj, & + icellU, & + indxUi, indxUj, & uarea, & zetax2U, etax2U, & strengthU, & @@ -1970,11 +1990,11 @@ subroutine stressCD_U (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! no. of cells where iceumask = 1 + icellU ! no. of cells where iceUmask = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uarea , & ! area of U-cell (m^2) @@ -2004,9 +2024,9 @@ subroutine stressCD_U (nx_block, ny_block, & character(len=*), parameter :: subname = '(stressCD_U)' - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) !----------------------------------------------------------------- ! viscosities and replacement pressure at U point diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 index 2c6ae3dd4..e3432eaab 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 @@ -1029,7 +1029,7 @@ end subroutine dealloc1d !======================================================================= subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, & - I_icetmask, I_iceumask, I_cdn_ocn, I_aiu, I_uocn, I_vocn, & + I_iceTmask, I_iceUmask, I_cdn_ocn, I_aiu, I_uocn, I_vocn, & I_forcex, I_forcey, I_Tbu, I_umassdti, I_fm, I_uarear, I_tarear, & I_strintx, I_strinty, I_uvel_init, I_vvel_init, I_strength, & I_uvel, I_vvel, I_dxt, I_dyt, I_stressp_1, I_stressp_2, & @@ -1047,9 +1047,7 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, & integer(int_kind), intent(in) :: nx, ny, nblk, nx_glob, ny_glob logical(kind=log_kind), dimension(nx, ny, nblk), intent(in) :: & - I_iceumask - integer(kind=int_kind), dimension(nx, ny, nblk), intent(in) :: & - I_icetmask + I_iceTmask, I_iceUmask real(kind=dbl_kind), dimension(nx, ny, nblk), intent(in) :: & I_cdn_ocn, I_aiu, I_uocn, I_vocn, I_forcex, I_forcey, I_Tbu, & I_umassdti, I_fm, I_uarear, I_tarear, I_strintx, I_strinty, & @@ -1061,9 +1059,7 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, & ! 
local variables logical(kind=log_kind), dimension(nx_glob, ny_glob) :: & - G_iceumask - integer(kind=int_kind), dimension(nx_glob, ny_glob) :: & - G_icetmask + G_iceTmask, G_iceUmask real(kind=dbl_kind), dimension(nx_glob, ny_glob) :: & G_cdn_ocn, G_aiu, G_uocn, G_vocn, G_forcex, G_forcey, G_Tbu, & G_umassdti, G_fm, G_uarear, G_tarear, G_strintx, G_strinty, & @@ -1075,8 +1071,8 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, & character(len=*), parameter :: & subname = '(ice_dyn_evp_1d_copyin)' - call gather_global_ext(G_icetmask, I_icetmask, master_task, distrb_info ) - call gather_global_ext(G_iceumask, I_iceumask, master_task, distrb_info ) + call gather_global_ext(G_iceTmask, I_iceTmask, master_task, distrb_info ) + call gather_global_ext(G_iceUmask, I_iceUmask, master_task, distrb_info ) call gather_global_ext(G_cdn_ocn, I_cdn_ocn, master_task, distrb_info ) call gather_global_ext(G_aiu, I_aiu, master_task, distrb_info ) call gather_global_ext(G_uocn, I_uocn, master_task, distrb_info ) @@ -1113,9 +1109,9 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, & ! all calculations id done on master task if (my_task == master_task) then ! find number of active points and allocate 1D vectors - call calc_na(nx_glob, ny_glob, NA_len, G_icetmask, G_iceumask) + call calc_na(nx_glob, ny_glob, NA_len, G_iceTmask, G_iceUmask) call alloc1d(NA_len) - call calc_2d_indices(nx_glob, ny_glob, NA_len, G_icetmask, G_iceumask) + call calc_2d_indices(nx_glob, ny_glob, NA_len, G_iceTmask, G_iceUmask) call calc_navel(nx_glob, ny_glob, NA_len, NAVEL_len) call alloc1d_navel(NAVEL_len) ! initialize OpenMP. FIXME: ought to be called from main @@ -1132,7 +1128,7 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, & G_stressp_2, G_stressp_3, G_stressp_4, G_stressm_1, & G_stressm_2, G_stressm_3, G_stressm_4, G_stress12_1, & G_stress12_2, G_stress12_3, G_stress12_4) - call calc_halo_parent(nx_glob, ny_glob, NA_len, NAVEL_len, G_icetmask) + call calc_halo_parent(nx_glob, ny_glob, NA_len, NAVEL_len, G_iceTmask) end if end subroutine ice_dyn_evp_1d_copyin @@ -1348,7 +1344,7 @@ end subroutine ice_dyn_evp_1d_kernel !======================================================================= - subroutine calc_na(nx, ny, na, icetmask, iceumask) + subroutine calc_na(nx, ny, na, iceTmask, iceUmask) ! Calculate number of active points use ice_blocks, only : nghost @@ -1356,10 +1352,8 @@ subroutine calc_na(nx, ny, na, icetmask, iceumask) implicit none integer(kind=int_kind), intent(in) :: nx, ny - integer(kind=int_kind), dimension(nx, ny), intent(in) :: & - icetmask logical(kind=log_kind), dimension(nx, ny), intent(in) :: & - iceumask + iceTmask, iceUmask integer(kind=int_kind), intent(out) :: na ! local variables @@ -1372,7 +1366,7 @@ subroutine calc_na(nx, ny, na, icetmask, iceumask) ! NOTE: T mask includes northern and eastern ghost cells do j = 1 + nghost, ny do i = 1 + nghost, nx - if (icetmask(i,j) == 1 .or. iceumask(i,j)) na = na + 1 + if (iceTmask(i,j) .or. 
iceUmask(i,j)) na = na + 1 end do end do @@ -1380,17 +1374,15 @@ end subroutine calc_na !======================================================================= - subroutine calc_2d_indices(nx, ny, na, icetmask, iceumask) + subroutine calc_2d_indices(nx, ny, na, iceTmask, iceUmask) use ice_blocks, only : nghost implicit none integer(kind=int_kind), intent(in) :: nx, ny, na - integer(kind=int_kind), dimension(nx, ny), intent(in) :: & - icetmask logical(kind=log_kind), dimension(nx, ny), intent(in) :: & - iceumask + iceTmask, iceUmask ! local variables @@ -1406,12 +1398,12 @@ subroutine calc_2d_indices(nx, ny, na, icetmask, iceumask) ! NOTE: T mask includes northern and eastern ghost cells do j = 1 + nghost, ny do i = 1 + nghost, nx - if (icetmask(i,j) == 1 .or. iceumask(i,j)) then + if (iceTmask(i,j) .or. iceUmask(i,j)) then Nmaskt = Nmaskt + 1 indi(Nmaskt) = i indj(Nmaskt) = j - if (icetmask(i,j) /= 1) skiptcell(Nmaskt) = .true. - if (.not. iceumask(i,j)) skipucell(Nmaskt) = .true. + if (.not. iceTmask(i,j)) skiptcell(Nmaskt) = .true. + if (.not. iceUmask(i,j)) skipucell(Nmaskt) = .true. ! NOTE: U mask does not include northern and eastern ! ghost cells. Skip northern and eastern ghost cells if (i == nx) skipucell(Nmaskt) = .true. @@ -1600,13 +1592,13 @@ end subroutine convert_2d_1d !======================================================================= - subroutine calc_halo_parent(nx, ny, na, navel, I_icetmask) + subroutine calc_halo_parent(nx, ny, na, navel, I_iceTmask) implicit none integer(kind=int_kind), intent(in) :: nx, ny, na, navel - integer(kind=int_kind), dimension(nx, ny), intent(in) :: & - I_icetmask + logical(kind=log_kind), dimension(nx, ny), intent(in) :: & + I_iceTmask ! local variables @@ -1631,10 +1623,10 @@ subroutine calc_halo_parent(nx, ny, na, navel, I_icetmask) j = int((indij(iw) - 1) / (nx)) + 1 i = indij(iw) - (j - 1) * nx ! if within ghost zone - if (i == nx .and. I_icetmask(2, j) == 1) Ihalo(iw) = 2 + (j - 1) * nx - if (i == 1 .and. I_icetmask(nx - 1, j) == 1) Ihalo(iw) = (nx - 1) + (j - 1) * nx - if (j == ny .and. I_icetmask(i, 2) == 1) Ihalo(iw) = i + nx - if (j == 1 .and. I_icetmask(i, ny - 1) == 1) Ihalo(iw) = i + (ny - 2) * nx + if (i == nx .and. I_iceTmask(2, j) ) Ihalo(iw) = 2 + (j - 1) * nx + if (i == 1 .and. I_iceTmask(nx - 1, j) ) Ihalo(iw) = (nx - 1) + (j - 1) * nx + if (j == ny .and. I_iceTmask(i, 2) ) Ihalo(iw) = i + nx + if (j == 1 .and. I_iceTmask(i, ny - 1) ) Ihalo(iw) = i + (ny - 2) * nx end do ! relate halo indices to indij vector diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 95d2eedb1..187ec55cc 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -106,6 +106,12 @@ module ice_dyn_shared uvelE_init , & ! x-component of velocity (m/s), beginning of timestep vvelE_init ! y-component of velocity (m/s), beginning of timestep + logical (kind=log_kind), dimension (:,:,:), allocatable, public :: & + iceTmask, & ! ice extent mask (T-cell) + iceUmask, & ! ice extent mask (U-cell) + iceNmask, & ! ice extent mask (N-cell) + iceEmask ! ice extent mask (E-cell) + real (kind=dbl_kind), allocatable, public :: & DminTarea(:,:,:) ! deltamin * tarea (m^2/s) @@ -168,6 +174,8 @@ subroutine alloc_dyn_shared allocate( & uvel_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep vvel_init (nx_block,ny_block,max_blocks), & ! 
y-component of velocity (m/s), beginning of timestep
+ iceTmask (nx_block,ny_block,max_blocks), & ! T mask for dynamics
+ iceUmask (nx_block,ny_block,max_blocks), & ! U mask for dynamics
 stat=ierr)
 if (ierr/=0) call abort_ice(subname//': Out of memory')
@@ -177,6 +185,8 @@ subroutine alloc_dyn_shared
 vvelE_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep
 uvelN_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep
 vvelN_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep
+ iceEmask (nx_block,ny_block,max_blocks), & ! E mask for dynamics
+ iceNmask (nx_block,ny_block,max_blocks), & ! N mask for dynamics
 stat=ierr)
 if (ierr/=0) call abort_ice(subname//': Out of memory')
 endif
@@ -199,7 +209,7 @@ subroutine init_dyn (dt)
 stresspT, stressmT, stress12T, &
 stresspU, stressmU, stress12U
 use ice_state, only: uvel, vvel, uvelE, vvelE, uvelN, vvelN, divu, shear
- use ice_grid, only: ULAT, NLAT, ELAT, tarea, iceumask, iceemask, icenmask
+ use ice_grid, only: ULAT, NLAT, ELAT, tarea
 real (kind=dbl_kind), intent(in) :: &
 dt ! time step
@@ -310,10 +320,10 @@ subroutine init_dyn (dt)
 endif
 ! ice extent mask on velocity points
- iceumask(i,j,iblk) = .false.
+ iceUmask(i,j,iblk) = .false.
 if (grid_ice == 'CD' .or. grid_ice == 'C') then
- iceemask(i,j,iblk) = .false.
- icenmask(i,j,iblk) = .false.
+ iceEmask(i,j,iblk) = .false.
+ iceNmask(i,j,iblk) = .false.
 end if
 enddo ! i
 enddo ! j
@@ -394,7 +404,7 @@ subroutine dyn_prep1 (nx_block, ny_block, &
 real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: &
 Tmass ! total mass of ice and snow (kg/m^2)
- integer (kind=int_kind), dimension (nx_block,ny_block), intent(out) :: &
+ logical (kind=log_kind), dimension (nx_block,ny_block), intent(out) :: &
 iceTmask ! ice extent mask (T-cell)
 ! local variables
@@ -438,7 +448,7 @@ subroutine dyn_prep1 (nx_block, ny_block, &
 !-----------------------------------------------------------------
 ! augmented mask (land + open ocean)
 !-----------------------------------------------------------------
- iceTmask (i,j) = 0
+ iceTmask (i,j) = .false.
 enddo
 enddo
@@ -450,10 +460,10 @@ subroutine dyn_prep1 (nx_block, ny_block, &
 if (tmphm(i-1,j+1) .or. tmphm(i,j+1) .or. tmphm(i+1,j+1) .or. &
 tmphm(i-1,j) .or. tmphm(i,j) .or. tmphm(i+1,j) .or. &
 tmphm(i-1,j-1) .or. tmphm(i,j-1) .or. tmphm(i+1,j-1) ) then
- iceTmask(i,j) = 1
+ iceTmask(i,j) = .true.
 endif
- if (.not.Tmask(i,j)) iceTmask(i,j) = 0
+ if (.not.Tmask(i,j)) iceTmask(i,j) = .false.
 enddo
 enddo
@@ -504,8 +514,8 @@ subroutine dyn_prep2 (nx_block, ny_block, &
 ilo,ihi,jlo,jhi ! beginning and end of physical domain
 integer (kind=int_kind), intent(out) :: &
- icellT , & ! no. of cells where iceTmask = 1
- icellX ! no. of cells where iceXmask = 1
+ icellT , & ! no. of cells where iceTmask = .true.
+ icellX ! no. of cells where iceXmask = .true.
 integer (kind=int_kind), dimension (nx_block*ny_block), intent(out) :: &
 indxTi , & ! compressed index in i-direction on T grid
 indxTj , & ! compressed index in j-direction on T grid
 indxXi , & ! compressed index in i-direction on X grid
 indxXj ! compressed index in j-direction on X grid
 logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: &
 Xmask ! land/boundary mask, thickness (X-grid-cell)
- integer (kind=int_kind), dimension (nx_block,ny_block), intent(in) :: &
+ logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: &
 iceTmask !
ice extent mask (T-cell) logical (kind=log_kind), dimension (nx_block,ny_block), intent(inout) :: & @@ -590,7 +600,7 @@ subroutine dyn_prep2 (nx_block, ny_block, & taubx (i,j) = c0 tauby (i,j) = c0 - if (iceTmask(i,j)==0) then + if (.not.iceTmask(i,j)) then stressp_1 (i,j) = c0 stressp_2 (i,j) = c0 stressp_3 (i,j) = c0 @@ -608,7 +618,7 @@ subroutine dyn_prep2 (nx_block, ny_block, & enddo ! j !----------------------------------------------------------------- - ! Identify cells where iceTmask = 1 + ! Identify cells where iceTmask = .true. ! Note: The icellT mask includes north and east ghost cells ! where stresses are needed. !----------------------------------------------------------------- @@ -616,7 +626,7 @@ subroutine dyn_prep2 (nx_block, ny_block, & icellT = 0 do j = jlo, jhi+1 do i = ilo, ihi+1 - if (iceTmask(i,j) == 1) then + if (iceTmask(i,j)) then icellT = icellT + 1 indxTi(icellT) = i indxTj(icellT) = j @@ -729,7 +739,7 @@ subroutine stepu (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellU ! total count when iceumask is true + icellU ! total count when iceUmask is true integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxUi , & ! compressed index in i-direction @@ -1166,7 +1176,7 @@ subroutine dyn_finish (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellU ! total count when iceumask is true + icellU ! total count when iceUmask is true integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxUi , & ! compressed index in i-direction @@ -1635,7 +1645,7 @@ subroutine deformations (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellT ! no. of cells where iceTmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxTi , & ! compressed index in i-direction @@ -1733,7 +1743,7 @@ subroutine deformationsCD_T (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellT ! no. of cells where iceTmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxTi , & ! compressed index in i-direction @@ -1830,7 +1840,7 @@ subroutine deformationsC_T (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellT ! no. of cells where iceTmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxTi , & ! compressed index in i-direction diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 17fd0b73f..6534e7568 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -51,7 +51,7 @@ module ice_dyn_vp seabed_stress, Ktens, stack_fields, unstack_fields use ice_fileunits, only: nu_diag use ice_flux, only: fmU - use ice_global_reductions, only: global_sum, global_allreduce_sum + use ice_global_reductions, only: global_sum use ice_grid, only: dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, uarear use ice_exit, only: abort_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted @@ -96,17 +96,19 @@ module ice_dyn_vp ! module variables integer (kind=int_kind), allocatable :: & - icellt(:) , & ! no. of cells where icetmask = 1 - icellu(:) ! no. 
of cells where iceumask = 1 + icellT(:) , & ! no. of cells where iceTmask = .true. + icellU(:) ! no. of cells where iceUmask = .true. integer (kind=int_kind), allocatable :: & - indxti(:,:) , & ! compressed index in i-direction - indxtj(:,:) , & ! compressed index in j-direction - indxui(:,:) , & ! compressed index in i-direction - indxuj(:,:) ! compressed index in j-direction + indxTi(:,:) , & ! compressed index in i-direction + indxTj(:,:) , & ! compressed index in j-direction + indxUi(:,:) , & ! compressed index in i-direction + indxUj(:,:) ! compressed index in j-direction real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:) ! work array for boundary updates + fld2(:,:,:,:), & ! work array for boundary updates + fld3(:,:,:,:), & ! work array for boundary updates + fld4(:,:,:,:) ! work array for boundary updates !======================================================================= @@ -136,12 +138,14 @@ subroutine init_vp this_block ! block information for current block ! Initialize module variables - allocate(icellt(max_blocks), icellu(max_blocks)) - allocate(indxti(nx_block*ny_block, max_blocks), & - indxtj(nx_block*ny_block, max_blocks), & - indxui(nx_block*ny_block, max_blocks), & - indxuj(nx_block*ny_block, max_blocks)) + allocate(icellT(max_blocks), icellU(max_blocks)) + allocate(indxTi(nx_block*ny_block, max_blocks), & + indxTj(nx_block*ny_block, max_blocks), & + indxUi(nx_block*ny_block, max_blocks), & + indxUj(nx_block*ny_block, max_blocks)) allocate(fld2(nx_block,ny_block,2,max_blocks)) + allocate(fld3(nx_block,ny_block,3,max_blocks)) + allocate(fld4(nx_block,ny_block,4,max_blocks)) end subroutine init_vp @@ -167,19 +171,19 @@ subroutine implicit_solver (dt) use ice_blocks, only: block, get_block, nx_block, ny_block use ice_domain, only: blocks_ice, halo_info, maskhalo_dyn use ice_domain_size, only: max_blocks, ncat - use ice_dyn_shared, only: deformations + use ice_dyn_shared, only: deformations, iceTmask, iceUmask use ice_flux, only: rdg_conv, rdg_shear, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & - strocnxT, strocnyT, strax, stray, & + strax, stray, & TbU, hwater, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_grid, only: tmask, umask, dxT, dyT, cxp, cyp, cxm, cym, & - tarear, grid_type, grid_average_X2Y, iceumask, & + tarear, grid_type, grid_average_X2Y, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv - use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & + use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, divu, shear, & aice_init, aice0, aicen, vicen, strength use ice_timers, only: timer_dynamics, timer_bound, & ice_timer_start, ice_timer_stop @@ -200,6 +204,7 @@ subroutine implicit_solver (dt) vocnU , & ! j ocean current (m/s) ss_tltxU , & ! sea surface slope, x-direction (m/m) ss_tltyU , & ! sea surface slope, y-direction (m/m) + cdn_ocnU , & ! ocn drag coefficient tmass , & ! total mass of ice and snow (kg/m^2) waterxU , & ! for ocean stress calculation, x (m/s) wateryU , & ! for ocean stress calculation, y (m/s) @@ -210,7 +215,6 @@ subroutine implicit_solver (dt) Cb , & ! seabed stress coefficient fpresx , & ! fixed point residual vector, x components: fx = uvel - uprev_k fpresy , & ! fixed point residual vector, y components: fy = vvel - vprev_k - aiU , & ! ice fraction on u-grid umass , & ! 
total mass of ice and snow (u grid) umassdti ! mass of U-cell/dte (kg/m^2 s) @@ -222,7 +226,6 @@ subroutine implicit_solver (dt) logical (kind=log_kind) :: calc_strair integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & - icetmask, & ! ice extent mask (T-cell) halomask ! generic halo mask type (ice_halo) :: & @@ -234,10 +237,6 @@ subroutine implicit_solver (dt) real (kind=dbl_kind), allocatable :: & sol(:) ! solution vector - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1, & ! temporary - work2 ! temporary - character(len=*), parameter :: subname = '(implicit_solver)' call ice_timer_start(timer_dynamics) ! dynamics @@ -290,13 +289,13 @@ subroutine implicit_solver (dt) ilo, ihi, jlo, jhi, & aice (:,:,iblk), vice (:,:,iblk), & vsno (:,:,iblk), tmask (:,:,iblk), & - tmass (:,:,iblk), icetmask(:,:,iblk)) + tmass (:,:,iblk), iceTmask(:,:,iblk)) enddo ! iblk !$OMP END PARALLEL DO call ice_timer_start(timer_bound) - call ice_HaloUpdate (icetmask, halo_info, & + call ice_HaloUpdate (iceTmask, halo_info, & field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) @@ -304,12 +303,22 @@ subroutine implicit_solver (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('F',tmass , 'T', umass, 'U') - call grid_average_X2Y('F',aice_init, 'T', aiU , 'U') - call grid_average_X2Y('S',uocn , grid_ocn_dynu, uocnU , 'U') - call grid_average_X2Y('S',vocn , grid_ocn_dynv, vocnU , 'U') - call grid_average_X2Y('S',ss_tltx, grid_ocn_dynu, ss_tltxU, 'U') - call grid_average_X2Y('S',ss_tlty, grid_ocn_dynv, ss_tltyU, 'U') + call stack_fields(tmass, aice_init, cdn_ocn, fld3) + call ice_HaloUpdate (fld3, halo_info, & + field_loc_center, field_type_scalar) + call stack_fields(uocn, vocn, ss_tltx, ss_tlty, fld4) + call ice_HaloUpdate (fld4, halo_info, & + field_loc_center, field_type_vector) + call unstack_fields(fld3, tmass, aice_init, cdn_ocn) + call unstack_fields(fld4, uocn, vocn, ss_tltx, ss_tlty) + + call grid_average_X2Y('S',tmass , 'T' , umass , 'U') + call grid_average_X2Y('S',aice_init, 'T' , aiU , 'U') + call grid_average_X2Y('S',cdn_ocn , 'T' , cdn_ocnU, 'U') + call grid_average_X2Y('S',uocn , grid_ocn_dynu, uocnU , 'U') + call grid_average_X2Y('S',vocn , grid_ocn_dynv, vocnU , 'U') + call grid_average_X2Y('S',ss_tltx , grid_ocn_dynu, ss_tltxU, 'U') + call grid_average_X2Y('S',ss_tlty , grid_ocn_dynv, ss_tltyU, 'U') !---------------------------------------------------------------- ! Set wind stress to values supplied via NEMO or other forcing @@ -332,9 +341,7 @@ subroutine implicit_solver (dt) call grid_average_X2Y('F',strairyT,'T',strairyU,'U') endif -! tcraig, tcx, threading here leads to some non-reproducbile results and failures in icepack_ice_strength -! 
need to do more debugging - !$TCXOMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) do iblk = 1, nblocks !----------------------------------------------------------------- @@ -349,16 +356,16 @@ subroutine implicit_solver (dt) call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt(iblk), icellu(iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellT(iblk), icellU(iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & - icetmask (:,:,iblk), iceumask (:,:,iblk), & + iceTmask (:,:,iblk), iceUmask (:,:,iblk), & fmU (:,:,iblk), dt, & strtltxU (:,:,iblk), strtltyU (:,:,iblk), & strocnxU (:,:,iblk), strocnyU (:,:,iblk), & @@ -377,8 +384,8 @@ subroutine implicit_solver (dt) TbU (:,:,iblk)) call calc_bfix (nx_block , ny_block , & - icellu(iblk) , & - indxui (:,iblk), indxuj (:,iblk), & + icellU(iblk) , & + indxUi (:,iblk), indxUj (:,iblk), & umassdti (:,:,iblk), & forcexU (:,:,iblk), forceyU (:,:,iblk), & uvel_init (:,:,iblk), vvel_init (:,:,iblk), & @@ -389,9 +396,9 @@ subroutine implicit_solver (dt) !----------------------------------------------------------------- strength(:,:,iblk) = c0 ! initialize - do ij = 1, icellt(iblk) - i = indxti(ij, iblk) - j = indxtj(ij, iblk) + do ij = 1, icellT(iblk) + i = indxTi(ij, iblk) + j = indxTj(ij, iblk) call icepack_ice_strength (ncat, & aice (i,j, iblk), & vice (i,j, iblk), & @@ -402,7 +409,7 @@ subroutine implicit_solver (dt) enddo ! ij enddo ! iblk - !$TCXOMP END PARALLEL DO + !$OMP END PARALLEL DO call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & @@ -421,7 +428,7 @@ subroutine implicit_solver (dt) if (maskhalo_dyn) then call ice_timer_start(timer_bound) halomask = 0 - where (iceumask) halomask = 1 + where (iceUmask) halomask = 1 call ice_HaloUpdate (halomask, halo_info, & field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) @@ -436,8 +443,8 @@ subroutine implicit_solver (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call seabed_stress_factor_LKD (nx_block, ny_block, & - icellu (iblk), & - indxui(:,iblk), indxuj(:,iblk), & + icellU (iblk), & + indxUi(:,iblk), indxUj(:,iblk), & vice(:,:,iblk), aice(:,:,iblk), & hwater(:,:,iblk), TbU(:,:,iblk)) enddo @@ -448,8 +455,8 @@ subroutine implicit_solver (dt) do iblk = 1, nblocks call seabed_stress_factor_prob (nx_block, ny_block, & - icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & - icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + icellT(iblk), indxTi(:,iblk), indxTj(:,iblk), & + icellU(iblk), indxUi(:,iblk), indxUj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk), & hwater(:,:,iblk), TbU(:,:,iblk)) enddo @@ -465,7 +472,7 @@ subroutine implicit_solver (dt) ntot = 0 do iblk = 1, nblocks - ntot = ntot + icellu(iblk) + ntot = ntot + icellU(iblk) enddo ntot = 2 * ntot ! times 2 because of u and v @@ -474,9 +481,9 @@ subroutine implicit_solver (dt) !----------------------------------------------------------------- ! 
Start of nonlinear iteration !----------------------------------------------------------------- - call anderson_solver (icellt , icellu , & - indxti , indxtj , & - indxui , indxuj , & + call anderson_solver (icellT , icellU , & + indxTi , indxTj , & + indxUi , indxUj , & aiU , ntot , & uocnU , vocnU , & waterxU , wateryU, & @@ -484,7 +491,7 @@ subroutine implicit_solver (dt) umassdti, sol , & fpresx , fpresy , & zetax2 , etax2 , & - rep_prs , & + rep_prs , cdn_ocnU,& Cb, halo_info_mask) !----------------------------------------------------------------- ! End of nonlinear iteration @@ -500,8 +507,8 @@ subroutine implicit_solver (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call stress_vp (nx_block , ny_block , & - icellt(iblk) , & - indxti (:,iblk), indxtj (:,iblk), & + icellT(iblk) , & + indxTi (:,iblk), indxTj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & @@ -523,8 +530,8 @@ subroutine implicit_solver (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call deformations (nx_block , ny_block , & - icellt(iblk) , & - indxti (:,iblk), indxtj (:,iblk), & + icellT(iblk) , & + indxTi (:,iblk), indxTj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & @@ -542,8 +549,8 @@ subroutine implicit_solver (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call calc_seabed_stress (nx_block , ny_block , & - icellu(iblk) , & - indxui (:,iblk), indxuj (:,iblk), & + icellU(iblk) , & + indxUi (:,iblk), indxUj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & Cb (:,:,iblk), & taubxU (:,:,iblk), taubyU (:,:,iblk)) @@ -628,8 +635,8 @@ subroutine implicit_solver (dt) call dyn_finish & (nx_block, ny_block, & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocnU(:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & aiU (:,:,iblk), fmU (:,:,iblk), & @@ -640,27 +647,6 @@ subroutine implicit_solver (dt) enddo !$OMP END PARALLEL DO - ! strocn computed on U, N, E as needed. Map strocn U divided by aiU to T - ! TODO: This should be done elsewhere as part of generalization? - ! conservation requires aiU be divided before averaging - work1 = c0 - work2 = c0 - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij,iblk) - j = indxuj(ij,iblk) - work1(i,j,iblk) = strocnxU(i,j,iblk)/aiU(i,j,iblk) - work2(i,j,iblk) = strocnyU(i,j,iblk)/aiU(i,j,iblk) - enddo - enddo - call ice_HaloUpdate (work1, halo_info, & - field_loc_NEcorner, field_type_vector) - call ice_HaloUpdate (work2, halo_info, & - field_loc_NEcorner, field_type_vector) - call grid_average_X2Y('F',work1,'U',strocnxT,'T') ! shift - call grid_average_X2Y('F',work2,'U',strocnyT,'T') - ! shift velocity components from CD grid locations (N, E) to B grid location (U) for transport ! commented out in order to focus on EVP for now within the cdgrid ! should be used when routine is ready @@ -685,9 +671,9 @@ end subroutine implicit_solver ! H. F. Walker, “Anderson Acceleration: Algorithms and Implementations” ! [Online]. 
Available: https://users.wpi.edu/~walker/Papers/anderson_accn_algs_imps.pdf - subroutine anderson_solver (icellt , icellu , & - indxti , indxtj , & - indxui , indxuj , & + subroutine anderson_solver (icellT , icellU , & + indxTi , indxTj , & + indxUi , indxUj , & aiU , ntot , & uocn , vocn , & waterxU , wateryU, & @@ -695,10 +681,9 @@ subroutine anderson_solver (icellt , icellu , & umassdti, sol , & fpresx , fpresy , & zetax2 , etax2 , & - rep_prs , & + rep_prs , cdn_ocn, & Cb, halo_info_mask) - use ice_arrays_column, only: Cdn_ocn use ice_blocks, only: nx_block, ny_block use ice_boundary, only: ice_HaloUpdate use ice_constants, only: c1 @@ -715,19 +700,20 @@ subroutine anderson_solver (icellt , icellu , & ntot ! size of problem for Anderson integer (kind=int_kind), dimension(max_blocks), intent(in) :: & - icellt , & ! no. of cells where icetmask = 1 - icellu ! no. of cells where iceumask = 1 + icellT , & ! no. of cells where iceTmask = .true. + icellU ! no. of cells where iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj , & ! compressed index in j-direction - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj , & ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & aiU , & ! ice fraction on u-grid uocn , & ! i ocean current (m/s) vocn , & ! j ocean current (m/s) + cdn_ocn , & ! ocn drag coefficient waterxU , & ! for ocean stress calculation, x (m/s) wateryU , & ! for ocean stress calculation, y (m/s) bxfix , & ! part of bx that is constant during Picard @@ -783,9 +769,6 @@ subroutine anderson_solver (icellt , icellu , & stress_Pr, & ! x,y-derivatives of the replacement pressure diag_rheo ! contributions of the rhelogy term to the diagonal - real (kind=dbl_kind), dimension (max_blocks) :: & - L2norm ! array used to compute l^2 norm of grid function - real (kind=dbl_kind), dimension (ntot) :: & res , & ! current residual res_old , & ! previous residual @@ -821,7 +804,6 @@ subroutine anderson_solver (icellt , icellu , & ! Initialization res_num = 0 - L2norm = c0 !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -850,8 +832,8 @@ subroutine anderson_solver (icellt , icellu , & vprev_k(:,:,iblk) = vvel(:,:,iblk) call calc_zeta_dPr (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & uprev_k (:,:,iblk), vprev_k (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & dxhy (:,:,iblk), dyhx (:,:,iblk), & @@ -862,8 +844,8 @@ subroutine anderson_solver (icellt , icellu , & rep_prs(:,:,iblk,:), stress_Pr (:,:,:)) call calc_vrel_Cb (nx_block , ny_block , & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocn (:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), TbU (:,:,iblk), & uocn (:,:,iblk), vocn (:,:,iblk), & ulin (:,:,iblk), vlin (:,:,iblk), & @@ -871,8 +853,8 @@ subroutine anderson_solver (icellt , icellu , & ! 
prepare b vector (RHS) call calc_bvec (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & stress_Pr (:,:,:), uarear (:,:,iblk), & waterxU (:,:,iblk), wateryU (:,:,iblk), & bxfix (:,:,iblk), byfix (:,:,iblk), & @@ -881,9 +863,9 @@ subroutine anderson_solver (icellt , icellu , & ! Compute nonlinear residual norm (PDE residual) call matvec (nx_block , ny_block , & - icellu (iblk) , icellt (iblk), & - indxui (:,iblk) , indxuj (:,iblk), & - indxti (:,iblk) , indxtj (:,iblk), & + icellU (iblk) , icellT (iblk), & + indxUi (:,iblk) , indxUj (:,iblk), & + indxTi (:,iblk) , indxTj (:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx (:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -895,15 +877,17 @@ subroutine anderson_solver (icellt , icellu , & uarear (:,:,iblk) , & Au (:,:,iblk) , Av (:,:,iblk)) call residual_vec (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & bx (:,:,iblk), by (:,:,iblk), & Au (:,:,iblk), Av (:,:,iblk), & - Fx (:,:,iblk), Fy (:,:,iblk), & - L2norm (iblk)) + Fx (:,:,iblk), Fy (:,:,iblk)) enddo !$OMP END PARALLEL DO - nlres_norm = sqrt(global_sum(sum(L2norm), distrb_info)) + nlres_norm = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + Fx , Fy ) if (my_task == master_task .and. monitor_nonlin) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_nonlin: iter_nonlin= ", it_nl, & " nonlin_res_L2norm= ", nlres_norm @@ -923,8 +907,8 @@ subroutine anderson_solver (icellt , icellu , & soly = vprev_k call arrays_to_vec (nx_block , ny_block , & nblocks , max_blocks , & - icellu (:), ntot , & - indxui (:,:), indxuj (:,:), & + icellU (:), ntot , & + indxUi (:,:), indxUj (:,:), & uprev_k (:,:,:), vprev_k (:,:,:), & sol (:)) @@ -938,8 +922,8 @@ subroutine anderson_solver (icellt , icellu , & do iblk = 1, nblocks ! first compute diagonal contributions due to rheology term call formDiag_step1 (nx_block , ny_block , & - icellu (iblk) , & - indxui (:,iblk) , indxuj(:,iblk), & + icellU (iblk) , & + indxUi (:,iblk) , indxUj(:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx(:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -948,8 +932,8 @@ subroutine anderson_solver (icellt , icellu , & diag_rheo(:,:,:)) ! second compute the full diagonal call formDiag_step2 (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & diag_rheo (:,:,:), vrel (:,:,iblk), & umassdti (:,:,iblk), & uarear (:,:,iblk), Cb (:,:,iblk), & @@ -972,8 +956,8 @@ subroutine anderson_solver (icellt , icellu , & ! 
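!-----------------------------------------------------------------
! A minimal standalone sketch (not CICE code) of the stopping test
! behind nlres_norm above: iterate u <- g(u) and exit once the
! nonlinear residual satisfies ||F(u_k)|| < reltol*||F(u_0)||, the
! same reltol_nonlin*res(k=0) criterion used by the nonlinear loop.
! All names below are illustrative only.
!-----------------------------------------------------------------
program picard_stop_demo
  implicit none
  integer , parameter :: dbl = kind(1.0d0)
  real (kind=dbl), parameter :: reltol = 1.0e-8_dbl ! cf. reltol_nonlin
  real (kind=dbl) :: u, res, res0
  integer :: k
  u    = 1.0_dbl               ! initial iterate
  res0 = abs(g(u) - u)         ! nonlinear residual at k = 0
  do k = 1, 100                ! iteration cap, cf. maxits_nonlin
     u   = g(u)                ! fixed-point (Picard) update
     res = abs(g(u) - u)
     if (res < reltol*res0) exit
  enddo
  write(*,'(a,i4,a,d26.16)') ' stopped: iter=', k, ' res=', res
contains
  real (kind=dbl) function g(x) ! toy contraction standing in for the VP fixed-point map
    real (kind=dbl), intent(in) :: x
    g = cos(x)
  end function g
end program picard_stop_demo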
Put FGMRES solution solx,soly in fpfunc vector (needed for Anderson) call arrays_to_vec (nx_block , ny_block , & nblocks , max_blocks , & - icellu (:), ntot , & - indxui (:,:), indxuj (:,:), & + icellU (:), ntot , & + indxUi (:,:), indxUj (:,:), & solx (:,:,:), soly (:,:,:), & fpfunc (:)) elseif (fpfunc_andacc == 2) then @@ -989,20 +973,14 @@ subroutine anderson_solver (icellt , icellu , & #else call vec_to_arrays (nx_block , ny_block , & nblocks , max_blocks , & - icellu (:), ntot , & - indxui (:,:), indxuj(:,:) , & + icellU (:), ntot , & + indxUi (:,:), indxUj(:,:) , & res (:), & fpresx (:,:,:), fpresy (:,:,:)) - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call calc_L2norm_squared (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & - fpresx(:,:,iblk), fpresy(:,:,iblk), & - L2norm (iblk)) - enddo - !$OMP END PARALLEL DO - fpres_norm = sqrt(global_sum(sum(L2norm), distrb_info)) + fpres_norm = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + fpresx , fpresy ) #endif if (my_task == master_task .and. monitor_nonlin) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_nonlin: iter_nonlin= ", it_nl, & @@ -1108,8 +1086,8 @@ subroutine anderson_solver (icellt , icellu , & !----------------------------------------------------------------------- call vec_to_arrays (nx_block , ny_block , & nblocks , max_blocks , & - icellu (:), ntot , & - indxui (:,:), indxuj (:,:), & + icellU (:), ntot , & + indxUi (:,:), indxUj (:,:), & sol (:), & uvel (:,:,:), vvel (:,:,:)) @@ -1131,14 +1109,12 @@ subroutine anderson_solver (icellt , icellu , & do iblk = 1, nblocks fpresx(:,:,iblk) = uvel(:,:,iblk) - uprev_k(:,:,iblk) fpresy(:,:,iblk) = vvel(:,:,iblk) - vprev_k(:,:,iblk) - call calc_L2norm_squared (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & - fpresx(:,:,iblk), fpresy(:,:,iblk), & - L2norm (iblk)) enddo !$OMP END PARALLEL DO - prog_norm = sqrt(global_sum(sum(L2norm), distrb_info)) + prog_norm = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + fpresx , fpresy ) if (my_task == master_task .and. monitor_nonlin) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_nonlin: iter_nonlin= ", it_nl, & " progress_res_L2norm= ", prog_norm @@ -1153,8 +1129,8 @@ end subroutine anderson_solver ! Computes the viscosities and dPr/dx, dPr/dy subroutine calc_zeta_dPr (nx_block, ny_block, & - icellt , & - indxti , indxtj , & + icellT , & + indxTi , indxTj , & uvel , vvel , & dxT , dyT , & dxhy , dyhx , & @@ -1169,11 +1145,11 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & strength , & ! ice strength (N/m) @@ -1215,14 +1191,14 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & character(len=*), parameter :: subname = '(calc_zeta_dPr)' ! Initialize stPr, zetax2 and etax2 to zero - ! (for cells where icetmask is false) + ! 
(for cells where iceTmask is false) stPr = c0 zetax2 = c0 etax2 = c0 - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! strain rates @@ -1348,8 +1324,8 @@ end subroutine calc_zeta_dPr ! viscous-plastic sea ice stresses, Geosci. Model Dev., 13, 1763–1769, subroutine stress_vp (nx_block , ny_block , & - icellt , & - indxti , indxtj , & + icellT , & + indxTi , indxTj , & uvel , vvel , & dxT , dyT , & cxp , cyp , & @@ -1367,11 +1343,11 @@ subroutine stress_vp (nx_block , ny_block , & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvel , & ! x-component of velocity (m/s) @@ -1406,9 +1382,9 @@ subroutine stress_vp (nx_block , ny_block , & character(len=*), parameter :: subname = '(stress_vp)' - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! strain rates @@ -1458,8 +1434,8 @@ end subroutine stress_vp ! Compute vrel and seabed stress coefficients subroutine calc_vrel_Cb (nx_block, ny_block, & - icellu , Cw , & - indxui , indxuj , & + icellU , Cw , & + indxUi , indxUj , & aiU , TbU , & uocn , vocn , & uvel , vvel , & @@ -1469,11 +1445,11 @@ subroutine calc_vrel_Cb (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & TbU, & ! seabed stress factor (N/m^2) @@ -1505,9 +1481,9 @@ subroutine calc_vrel_Cb (nx_block, ny_block, & if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) ! (magnitude of relative ocean current)*rhow*drag*aice vrel(i,j) = aiU(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uvel(i,j))**2 + & @@ -1523,19 +1499,19 @@ end subroutine calc_vrel_Cb ! Compute seabed stress (diagnostic) subroutine calc_seabed_stress (nx_block, ny_block, & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & uvel , vvel , & Cb , & taubxU , taubyU) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvel , & ! 
x-component of velocity (m/s) @@ -1553,9 +1529,9 @@ subroutine calc_seabed_stress (nx_block, ny_block, & character(len=*), parameter :: subname = '(calc_seabed_stress)' - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) taubxU(i,j) = -uvel(i,j)*Cb(i,j) taubyU(i,j) = -vvel(i,j)*Cb(i,j) @@ -1570,9 +1546,9 @@ end subroutine calc_seabed_stress ! Av = A(u,v)_[y] * vvel (y components of A(u,v) * (u,v)) subroutine matvec (nx_block, ny_block, & - icellu , icellt , & - indxui , indxuj , & - indxti , indxtj , & + icellU , icellT , & + indxUi , indxUj , & + indxTi , indxTj , & dxT , dyT , & dxhy , dyhx , & cxp , cyp , & @@ -1588,14 +1564,14 @@ subroutine matvec (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu, & ! total count when iceumask is true - icellt ! no. of cells where icetmask = 1 + icellU, & ! total count when iceUmask = .true. + icellT ! total count when iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj , & ! compressed index in j-direction - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj , & ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & dxT , & ! width of T-cell through the middle (m) @@ -1664,9 +1640,9 @@ subroutine matvec (nx_block, ny_block, & str(:,:,:) = c0 - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! strain rates @@ -1807,15 +1783,15 @@ subroutine matvec (nx_block, ny_block, & str(i,j,8) = strp_tmp - strm_tmp + str12sn & - dyhx(i,j)*(csigpsw + csigmsw) + dxhy(i,j)*csig12sw - enddo ! ij - icellt + enddo ! ij - icellT !----------------------------------------------------------------- ! Form Au and Av !----------------------------------------------------------------- - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) ccaimp = umassdti(i,j) + vrel(i,j) * cosw + Cb(i,j) ! kg/m^2 s @@ -1829,7 +1805,7 @@ subroutine matvec (nx_block, ny_block, & Au(i,j) = ccaimp*uvel(i,j) - ccb*vvel(i,j) - strintx Av(i,j) = ccaimp*vvel(i,j) + ccb*uvel(i,j) - strinty - enddo ! ij - icellu + enddo ! ij - icellU end subroutine matvec @@ -1839,8 +1815,8 @@ end subroutine matvec ! does not depend on (u,v) and thus do not change during the nonlinear iteration subroutine calc_bfix (nx_block , ny_block , & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & umassdti , & forcexU , forceyU , & uvel_init, vvel_init, & @@ -1848,11 +1824,11 @@ subroutine calc_bfix (nx_block , ny_block , & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! no. of cells where iceumask = 1 + icellU ! no. of cells where iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvel_init,& ! 
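!-----------------------------------------------------------------
! A minimal standalone sketch (not CICE code) of the two pointwise
! kernels above: vrel = aice*rhow*Cw*|uocn - u| from calc_vrel_Cb,
! and the seabed stress diagnostic taub = -Cb*u from
! calc_seabed_stress. All values are made up for illustration.
!-----------------------------------------------------------------
program vrel_taub_demo
  implicit none
  integer , parameter :: dbl = kind(1.0d0)
  real (kind=dbl), parameter :: rhow = 1026.0_dbl ! ocean density (kg/m^3)
  real (kind=dbl) :: aiU, Cw, uocn, vocn, uvel, vvel, Cb, vrel, taubx, tauby
  aiU  = 0.9_dbl                     ! ice fraction on the U point
  Cw   = 0.00536_dbl                 ! ice-ocean drag coefficient (typical value)
  uocn = 0.10_dbl ; vocn = 0.0_dbl   ! ocean current (m/s)
  uvel = 0.05_dbl ; vvel = 0.0_dbl   ! ice velocity (m/s)
  vrel = aiU*rhow*Cw*sqrt((uocn - uvel)**2 + (vocn - vvel)**2)
  Cb    = 0.5_dbl                    ! seabed stress coefficient (made up)
  taubx = -uvel*Cb                   ! seabed stress opposes ice motion
  tauby = -vvel*Cb
  write(*,'(a,d26.16)')  ' vrel = ', vrel
  write(*,'(a,2d26.16)') ' taub = ', taubx, tauby
end program vrel_taub_demo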
x-component of velocity (m/s), beginning of time step @@ -1872,9 +1848,9 @@ subroutine calc_bfix (nx_block , ny_block , & character(len=*), parameter :: subname = '(calc_bfix)' - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) bxfix(i,j) = umassdti(i,j)*uvel_init(i,j) + forcexU(i,j) byfix(i,j) = umassdti(i,j)*vvel_init(i,j) + forceyU(i,j) @@ -1889,8 +1865,8 @@ end subroutine calc_bfix ! depending on (u,v) subroutine calc_bvec (nx_block, ny_block, & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & stPr , uarear , & waterxU , wateryU , & bxfix , byfix , & @@ -1899,11 +1875,11 @@ subroutine calc_bvec (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uarear , & ! 1/uarea @@ -1941,9 +1917,9 @@ subroutine calc_bvec (nx_block, ny_block, & if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) ! ice/ocean stress taux = vrel(i,j)*waterxU(i,j) ! NOTE this is not the entire @@ -1969,20 +1945,19 @@ end subroutine calc_bvec ! Av = A(u,v)_[y] * vvel (y components of A(u,v) * (u,v)) subroutine residual_vec (nx_block , ny_block, & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & bx , by , & Au , Av , & - Fx , Fy , & - sum_squared) + Fx , Fy ) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & bx , & ! b vector, bx = taux + bxfix (N/m^2) @@ -1994,9 +1969,6 @@ subroutine residual_vec (nx_block , ny_block, & Fx , & ! x residual vector, Fx = bx - Au (N/m^2) Fy ! y residual vector, Fy = by - Av (N/m^2) - real (kind=dbl_kind), intent(out), optional :: & - sum_squared ! sum of squared residual vector components - ! local variables integer (kind=int_kind) :: & @@ -2005,22 +1977,15 @@ subroutine residual_vec (nx_block , ny_block, & character(len=*), parameter :: subname = '(residual_vec)' !----------------------------------------------------------------- - ! compute residual and sum its squared components + ! compute residual !----------------------------------------------------------------- - if (present(sum_squared)) then - sum_squared = c0 - endif - - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) Fx(i,j) = bx(i,j) - Au(i,j) Fy(i,j) = by(i,j) - Av(i,j) - if (present(sum_squared)) then - sum_squared = sum_squared + Fx(i,j)**2 + Fy(i,j)**2 - endif enddo ! ij end subroutine residual_vec @@ -2031,8 +1996,8 @@ end subroutine residual_vec ! 
Part 1: compute the contributions to the diagonal from the rheology term subroutine formDiag_step1 (nx_block, ny_block, & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & dxT , dyT , & dxhy , dyhx , & cxp , cyp , & @@ -2042,11 +2007,11 @@ subroutine formDiag_step1 (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! no. of cells where icetmask = 1 + icellU ! no. of cells where iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & dxT , & ! width of T-cell through the middle (m) @@ -2202,10 +2167,10 @@ subroutine formDiag_step1 (nx_block, ny_block, & dj = 1 endif - do ij = 1, icellu + do ij = 1, icellU - iu = indxui(ij) - ju = indxuj(ij) + iu = indxUi(ij) + ju = indxUj(ij) i = iu + di j = ju + dj @@ -2406,8 +2371,8 @@ end subroutine formDiag_step1 ! Part 2: compute diagonal subroutine formDiag_step2 (nx_block, ny_block, & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & Drheo , vrel , & umassdti, & uarear , Cb , & @@ -2415,11 +2380,11 @@ subroutine formDiag_step2 (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & vrel, & ! coefficient for tauw @@ -2465,9 +2430,9 @@ subroutine formDiag_step2 (nx_block, ny_block, & ! Drheo(i,j,7) corresponds to str(i+1,j,7) ! Drheo(i,j,8) corresponds to str(i+1,j+1,8)) - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) ccaimp = umassdti(i,j) + vrel(i,j) * cosw + Cb(i,j) ! kg/m^2 s @@ -2484,50 +2449,121 @@ end subroutine formDiag_step2 !======================================================================= -! Compute squared l^2 norm of a grid function (tpu,tpv) +! Compute global l^2 norm of a vector field (field_x, field_y) - subroutine calc_L2norm_squared (nx_block, ny_block, & - icellu , & - indxui , indxuj , & - tpu , tpv , & - L2norm) + function global_norm (nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + field_x , field_y ) & + result(norm) + + use ice_domain, only: distrb_info + use ice_domain_size, only: max_blocks integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + nx_block, ny_block ! block dimensions - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + integer (kind=int_kind), dimension (max_blocks), intent(in) :: & + icellU ! total count when iceumask is true - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - tpu , & ! x-component of vector grid function - tpv ! y-component of vector grid function + integer (kind=int_kind), dimension (nx_block*ny_block,max_blocks), intent(in) :: & + indxUi , & ! 
compressed index in i-direction + indxUj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & + field_x , & ! x-component of vector field + field_y ! y-component of vector field - real (kind=dbl_kind), intent(out) :: & - L2norm ! squared l^2 norm of vector grid function (tpu,tpv) + real (kind=dbl_kind) :: & + norm ! l^2 norm of vector field ! local variables integer (kind=int_kind) :: & - i, j, ij + i, j, ij, iblk - character(len=*), parameter :: subname = '(calc_L2norm_squared)' + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + squared ! temporary array for summed squared components - !----------------------------------------------------------------- - ! compute squared l^2 norm of vector grid function (tpu,tpv) - !----------------------------------------------------------------- + character(len=*), parameter :: subname = '(global_norm)' - L2norm = c0 + norm = sqrt(global_dot_product (nx_block , ny_block , & + icellU , & + indxUi , indxUj , & + field_x , field_y , & + field_x , field_y )) - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + end function global_norm - L2norm = L2norm + tpu(i,j)**2 + tpv(i,j)**2 - enddo ! ij +!======================================================================= + +! Compute global dot product of two grid vectors, each split into X and Y components + + function global_dot_product (nx_block , ny_block , & + icellU , & + indxUi , indxUj , & + vector1_x , vector1_y, & + vector2_x , vector2_y) & + result(dot_product) + + use ice_domain, only: distrb_info + use ice_domain_size, only: max_blocks + use ice_fileunits, only: bfbflag + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block ! block dimensions + + integer (kind=int_kind), dimension (max_blocks), intent(in) :: & + icellU ! total count when iceumask is true + + integer (kind=int_kind), dimension (nx_block*ny_block,max_blocks), intent(in) :: & + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & + vector1_x , & ! x-component of first vector + vector1_y , & ! y-component of first vector + vector2_x , & ! x-component of second vector + vector2_y ! y-component of second vector + + real (kind=dbl_kind) :: & + dot_product ! dot product of the two vector fields + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij, iblk + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + prod ! temporary array + + real (kind=dbl_kind), dimension(max_blocks) :: & + dot ! temporary scalar for accumulating the result + + character(len=*), parameter :: subname = '(global_dot_product)' + + prod = c0 + dot = c0 + + !$OMP PARALLEL DO PRIVATE(i, j, ij, iblk) + do iblk = 1, nblocks + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) + prod(i,j,iblk) = vector1_x(i,j,iblk)*vector2_x(i,j,iblk) + vector1_y(i,j,iblk)*vector2_y(i,j,iblk) + dot(iblk) = dot(iblk) + prod(i,j,iblk) + enddo ! ij + enddo + !$OMP END PARALLEL DO + + !
Use local summation result unless bfbflag is active + if (bfbflag == 'off') then + dot_product = global_sum(sum(dot), distrb_info) + else + dot_product = global_sum(prod, distrb_info, field_loc_NEcorner) + endif - end subroutine calc_L2norm_squared + end function global_dot_product !======================================================================= @@ -2535,8 +2571,8 @@ end subroutine calc_L2norm_squared subroutine arrays_to_vec (nx_block, ny_block , & nblocks , max_blocks, & - icellu , ntot , & - indxui , indxuj , & + icellU , ntot , & + indxUi , indxUj , & tpu , tpv , & outvec) @@ -2547,11 +2583,11 @@ subroutine arrays_to_vec (nx_block, ny_block , & ntot ! size of problem for Anderson integer (kind=int_kind), dimension (max_blocks), intent(in) :: & - icellu + icellU integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block, max_blocks), intent(in) :: & tpu , & ! x-component of vector @@ -2575,9 +2611,9 @@ subroutine arrays_to_vec (nx_block, ny_block , & tot = 0 do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) tot = tot + 1 outvec(tot) = tpu(i, j, iblk) tot = tot + 1 @@ -2593,8 +2629,8 @@ end subroutine arrays_to_vec subroutine vec_to_arrays (nx_block, ny_block , & nblocks , max_blocks, & - icellu , ntot , & - indxui , indxuj , & + icellU , ntot , & + indxUi , indxUj , & invec , & tpu , tpv) @@ -2605,11 +2641,11 @@ subroutine vec_to_arrays (nx_block, ny_block , & ntot ! size of problem for Anderson integer (kind=int_kind), dimension (max_blocks), intent(in) :: & - icellu + icellU integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (ntot), intent(in) :: & invec ! input 1D vector @@ -2634,9 +2670,9 @@ subroutine vec_to_arrays (nx_block, ny_block , & tot = 0 do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) tot = tot + 1 tpu(i, j, iblk) = invec(tot) tot = tot + 1 @@ -2780,6 +2816,7 @@ subroutine fgmres (zetax2 , etax2 , & real (kind=dbl_kind) :: & norm_residual , & ! current L^2 norm of residual vector inverse_norm , & ! inverse of the norm of a vector + norm_rhs , & ! L^2 norm of right-hand-side vector nu, t ! local temporary values integer (kind=int_kind) :: & @@ -2819,14 +2856,25 @@ subroutine fgmres (zetax2 , etax2 , & arnoldi_basis_x = c0 arnoldi_basis_y = c0 + ! solution is zero if RHS is zero + norm_rhs = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + bx , by ) + if (norm_rhs == c0) then + solx = bx + soly = by + return + endif + ! 
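!-----------------------------------------------------------------
! A minimal standalone sketch (not CICE code) of why
! global_dot_product branches on bfbflag: floating-point addition is
! not associative, so per-block partial sums (the bfbflag='off'
! shortcut) can differ in the last bits from the fixed-order
! global_sum of the full prod array. Summing the same squares in two
! orders shows the effect; the real code reduces across MPI tasks.
!-----------------------------------------------------------------
program dot_order_demo
  implicit none
  integer , parameter :: dbl = kind(1.0d0)
  integer , parameter :: n = 1000000
  real (kind=dbl), allocatable :: x(:)
  real (kind=dbl) :: fwd, bwd
  integer :: i
  allocate(x(n))
  do i = 1, n
     x(i) = 1.0_dbl/real(i,dbl)  ! values spanning many magnitudes
  enddo
  fwd = 0.0_dbl
  do i = 1, n                    ! one summation order
     fwd = fwd + x(i)*x(i)
  enddo
  bwd = 0.0_dbl
  do i = n, 1, -1                ! the reverse order
     bwd = bwd + x(i)*x(i)
  enddo
  write(*,'(a,d26.16)') ' forward  dot = ', fwd
  write(*,'(a,d26.16)') ' backward dot = ', bwd
  write(*,'(a,d26.16)') ' difference   = ', fwd - bwd ! nonzero in general
end program dot_order_demo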
Residual of the initial iterate !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & - icellu (iblk) , icellt (iblk), & - indxui (:,iblk) , indxuj (:,iblk), & - indxti (:,iblk) , indxtj (:,iblk), & + icellU (iblk) , icellT (iblk), & + indxUi (:,iblk) , indxUj (:,iblk), & + indxTi (:,iblk) , indxTj (:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx (:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -2838,8 +2886,8 @@ subroutine fgmres (zetax2 , etax2 , & uarear (:,:,iblk) , & workspace_x(:,:,iblk) , workspace_y(:,:,iblk)) call residual_vec (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & bx (:,:,iblk), by (:,:,iblk), & workspace_x(:,:,iblk), workspace_y(:,:,iblk), & arnoldi_basis_x (:,:,iblk, 1), & @@ -2850,18 +2898,11 @@ subroutine fgmres (zetax2 , etax2 , & ! Start outer (restarts) loop do ! Compute norm of initial residual - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call calc_L2norm_squared(nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:,iblk), & - arnoldi_basis_x(:,:,iblk, 1) , & - arnoldi_basis_y(:,:,iblk, 1) , & - norm_squared(iblk)) - - enddo - !$OMP END PARALLEL DO - norm_residual = sqrt(global_sum(sum(norm_squared), distrb_info)) + norm_residual = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, 1), & + arnoldi_basis_y(:,:,:, 1)) if (my_task == master_task .and. monitor_fgmres) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_fgmres: iter_fgmres= ", nbiter, & @@ -2877,9 +2918,9 @@ subroutine fgmres (zetax2 , etax2 , & inverse_norm = c1 / norm_residual !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, 1) = arnoldi_basis_x(i, j, iblk, 1) * inverse_norm arnoldi_basis_y(i, j, iblk, 1) = arnoldi_basis_y(i, j, iblk, 1) * inverse_norm @@ -2907,6 +2948,7 @@ subroutine fgmres (zetax2 , etax2 , & call precondition(zetax2 , etax2 , & Cb , vrel , & umassdti , & + halo_info_mask, & arnoldi_basis_x(:,:,:,initer), & arnoldi_basis_y(:,:,:,initer), & diagx , diagy , & @@ -2931,9 +2973,9 @@ subroutine fgmres (zetax2 , etax2 , & !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & - icellu (iblk) , icellt (iblk), & - indxui (:,iblk) , indxuj (:,iblk), & - indxti (:,iblk) , indxtj (:,iblk), & + icellU (iblk) , icellT (iblk), & + indxUi (:,iblk) , indxUj (:,iblk), & + indxTi (:,iblk) , indxTj (:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx (:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -2955,17 +2997,11 @@ subroutine fgmres (zetax2 , etax2 , & hessenberg) ! Compute norm of new Arnoldi vector and update Hessenberg matrix - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call calc_L2norm_squared(nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:, iblk) , & - arnoldi_basis_x(:,:,iblk, nextit), & - arnoldi_basis_y(:,:,iblk, nextit), & - norm_squared(iblk)) - enddo - !$OMP END PARALLEL DO - hessenberg(nextit,initer) = sqrt(global_sum(sum(norm_squared), distrb_info)) + hessenberg(nextit,initer) = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, nextit), & + arnoldi_basis_y(:,:,:, nextit)) ! Watch out for happy breakdown if (.not. 
almost_zero( hessenberg(nextit,initer) ) ) then @@ -2973,9 +3009,9 @@ subroutine fgmres (zetax2 , etax2 , & inverse_norm = c1 / hessenberg(nextit,initer) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit)*inverse_norm arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit)*inverse_norm @@ -3039,9 +3075,9 @@ subroutine fgmres (zetax2 , etax2 , & t = rhs_hess(it) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) solx(i, j, iblk) = solx(i, j, iblk) + t * orig_basis_x(i, j, iblk, it) soly(i, j, iblk) = soly(i, j, iblk) + t * orig_basis_y(i, j, iblk, it) @@ -3083,9 +3119,9 @@ subroutine fgmres (zetax2 , etax2 , & do it = 1, nextit !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) workspace_x(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_x(i, j, iblk, it) workspace_y(i, j, iblk) = workspace_y(i, j, iblk) + rhs_hess(it) * arnoldi_basis_y(i, j, iblk, it) @@ -3109,6 +3145,7 @@ end subroutine fgmres subroutine pgmres (zetax2 , etax2 , & Cb , vrel , & umassdti , & + halo_info_mask, & bx , by , & diagx , diagy , & tolerance, maxinner, & @@ -3116,6 +3153,11 @@ subroutine pgmres (zetax2 , etax2 , & solx , soly , & nbiter) + use ice_boundary, only: ice_HaloUpdate + use ice_domain, only: maskhalo_dyn, halo_info + use ice_fileunits, only: bfbflag + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) etax2 ! etax2 = 2*eta (shear viscosity) @@ -3125,6 +3167,9 @@ subroutine pgmres (zetax2 , etax2 , & Cb , & ! seabed stress coefficient umassdti ! mass of U-cell/dte (kg/m^2 s) + type (ice_halo), intent(in) :: & + halo_info_mask ! ghost cell update info for masked halo + real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & bx , & ! Right hand side of the linear system (x components) by ! Right hand side of the linear system (y components) @@ -3217,9 +3262,9 @@ subroutine pgmres (zetax2 , etax2 , & !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & - icellu (iblk) , icellt (iblk), & - indxui (:,iblk) , indxuj (:,iblk), & - indxti (:,iblk) , indxtj (:,iblk), & + icellU (iblk) , icellT (iblk), & + indxUi (:,iblk) , indxUj (:,iblk), & + indxTi (:,iblk) , indxTj (:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx (:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -3231,8 +3276,8 @@ subroutine pgmres (zetax2 , etax2 , & uarear (:,:,iblk) , & workspace_x(:,:,iblk) , workspace_y(:,:,iblk)) call residual_vec (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & bx (:,:,iblk), by (:,:,iblk), & workspace_x(:,:,iblk), workspace_y(:,:,iblk), & arnoldi_basis_x (:,:,iblk, 1), & @@ -3243,18 +3288,11 @@ subroutine pgmres (zetax2 , etax2 , & ! Start outer (restarts) loop do !
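!-----------------------------------------------------------------
! A minimal standalone sketch (not CICE code) of the Arnoldi
! normalization guarded by almost_zero above: divide the new basis
! vector by its norm unless that norm has (almost) vanished - the
! "happy breakdown" case, where the Krylov space is exhausted and
! the current solution is already exact. Names are illustrative only.
!-----------------------------------------------------------------
program arnoldi_normalize_demo
  implicit none
  integer , parameter :: dbl = kind(1.0d0)
  real (kind=dbl), parameter :: tiny_norm = 1.0e-30_dbl ! stand-in for almost_zero
  real (kind=dbl) :: v(4), vnorm
  v = (/ 3.0_dbl, 0.0_dbl, 4.0_dbl, 0.0_dbl /) ! candidate basis vector
  vnorm = sqrt(sum(v*v))       ! plays the role of hessenberg(nextit,initer)
  if (vnorm > tiny_norm) then
     v = v * (1.0_dbl/vnorm)   ! scale by inverse_norm, as in fgmres/pgmres
     write(*,'(a,4f8.4)') ' normalized basis vector: ', v
  else
     write(*,'(a)') ' happy breakdown: basis cannot be extended'
  endif
end program arnoldi_normalize_demo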
Compute norm of initial residual - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call calc_L2norm_squared(nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:, iblk), & - arnoldi_basis_x(:,:,iblk, 1), & - arnoldi_basis_y(:,:,iblk, 1), & - norm_squared(iblk)) - - enddo - !$OMP END PARALLEL DO - norm_residual = sqrt(global_sum(sum(norm_squared), distrb_info)) + norm_residual = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, 1), & + arnoldi_basis_y(:,:,:, 1)) if (my_task == master_task .and. monitor_pgmres) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_pgmres: iter_pgmres= ", nbiter, & @@ -3270,9 +3308,9 @@ subroutine pgmres (zetax2 , etax2 , & inverse_norm = c1 / norm_residual !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, 1) = arnoldi_basis_x(i, j, iblk, 1) * inverse_norm arnoldi_basis_y(i, j, iblk, 1) = arnoldi_basis_y(i, j, iblk, 1) * inverse_norm @@ -3301,21 +3339,36 @@ subroutine pgmres (zetax2 , etax2 , & call precondition(zetax2 , etax2 , & Cb , vrel , & umassdti , & + halo_info_mask, & arnoldi_basis_x(:,:,:,initer), & arnoldi_basis_y(:,:,:,initer), & diagx , diagy , & precond_type, & workspace_x , workspace_y) - ! NOTE: halo updates for (workspace_x, workspace_y) - ! are skipped here for efficiency since this is just a preconditioner + ! Update workspace with boundary values + ! NOTE: skipped for efficiency since this is just a preconditioner + ! unless bfbflag is active + if (bfbflag /= 'off') then + call stack_fields(workspace_x, workspace_y, fld2) + call ice_timer_start(timer_bound) + if (maskhalo_dyn) then + call ice_HaloUpdate (fld2, halo_info_mask, & + field_loc_NEcorner, field_type_vector) + else + call ice_HaloUpdate (fld2, halo_info, & + field_loc_NEcorner, field_type_vector) + endif + call ice_timer_stop(timer_bound) + call unstack_fields(fld2, workspace_x, workspace_y) + endif !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & - icellu (iblk) , icellt (iblk), & - indxui (:,iblk) , indxuj (:,iblk), & - indxti (:,iblk) , indxtj (:,iblk), & + icellU (iblk) , icellT (iblk), & + indxUi (:,iblk) , indxUj (:,iblk), & + indxTi (:,iblk) , indxTj (:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx (:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -3337,17 +3390,11 @@ subroutine pgmres (zetax2 , etax2 , & hessenberg) ! Compute norm of new Arnoldi vector and update Hessenberg matrix - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call calc_L2norm_squared(nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:, iblk) , & - arnoldi_basis_x(:,:,iblk, nextit), & - arnoldi_basis_y(:,:,iblk, nextit), & - norm_squared(iblk)) - enddo - !$OMP END PARALLEL DO - hessenberg(nextit,initer) = sqrt(global_sum(sum(norm_squared), distrb_info)) + hessenberg(nextit,initer) = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, nextit), & + arnoldi_basis_y(:,:,:, nextit)) ! Watch out for happy breakdown if (.not. 
almost_zero( hessenberg(nextit,initer) ) ) then @@ -3355,9 +3402,9 @@ subroutine pgmres (zetax2 , etax2 , & inverse_norm = c1 / hessenberg(nextit,initer) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit)*inverse_norm arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit)*inverse_norm @@ -3423,9 +3470,9 @@ subroutine pgmres (zetax2 , etax2 , & t = rhs_hess(it) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) workspace_x(i, j, iblk) = workspace_x(i, j, iblk) + t * arnoldi_basis_x(i, j, iblk, it) workspace_y(i, j, iblk) = workspace_y(i, j, iblk) + t * arnoldi_basis_y(i, j, iblk, it) @@ -3438,6 +3485,7 @@ subroutine pgmres (zetax2 , etax2 , & call precondition(zetax2 , etax2 , & Cb , vrel , & umassdti , & + halo_info_mask, & workspace_x , workspace_y, & diagx , diagy , & precond_type, & @@ -3479,9 +3527,9 @@ subroutine pgmres (zetax2 , etax2 , & do it = 1, nextit !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) workspace_x(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_x(i, j, iblk, it) workspace_y(i, j, iblk) = workspace_y(i, j, iblk) + rhs_hess(it) * arnoldi_basis_y(i, j, iblk, it) @@ -3504,6 +3552,7 @@ end subroutine pgmres subroutine precondition(zetax2 , etax2, & Cb , vrel , & umassdti , & + halo_info_mask, & vx , vy , & diagx , diagy, & precond_type, & @@ -3518,6 +3567,9 @@ subroutine precondition(zetax2 , etax2, & Cb , & ! seabed stress coefficient umassdti ! mass of U-cell/dte (kg/m^2 s) + type (ice_halo), intent(in) :: & + halo_info_mask ! ghost cell update info for masked halo + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & vx , & ! input vector (x components) vy ! input vector (y components) @@ -3560,9 +3612,9 @@ subroutine precondition(zetax2 , etax2, & elseif (precond_type == 'diag') then ! Jacobi preconditioner (diagonal) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) wx(i,j,iblk) = vx(i,j,iblk) / diagx(i,j,iblk) wy(i,j,iblk) = vy(i,j,iblk) / diagy(i,j,iblk) @@ -3579,6 +3631,7 @@ subroutine precondition(zetax2 , etax2, & call pgmres (zetax2, etax2 , & Cb , vrel , & umassdti , & + halo_info_mask , & vx , vy , & diagx , diagy , & tolerance, maxinner, & @@ -3626,46 +3679,27 @@ subroutine orthogonalize(ortho_type , initer , & ij , & ! compressed index i, j ! grid indices - real (kind=dbl_kind), dimension (max_blocks) :: & - local_dot ! local array value to accumulate dot product of grid function over blocks - - real (kind=dbl_kind), dimension(maxinner) :: & - dotprod_local ! local array to accumulate several dot product computations - character(len=*), parameter :: subname = '(orthogonalize)' if (trim(ortho_type) == 'cgs') then ! Classical Gram-Schmidt ! Classical Gram-Schmidt orthogonalisation process !
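!-----------------------------------------------------------------
! A minimal standalone sketch (not CICE code) of the
! precond_type='diag' branch above: Jacobi preconditioning
! approximates the system matrix A by its diagonal D and applies
! M^{-1}v = v/D componentwise, exactly the wx = vx/diagx,
! wy = vy/diagy operation in precondition. Values are made up.
!-----------------------------------------------------------------
program jacobi_precond_demo
  implicit none
  integer , parameter :: dbl = kind(1.0d0)
  real (kind=dbl) :: v(3), diag(3), w(3)
  v    = (/ 2.0_dbl, -6.0_dbl, 10.0_dbl /) ! input vector
  diag = (/ 2.0_dbl,  3.0_dbl,  5.0_dbl /) ! diagonal of the system matrix
  w = v / diag                             ! apply M^{-1} componentwise
  write(*,'(a,3f8.3)') ' preconditioned vector: ', w ! 1.000 -2.000 2.000
end program jacobi_precond_demo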
First loop of Gram-Schmidt (compute coefficients) - dotprod_local = c0 do it = 1, initer - local_dot = c0 - - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) - - local_dot(iblk) = local_dot(iblk) + & - (arnoldi_basis_x(i, j, iblk, it) * arnoldi_basis_x(i, j, iblk, nextit)) + & - (arnoldi_basis_y(i, j, iblk, it) * arnoldi_basis_y(i, j, iblk, nextit)) - enddo ! ij - enddo - !$OMP END PARALLEL DO - - dotprod_local(it) = sum(local_dot) + hessenberg(it, initer) = global_dot_product(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, it) , & + arnoldi_basis_y(:,:,:, it) , & + arnoldi_basis_x(:,:,:, nextit), & + arnoldi_basis_y(:,:,:, nextit)) end do - - hessenberg(1:initer, initer) = global_allreduce_sum(dotprod_local(1:initer), distrb_info) - ! Second loop of Gram-Schmidt (orthonormalize) do it = 1, initer !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit) & - hessenberg(it,initer) * arnoldi_basis_x(i, j, iblk, it) @@ -3678,28 +3712,19 @@ subroutine orthogonalize(ortho_type , initer , & elseif (trim(ortho_type) == 'mgs') then ! Modified Gram-Schmidt ! Modified Gram-Schmidt orthogonalisation process do it = 1, initer - local_dot = c0 - - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) - - local_dot(iblk) = local_dot(iblk) + & - (arnoldi_basis_x(i, j, iblk, it) * arnoldi_basis_x(i, j, iblk, nextit)) + & - (arnoldi_basis_y(i, j, iblk, it) * arnoldi_basis_y(i, j, iblk, nextit)) - enddo ! 
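!-----------------------------------------------------------------
! A minimal standalone sketch (not CICE code) contrasting the two
! ortho_type options above. Classical Gram-Schmidt (cgs) computes
! all projection coefficients from the unmodified new vector and
! then subtracts; modified Gram-Schmidt (mgs) recomputes each
! coefficient from the partially orthogonalized vector, which is
! more robust to rounding. Names are illustrative only.
!-----------------------------------------------------------------
program gram_schmidt_demo
  implicit none
  integer , parameter :: dbl = kind(1.0d0)
  real (kind=dbl) :: q(3,2), w(3), h(2)
  integer :: it
  q(:,1) = (/ 1.0_dbl, 0.0_dbl, 0.0_dbl /) ! orthonormal basis vectors
  q(:,2) = (/ 0.0_dbl, 1.0_dbl, 0.0_dbl /)
  w      = (/ 1.0_dbl, 2.0_dbl, 3.0_dbl /) ! new Krylov vector
  do it = 1, 2                             ! cgs: coefficients first ...
     h(it) = dot_product(q(:,it), w)
  enddo
  do it = 1, 2                             ! ... then orthogonalize
     ! h(it) = dot_product(q(:,it), w)     ! <- mgs would recompute here instead
     w = w - h(it)*q(:,it)
  enddo
  write(*,'(a,3f8.3)') ' orthogonalized w: ', w ! 0.000 0.000 3.000
end program gram_schmidt_demo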
ij - enddo - !$OMP END PARALLEL DO - - hessenberg(it,initer) = global_sum(sum(local_dot), distrb_info) + hessenberg(it, initer) = global_dot_product(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, it) , & + arnoldi_basis_y(:,:,:, it) , & + arnoldi_basis_x(:,:,:, nextit), & + arnoldi_basis_y(:,:,:, nextit)) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit) & - hessenberg(it,initer) * arnoldi_basis_x(i, j, iblk, it) diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index c2cc986f8..45ae58d8b 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -73,15 +73,9 @@ subroutine input_data npt, dt, ndtd, days_per_year, use_leap_years, & write_ic, dump_last, npt_unit use ice_arrays_column, only: oceanmixed_ice -#ifdef UNDEPRECATE_CESMPONDS - use ice_restart_column, only: restart_age, restart_FY, restart_lvl, & - restart_pond_cesm, restart_pond_lvl, restart_pond_topo, restart_aero, & - restart_fsd, restart_iso, restart_snow -#else use ice_restart_column, only: restart_age, restart_FY, restart_lvl, & restart_pond_lvl, restart_pond_topo, restart_aero, & restart_fsd, restart_iso, restart_snow -#endif use ice_restart_shared, only: & restart, restart_ext, restart_coszen, restart_dir, restart_file, pointer_file, & runid, runtype, use_restart_time, restart_format, lcdf64 @@ -110,8 +104,8 @@ subroutine input_data grid_ice, grid_ice_thrm, grid_ice_dynu, grid_ice_dynv, & grid_ocn, grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv, & grid_atm, grid_atm_thrm, grid_atm_dynu, grid_atm_dynv, & - dxrect, dyrect, & - pgl_global_ext + dxrect, dyrect, dxscale, dyscale, scale_dxdy, & + lonrefrect, latrefrect, pgl_global_ext use ice_dyn_shared, only: ndte, kdyn, revised_evp, yield_curve, & evp_algorithm, visc_method, & seabed_stress, seabed_stress_method, & @@ -165,23 +159,12 @@ subroutine input_data logical (kind=log_kind) :: tr_iage, tr_FY, tr_lvl, tr_pond logical (kind=log_kind) :: tr_iso, tr_aero, tr_fsd, tr_snow -#ifdef UNDEPRECATE_CESMPONDS - logical (kind=log_kind) :: tr_pond_cesm, tr_pond_lvl, tr_pond_topo -#else logical (kind=log_kind) :: tr_pond_lvl, tr_pond_topo -#endif integer (kind=int_kind) :: numin, numax ! unit number limits -#ifdef UNDEPRECATE_CESMPONDS - integer (kind=int_kind) :: rpcesm, rplvl, rptopo -#else integer (kind=int_kind) :: rplvl, rptopo -#endif real (kind=dbl_kind) :: Cf, ksno, puny character (len=char_len) :: abort_list -#ifdef CESMCOUPLED - character (len=64) :: tmpstr -#endif character (len=128) :: tmpstr2 character(len=*), parameter :: subname='(input_data)' @@ -212,6 +195,8 @@ subroutine input_data bathymetry_file, use_bathymetry, nfsd, bathymetry_format, & ncat, nilyr, nslyr, nblyr, & kcatbound, gridcpl_file, dxrect, dyrect, & + dxscale, dyscale, lonrefrect, latrefrect, & + scale_dxdy, & close_boundaries, orca_halogrid, grid_ice, kmt_type, & grid_atm, grid_ocn @@ -219,9 +204,6 @@ subroutine input_data tr_iage, restart_age, & tr_FY, restart_FY, & tr_lvl, restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, restart_pond_cesm, & -#endif tr_pond_lvl, restart_pond_lvl, & tr_pond_topo, restart_pond_topo, & tr_snow, restart_snow, & @@ -398,6 +380,11 @@ subroutine input_data ksno = 0.3_dbl_kind ! snow thermal conductivity dxrect = 0.0_dbl_kind ! 
user defined grid spacing in cm in x direction dyrect = 0.0_dbl_kind ! user defined grid spacing in cm in y direction + lonrefrect = -156.50_dbl_kind ! lower left corner lon for rectgrid + latrefrect = 71.35_dbl_kind ! lower left corner lat for rectgrid + scale_dxdy = .false. ! apply dxscale, dyscale to rectgrid + dxscale = 1.0_dbl_kind ! user defined rectgrid x-grid scale factor (e.g., 1.02) + dyscale = 1.0_dbl_kind ! user defined rectgrid y-grid scale factor (e.g., 1.02) close_boundaries = .false. ! true = set land on edges of grid seabed_stress= .false. ! if true, seabed stress for landfast is on seabed_stress_method = 'LKD'! LKD = Lemieux et al 2015, probabilistic = Dupont et al. in prep @@ -412,7 +399,7 @@ subroutine input_data deltaminEVP = 1e-11_dbl_kind ! minimum delta for viscosities (EVP, Hunke 2001) deltaminVP = 2e-9_dbl_kind ! minimum delta for viscosities (VP, Hibler 1979) capping_method = 'max' ! method for capping of viscosities (max=Hibler 1979,sum=Kreyscher2000) - maxits_nonlin = 4 ! max nb of iteration for nonlinear solver + maxits_nonlin = 10 ! max nb of iteration for nonlinear solver precond = 'pgmres' ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), ! 'pgmres' (Jacobi-preconditioned GMRES) dim_fgmres = 50 ! size of fgmres Krylov subspace @@ -424,7 +411,7 @@ subroutine input_data monitor_pgmres = .false. ! print pgmres residual norm ortho_type = 'mgs' ! orthogonalization procedure 'cgs' or 'mgs' reltol_nonlin = 1e-8_dbl_kind ! nonlinear stopping criterion: reltol_nonlin*res(k=0) - reltol_fgmres = 1e-2_dbl_kind ! fgmres stopping criterion: reltol_fgmres*res(k) + reltol_fgmres = 1e-1_dbl_kind ! fgmres stopping criterion: reltol_fgmres*res(k) reltol_pgmres = 1e-6_dbl_kind ! pgmres stopping criterion: reltol_pgmres*res(k) algo_nonlin = 'picard' ! nonlinear algorithm: 'picard' (Picard iteration), 'anderson' (Anderson acceleration) fpfunc_andacc = 1 ! fixed point function for Anderson acceleration: @@ -438,11 +425,7 @@ subroutine input_data conserv_check = .false. ! tracer conservation check shortwave = 'ccsm3' ! 'ccsm3' or 'dEdd' (delta-Eddington) albedo_type = 'ccsm3' ! 'ccsm3' or 'constant' -#ifdef UNDEPRECATE_0LAYER - ktherm = 1 ! -1 = OFF, 0 = 0-layer, 1 = BL99, 2 = mushy thermo -#else ktherm = 1 ! -1 = OFF, 1 = BL99, 2 = mushy thermo -#endif conduct = 'bubbly' ! 'MU71' or 'bubbly' (Pringle et al 2007) coriolis = 'latitude' ! latitude dependent, or 'constant' ssh_stress = 'geostrophic' ! 'geostrophic' or 'coupled' @@ -552,10 +535,6 @@ subroutine input_data restart_FY = .false. ! ice age restart tr_lvl = .false. ! level ice restart_lvl = .false. ! level ice restart -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm = .false. ! CESM melt ponds - restart_pond_cesm = .false. ! melt ponds restart -#endif tr_pond_lvl = .false. ! level-ice melt ponds restart_pond_lvl = .false. ! melt ponds restart tr_pond_topo = .false. ! 
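!-----------------------------------------------------------------
! A sketch of how the new rectgrid options and the updated VP solver
! defaults above could be set explicitly in ice_in. The group names
! follow the usual CICE namelist layout (grid_nml, dynamics_nml) but
! should be checked against your ice_in; values are examples only.
!-----------------------------------------------------------------
&grid_nml
  grid_type  = 'rectangular'
  scale_dxdy = .true.      ! turn on variable spacing
  dxscale    = 1.02        ! 2% growth per cell in x
  dyscale    = 1.02        ! 2% growth per cell in y
  lonrefrect = -156.50     ! lower left corner longitude (deg)
  latrefrect = 71.35       ! lower left corner latitude (deg)
/
&dynamics_nml
  kdyn          = 3        ! implicit (VP) solver
  maxits_nonlin = 10       ! new default
  reltol_fgmres = 1e-1     ! new default
/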
explicit melt ponds (topographic) @@ -853,6 +832,11 @@ subroutine input_data call broadcast_scalar(grid_format, master_task) call broadcast_scalar(dxrect, master_task) call broadcast_scalar(dyrect, master_task) + call broadcast_scalar(scale_dxdy, master_task) + call broadcast_scalar(dxscale, master_task) + call broadcast_scalar(dyscale, master_task) + call broadcast_scalar(lonrefrect, master_task) + call broadcast_scalar(latrefrect, master_task) call broadcast_scalar(close_boundaries, master_task) call broadcast_scalar(grid_type, master_task) call broadcast_scalar(grid_ice, master_task) @@ -968,6 +952,7 @@ subroutine input_data call broadcast_scalar(albsnowi, master_task) call broadcast_scalar(ahmax, master_task) call broadcast_scalar(atmbndy, master_task) + call broadcast_scalar(default_season, master_task) call broadcast_scalar(fyear_init, master_task) call broadcast_scalar(ycycle, master_task) call broadcast_scalar(atm_data_format, master_task) @@ -1021,10 +1006,6 @@ subroutine input_data call broadcast_scalar(restart_FY, master_task) call broadcast_scalar(tr_lvl, master_task) call broadcast_scalar(restart_lvl, master_task) -#ifdef UNDEPRECATE_CESMPONDS - call broadcast_scalar(tr_pond_cesm, master_task) - call broadcast_scalar(restart_pond_cesm, master_task) -#endif call broadcast_scalar(tr_pond_lvl, master_task) call broadcast_scalar(restart_pond_lvl, master_task) call broadcast_scalar(tr_pond_topo, master_task) @@ -1117,9 +1098,6 @@ subroutine input_data restart_age = .false. restart_fy = .false. restart_lvl = .false. -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm = .false. -#endif restart_pond_lvl = .false. restart_pond_topo = .false. restart_snow = .false. @@ -1236,29 +1214,15 @@ subroutine input_data endif endif -#ifdef UNDEPRECATE_CESMPONDS - rpcesm = 0 -#endif rplvl = 0 rptopo = 0 -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) rpcesm = 1 -#endif if (tr_pond_lvl ) rplvl = 1 if (tr_pond_topo) rptopo = 1 tr_pond = .false. ! explicit melt ponds -#ifdef UNDEPRECATE_CESMPONDS - if (rpcesm + rplvl + rptopo > 0) tr_pond = .true. -#else if (rplvl + rptopo > 0) tr_pond = .true. -#endif -#ifdef UNDEPRECATE_CESMPONDS - if (rpcesm + rplvl + rptopo > 1) then -#else if (rplvl + rptopo > 1) then -#endif if (my_task == master_task) then write(nu_diag,*) subname//' ERROR: Must use only one melt pond scheme' endif @@ -1482,13 +1446,6 @@ subroutine input_data abort_list = trim(abort_list)//":16" endif -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: formdrag=T and frzpnd=cesm' - abort_list = trim(abort_list)//":17" - endif -#endif - if (.not. tr_lvl) then if (my_task == master_task) write(nu_diag,*) subname//' ERROR: formdrag=T and tr_lvl=F' abort_list = trim(abort_list)//":18" @@ -1570,7 +1527,12 @@ subroutine input_data wave_spec = .false. if (tr_fsd .and. (trim(wave_spec_type) /= 'none')) wave_spec = .true. - + if (tr_fsd .and. (trim(wave_spec_type) == 'none')) then + if (my_task == master_task) then + write(nu_diag,*) subname//' WARNING: tr_fsd=T but wave_spec=F - not recommended' + endif + end if + ! 
compute grid locations for thermo, u and v fields grid_ice_thrm = 'T' @@ -1907,10 +1869,6 @@ subroutine input_data tmpstr2 = ' : Bitz and Lipscomb 1999 thermo' elseif (ktherm == 2) then tmpstr2 = ' : mushy-layer thermo' -#ifdef UNDEPRECATE_0LAYER - elseif (ktherm == 0) then - tmpstr2 = ' : zero-layer thermo' -#endif elseif (ktherm < 0) then tmpstr2 = ' : Thermodynamics disabled' else @@ -2062,18 +2020,18 @@ subroutine input_data if (wave_spec) then tmpstr2 = ' : use wave spectrum for floe size distribution' else - tmpstr2 = ' : floe size distribution does not use wave spectrum' + tmpstr2 = 'WARNING : floe size distribution does not use wave spectrum' endif write(nu_diag,1010) ' wave_spec = ', wave_spec,trim(tmpstr2) if (wave_spec) then if (trim(wave_spec_type) == 'none') then tmpstr2 = ' : no wave data provided, no wave-ice interactions' elseif (trim(wave_spec_type) == 'profile') then - tmpstr2 = ' : use fixed dummy wave spectrum for testing' + tmpstr2 = ' : use fixed dummy wave spectrum for testing, sea surface height generated using constant phase (1 iteration of wave fracture)' elseif (trim(wave_spec_type) == 'constant') then - tmpstr2 = ' : constant wave spectrum data file provided for testing' + tmpstr2 = ' : wave spectrum data file provided, sea surface height generated using constant phase (1 iteration of wave fracture)' elseif (trim(wave_spec_type) == 'random') then - tmpstr2 = ' : wave data file provided, spectrum generated using random number' + tmpstr2 = ' : wave spectrum data file provided, sea surface height generated using random number (multiple iterations of wave fracture to convergence)' else tmpstr2 = ' : unknown value' endif @@ -2091,14 +2049,7 @@ subroutine input_data write(nu_diag,*) ' ' write(nu_diag,*) ' Melt ponds' write(nu_diag,*) '--------------------------------' -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - write(nu_diag,1010) ' tr_pond_cesm = ', tr_pond_cesm,' : CESM pond formulation' - write(nu_diag,1002) ' pndaspect = ', pndaspect,' : ratio of pond depth to area fraction' - elseif (tr_pond_lvl) then -#else if (tr_pond_lvl) then -#endif write(nu_diag,1010) ' tr_pond_lvl = ', tr_pond_lvl,' : level-ice pond formulation' write(nu_diag,1002) ' pndaspect = ', pndaspect,' : ratio of pond depth to area fraction' write(nu_diag,1000) ' dpscale = ', dpscale,' : time scale for flushing in permeable ice' @@ -2211,9 +2162,6 @@ subroutine input_data if (tr_lvl) write(nu_diag,1010) ' tr_lvl = ', tr_lvl,' : ridging related tracers' if (tr_pond_lvl) write(nu_diag,1010) ' tr_pond_lvl = ', tr_pond_lvl,' : level-ice pond formulation' if (tr_pond_topo) write(nu_diag,1010) ' tr_pond_topo = ', tr_pond_topo,' : topo pond formulation' -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) write(nu_diag,1010) ' tr_pond_cesm = ', tr_pond_cesm,' : CESM pond formulation' -#endif if (tr_snow) write(nu_diag,1010) ' tr_snow = ', tr_snow,' : advanced snow physics' if (tr_iage) write(nu_diag,1010) ' tr_iage = ', tr_iage,' : chronological ice age' if (tr_FY) write(nu_diag,1010) ' tr_FY = ', tr_FY,' : first-year ice area' @@ -2338,9 +2286,6 @@ subroutine input_data write(nu_diag,1011) ' restart_age = ', restart_age write(nu_diag,1011) ' restart_FY = ', restart_FY write(nu_diag,1011) ' restart_lvl = ', restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - write(nu_diag,1011) ' restart_pond_cesm= ', restart_pond_cesm -#endif write(nu_diag,1011) ' restart_pond_lvl = ', restart_pond_lvl write(nu_diag,1011) ' restart_pond_topo= ', restart_pond_topo write(nu_diag,1011) ' restart_snow = ', restart_snow @@ 
-2439,11 +2384,7 @@ subroutine input_data call icepack_init_tracer_flags(tr_iage_in=tr_iage, tr_FY_in=tr_FY, & tr_lvl_in=tr_lvl, tr_iso_in=tr_iso, tr_aero_in=tr_aero, & tr_fsd_in=tr_fsd, tr_snow_in=tr_snow, tr_pond_in=tr_pond, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm_in=tr_pond_cesm, tr_pond_lvl_in=tr_pond_lvl, tr_pond_topo_in=tr_pond_topo) -#else tr_pond_lvl_in=tr_pond_lvl, tr_pond_topo_in=tr_pond_topo) -#endif call icepack_init_tracer_sizes(ncat_in=ncat, nilyr_in=nilyr, nslyr_in=nslyr, nblyr_in=nblyr, & nfsd_in=nfsd, n_algae_in=n_algae, n_iso_in=n_iso, n_aero_in=n_aero, & n_DOC_in=n_DOC, n_DON_in=n_DON, & @@ -2500,18 +2441,10 @@ subroutine init_state it , & ! tracer index iblk ! block index -#ifdef UNDEPRECATE_0LAYER - logical (kind=log_kind) :: & - heat_capacity ! from icepack -#endif integer (kind=int_kind) :: ntrcr logical (kind=log_kind) :: tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero -#ifdef UNDEPRECATE_CESMPONDS - logical (kind=log_kind) :: tr_pond_cesm, tr_pond_lvl, tr_pond_topo -#else logical (kind=log_kind) :: tr_pond_lvl, tr_pond_topo -#endif logical (kind=log_kind) :: tr_snow, tr_fsd integer (kind=int_kind) :: nt_Tsfc, nt_sice, nt_qice, nt_qsno, nt_iage, nt_FY integer (kind=int_kind) :: nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd @@ -2525,17 +2458,10 @@ subroutine init_state !----------------------------------------------------------------- -#ifdef UNDEPRECATE_0LAYER - call icepack_query_parameters(heat_capacity_out=heat_capacity) -#endif call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & tr_lvl_out=tr_lvl, tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo, & -#else tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo, & -#endif tr_snow_out=tr_snow, tr_fsd_out=tr_fsd) call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc, nt_sice_out=nt_sice, & nt_qice_out=nt_qice, nt_qsno_out=nt_qsno, nt_iage_out=nt_iage, nt_fy_out=nt_fy, & @@ -2569,25 +2495,6 @@ subroutine init_state file=__FILE__, line=__LINE__) endif -#ifdef UNDEPRECATE_0LAYER - if (.not.heat_capacity) then - - if (nilyr > 1) then - write(nu_diag,*) subname//' ERROR: Must have nilyr = 1 if heat_capacity=F' - write(nu_diag,*) subname//' ERROR: nilyr =', nilyr - call abort_ice(error_message=subname//' Too many ice layers', & - file=__FILE__, line=__LINE__) - endif - - if (nslyr > 1) then - write(nu_diag,*) subname//' ERROR: Must have nslyr = 1 if heat_capacity=F' - write(nu_diag,*) subname//' ERROR: nslyr =', nslyr - call abort_ice(error_message=subname//' Too many snow layers', & - file=__FILE__, line=__LINE__) - endif - - endif ! heat_capacity = F -#endif endif ! my_task !----------------------------------------------------------------- @@ -2606,12 +2513,6 @@ subroutine init_state if (tr_FY) trcr_depend(nt_FY) = 0 ! area-weighted first-year ice area if (tr_lvl) trcr_depend(nt_alvl) = 0 ! level ice area if (tr_lvl) trcr_depend(nt_vlvl) = 1 ! level ice volume -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - trcr_depend(nt_apnd) = 0 ! melt pond area - trcr_depend(nt_hpnd) = 2+nt_apnd ! melt pond depth - endif -#endif if (tr_pond_lvl) then trcr_depend(nt_apnd) = 2+nt_alvl ! melt pond area trcr_depend(nt_hpnd) = 2+nt_apnd ! melt pond depth @@ -2673,12 +2574,6 @@ subroutine init_state nt_strata (it,2) = 0 enddo -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - n_trcr_strata(nt_hpnd) = 1 ! 
melt pond depth - nt_strata (nt_hpnd,1) = nt_apnd ! on melt pond area - endif -#endif if (tr_pond_lvl) then n_trcr_strata(nt_apnd) = 1 ! melt pond area nt_strata (nt_apnd,1) = nt_alvl ! on level ice area diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 523c7ea2c..dfccdd413 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -129,6 +129,15 @@ module ice_grid dxrect, & ! user_specified spacing (cm) in x-direction (uniform HTN) dyrect ! user_specified spacing (cm) in y-direction (uniform HTE) + ! growth factor for variable spaced grid + real (kind=dbl_kind), public :: & + dxscale, & ! scale factor for grid spacing in x direction (e.g., 1.02) + dyscale ! scale factor for grid spacing in y direction (e.g., 1.02) + + real (kind=dbl_kind), public :: & + lonrefrect, & ! lower left lon for rectgrid + latrefrect ! lower left lat for rectgrid + ! Corners of grid boxes for history output real (kind=dbl_kind), dimension (:,:,:,:), allocatable, public :: & lont_bounds, & ! longitude of gridbox corners for T point @@ -171,7 +180,8 @@ module ice_grid logical (kind=log_kind), public :: & use_bathymetry, & ! flag for reading in bathymetry_file - pgl_global_ext ! flag for init primary grid lengths (global ext.) + pgl_global_ext, & ! flag for init primary grid lengths (global ext.) + scale_dxdy ! flag to apply scale factor to vary dx/dy in rectgrid logical (kind=log_kind), dimension (:,:,:), allocatable, public :: & tmask , & ! land/boundary mask, thickness (T-cell) @@ -182,11 +192,6 @@ module ice_grid lmask_n, & ! northern hemisphere mask lmask_s ! southern hemisphere mask - logical (kind=log_kind), dimension (:,:,:), allocatable, public :: & - iceumask, & ! ice extent mask (U-cell) - icenmask, & ! ice extent mask (N-cell) - iceemask ! ice extent mask (E-cell) - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & rndex_global ! global index for local subdomain (dbl) @@ -267,7 +272,6 @@ subroutine alloc_grid umaskCD (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) nmask (nx_block,ny_block,max_blocks), & ! land/boundary mask (N-cell) emask (nx_block,ny_block,max_blocks), & ! land/boundary mask (E-cell) - iceumask (nx_block,ny_block,max_blocks), & ! u mask for dynamics lmask_n (nx_block,ny_block,max_blocks), & ! northern hemisphere mask lmask_s (nx_block,ny_block,max_blocks), & ! southern hemisphere mask rndex_global(nx_block,ny_block,max_blocks), & ! global index for local subdomain (dbl) @@ -288,8 +292,6 @@ subroutine alloc_grid if (grid_ice == 'CD' .or. grid_ice == 'C') then allocate( & - iceemask (nx_block,ny_block,max_blocks), & ! e mask for dynamics - icenmask (nx_block,ny_block,max_blocks), & ! n mask for dynamics ratiodxN (nx_block,ny_block,max_blocks), & ratiodyE (nx_block,ny_block,max_blocks), & ratiodxNr(nx_block,ny_block,max_blocks), & @@ -475,6 +477,8 @@ subroutine init_grid2 ! lat, lon, cell widths, angle, land mask !----------------------------------------------------------------- + l_readCenter = .false. + call icepack_query_parameters(pi_out=pi, pi2_out=pi2, puny_out=puny) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & @@ -945,7 +949,6 @@ subroutine popgrid_nc call ice_open_nc(kmt_file,fid_kmt) diag = .true. ! write diagnostic info - l_readCenter = .false. !----------------------------------------------------------------- ! 
topography !----------------------------------------------------------------- @@ -1360,7 +1363,7 @@ subroutine rectgrid imid, jmid real (kind=dbl_kind) :: & - length, & + length, & rad_to_deg real (kind=dbl_kind), dimension(:,:), allocatable :: & @@ -1383,69 +1386,71 @@ subroutine rectgrid allocate(work_g1(nx_global,ny_global)) - ! Weddell Sea - ! lower left corner of grid is 55W, 75S - - ! Barrow AK - ! lower left corner of grid is 156.5W, 71.35N - - if (my_task == master_task) then - work_g1 = c0 - length = dxrect*cm_to_m/radius*rad_to_deg - -! work_g1(1,:) = -55._dbl_kind ! Weddell Sea - work_g1(1,:) = -156.5_dbl_kind ! Barrow AK - - do j = 1, ny_global - do i = 2, nx_global - work_g1(i,j) = work_g1(i-1,j) + length ! ULON - enddo - enddo - work_g1(:,:) = work_g1(:,:) / rad_to_deg - endif - call gridbox_verts(work_g1,lont_bounds) - call scatter_global(ULON, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloExtrapolate(ULON, distrb_info, & - ew_boundary_type, ns_boundary_type) - - if (my_task == master_task) then - work_g1 = c0 - length = dyrect*cm_to_m/radius*rad_to_deg - -! work_g1(:,1) = -75._dbl_kind ! Weddell Sea - work_g1(:,1) = 71.35_dbl_kind ! Barrow AK + if (scale_dxdy) then + ! scale grid spacing from center outward. + ! this is different than the original method in that it + ! needs to define grid spacing before lat/lon. + ! original rectgrid defines latlon first + call rectgrid_scale_dxdy + else + ! rectgrid with no grid spacing scaling. + ! original method, with the addition of a namelist lat/lon reference + + if (my_task == master_task) then + work_g1 = c0 + length = dxrect*cm_to_m/radius*rad_to_deg + + work_g1(1,:) = lonrefrect ! reference lon from namelist + + do j = 1, ny_global + do i = 2, nx_global + work_g1(i,j) = work_g1(i-1,j) + length ! ULON + enddo + enddo + work_g1(:,:) = work_g1(:,:) / rad_to_deg + endif + call scatter_global(ULON, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULON, distrb_info, & + ew_boundary_type, ns_boundary_type) + + if (my_task == master_task) then + work_g1 = c0 + length = dyrect*cm_to_m/radius*rad_to_deg + + work_g1(:,1) = latrefrect ! reference latitude from namelist + + do i = 1, nx_global + do j = 2, ny_global + work_g1(i,j) = work_g1(i,j-1) + length ! ULAT + enddo + enddo + work_g1(:,:) = work_g1(:,:) / rad_to_deg + endif + call scatter_global(ULAT, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULAT, distrb_info, & + ew_boundary_type, ns_boundary_type) - do i = 1, nx_global - do j = 2, ny_global - work_g1(i,j) = work_g1(i,j-1) + length ! ULAT - enddo - enddo - work_g1(:,:) = work_g1(:,:) / rad_to_deg - endif - call gridbox_verts(work_g1,latt_bounds) - call scatter_global(ULAT, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloExtrapolate(ULAT, distrb_info, & - ew_boundary_type, ns_boundary_type) + if (my_task == master_task) then + do j = 1, ny_global + do i = 1, nx_global + work_g1(i,j) = dxrect ! HTN + enddo + enddo + endif + call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE - if (my_task == master_task) then - do j = 1, ny_global - do i = 1, nx_global - work_g1(i,j) = dxrect ! HTN - enddo - enddo - endif - call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE + if (my_task == master_task) then + do j = 1, ny_global + do i = 1, nx_global + work_g1(i,j) = dyrect ! HTE + enddo + enddo + endif + call primary_grid_lengths_HTE(work_g1) ! 
dyU, dyT, dyN, dyE - if (my_task == master_task) then - do j = 1, ny_global - do i = 1, nx_global - work_g1(i,j) = dyrect ! HTE - enddo - enddo - endif - call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE + endif ! scale_dxdy !----------------------------------------------------------------- ! Construct T-cell land mask @@ -1526,6 +1531,168 @@ subroutine rectgrid end subroutine rectgrid +!======================================================================= + + subroutine rectgrid_scale_dxdy + + ! generate a variable spaced rectangular grid. + ! extend spacing from center of grid outward. + use ice_constants, only: c0, c1, c2, radius, cm_to_m, & + field_loc_center, field_loc_NEcorner, field_type_scalar + + integer (kind=int_kind) :: & + i, j, iblk, & + imid, jmid, & + center1, center2 ! array centers for expanding dx, dy + + real (kind=dbl_kind) :: & + length, & + rad_to_deg + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1 + + character(len=*), parameter :: subname = '(rectgrid_scale_dxdy)' + + call icepack_query_parameters(rad_to_deg_out=rad_to_deg) + + allocate(work_g1(nx_global,ny_global)) + + ! determine dx spacing + ! strategy: initialize with dxrect. + ! to scale the grid, work from the center outwards, + ! multiplying the neighbor cell by the scale factor. + ! this assumes dx varies in the x direction only. + ! (i.e., dx is the same across same y location) + if (my_task == master_task) then + + ! initialize with initial dxrect + work_g1(:,:) = dxrect + + ! check if nx is even or odd + ! if even, middle 2 columns are center + ! if odd, middle 1 column is center + if (mod(nx_global,2) == 0) then ! nx_global is even + + ! with an even number of x locations, + ! the center two columns are the center + center1 = nx_global/2 ! integer math + center2 = center1 + 1 ! integer math + + else ! nx_global = odd + ! only one center index. set center2=center1 + center1 = ceiling(real(nx_global/2),int_kind) + center2 = center1 + endif + + ! note: loop over only half the x grid points (center1-1), + ! working from the center outward. + do j = 1, ny_global + do i = 1, center1-1 + ! work from center1 to left + work_g1(center1-i,j) = dxscale*work_g1(center1-i+1,j) + + ! work from center2 to right + work_g1(center2+i,j) = dxscale*work_g1(center2+i-1,j) + enddo ! i + enddo ! j + + endif ! my_task == master_task + + + ! note work_g1 is converted to meters in primary_grid_lengths_HTN + call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE + + ! make ULON array + if (my_task == master_task) then + + ! make first column reference lon in radians. + ! the remaining work_g1 is still dx in meters + work_g1(1,:) = lonrefrect/rad_to_deg ! radians + + ! loop over remaining points and add spacing to successive + ! x locations + do j = 1, ny_global + do i = 2, nx_global ! start from i=2. i=1 is lonrefrect + length = work_g1(i,j)/radius ! grid spacing in radians + work_g1(i,j) = work_g1(i-1,j) + length ! ULON + enddo ! i + enddo ! j + endif ! my_task == master_task + call scatter_global(ULON, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULON, distrb_info, & + ew_boundary_type, ns_boundary_type) + + ! determine dy spacing + ! strategy: initialize with dyrect. + ! to scale the grid, work from the center outwards, + ! multiplying the neighbor cell by the scale factor. + ! this assumes dy varies in the y direction only. + ! (i.e., dy is the same across same x location) + if (my_task == master_task) then + + ! 
initialize with initial dyrect + work_g1(:,:) = dyrect + + ! check if ny is even or odd + ! if even, middle 2 rows are center + ! if odd, middle 1 row is center + if (mod(ny_global,2) == 0) then ! ny_global is even + + ! with an even number of y locations, + ! the center two rows are the center + center1 = ny_global/2 ! integer math + center2 = center1 + 1 ! integer math + + else ! ny_global = odd + ! only one center index. set center2=center1 + center1 = ceiling(real(ny_global/2),int_kind) + center2 = center1 + endif + + ! note: loop over only half the y grid points (center1-1), + ! working from the center outward. + do i = 1, nx_global + do j = 1, center1-1 + ! work from center1 to bottom + work_g1(i,center1-j) = dyscale*work_g1(i,center1-j+1) + + ! work from center2 to top + work_g1(i,center2+j) = dyscale*work_g1(i,center2+j-1) + enddo ! j + enddo ! i + endif ! my_task == master_task + ! note work_g1 is converted to meters in primary_grid_lengths_HTE + call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE + + ! make ULAT array + if (my_task == master_task) then + + ! make first row reference lat in radians. + ! the remaining work_g1 is still dy in meters + work_g1(:,1) = latrefrect/rad_to_deg ! radians + + + ! loop over remaining points and add spacing to successive + ! y locations + do j = 2, ny_global ! start from j=2. j=1 is latrefrect + do i = 1, nx_global + length = work_g1(i,j)/radius ! grid spacing in radians + work_g1(i,j) = work_g1(i,j-1) + length ! ULAT + enddo ! i + enddo ! j + endif ! my_task == master_task + call scatter_global(ULAT, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULAT, distrb_info, & + ew_boundary_type, ns_boundary_type) + + + deallocate(work_g1) + + end subroutine rectgrid_scale_dxdy + !======================================================================= ! Complex land mask for testing box cases @@ -2513,13 +2680,23 @@ subroutine grid_average_X2Y_NEversion(type,work1a,grid1a,work1b,grid1b,work2,gri ! state masked case('NE2US') - call grid_average_X2YS_2('NE2US',work1a,narea,npm,work1b,earea,epm,work2) + call grid_average_X2Y_2('NE2US',work1a,narea,npm,work1b,earea,epm,work2) case('EN2US') - call grid_average_X2YS_2('NE2US',work1b,narea,npm,work1a,earea,epm,work2) + call grid_average_X2Y_2('NE2US',work1b,narea,npm,work1a,earea,epm,work2) case('NE2TS') - call grid_average_X2YS_2('NE2TS',work1a,narea,npm,work1b,earea,epm,work2) + call grid_average_X2Y_2('NE2TS',work1a,narea,npm,work1b,earea,epm,work2) case('EN2TS') - call grid_average_X2YS_2('NE2TS',work1b,narea,npm,work1a,earea,epm,work2) + call grid_average_X2Y_2('NE2TS',work1b,narea,npm,work1a,earea,epm,work2) + + ! state unmasked + case('NE2UA') + call grid_average_X2Y_2('NE2UA',work1a,narea,npm,work1b,earea,epm,work2) + case('EN2UA') + call grid_average_X2Y_2('NE2UA',work1b,narea,npm,work1a,earea,epm,work2) + case('NE2TA') + call grid_average_X2Y_2('NE2TA',work1a,narea,npm,work1b,earea,epm,work2) + case('EN2TA') + call grid_average_X2Y_2('NE2TA',work1b,narea,npm,work1a,earea,epm,work2) case default call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) @@ -3413,36 +3590,6 @@ subroutine grid_average_X2YF(dir,work1,wght1,work2,wght2) end subroutine grid_average_X2YF -!======================================================================= -! Compute the minimum of adjacent values of a field at specific indices, -! depending on the grid location (U, E, N) -! 
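[Editor's note] The center-outward scaling added in rectgrid_scale_dxdy above is easier to see in isolation. Below is a minimal, self-contained Fortran sketch of the same idea, not part of the patch; nx, dx0, and growf are hypothetical stand-ins for nx_global, dxrect, and dxscale, and the odd-count center is written as (nx+1)/2 for brevity:

    program scale_spacing_demo
       ! sketch of the center-outward geometric spacing used by rectgrid_scale_dxdy
       implicit none
       integer, parameter :: nx = 8                ! stand-in for nx_global
       real(kind=8), parameter :: dx0   = 16.e5_8  ! base spacing in cm, cf. dxrect
       real(kind=8), parameter :: growf = 1.02_8   ! growth factor, cf. dxscale
       real(kind=8) :: dx(nx)
       integer :: i, center1, center2

       dx(:) = dx0
       if (mod(nx,2) == 0) then      ! even count: two center columns
          center1 = nx/2
          center2 = center1 + 1
       else                          ! odd count: a single center column
          center1 = (nx+1)/2
          center2 = center1
       endif
       ! march outward from the center, growing each new cell geometrically
       do i = 1, center1-1
          dx(center1-i) = growf*dx(center1-i+1)   ! leftward
          dx(center2+i) = growf*dx(center2+i-1)   ! rightward
       enddo
       print '(8f12.1)', dx          ! spacing is symmetric about the grid center
    end program scale_spacing_demo

Because each step multiplies by the same factor, the spacing grows geometrically toward the boundaries; with the example value dxscale = 1.02, the cell size roughly doubles every 35 cells (1.02**35 is approximately 2).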
- real(kind=dbl_kind) function grid_neighbor_min(field, i, j, grid_location) result(mini) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - field ! field defined at T point - - integer (kind=int_kind), intent(in) :: & - i, j - - character(len=*), intent(in) :: & - grid_location ! grid location at which to compute the minumum (U, E, N) - - character(len=*), parameter :: subname = '(grid_neighbor_min)' - - select case (trim(grid_location)) - case('U') - mini = min(field(i,j), field(i+1,j), field(i,j+1), field(i+1,j+1)) - case('E') - mini = min(field(i,j), field(i+1,j)) - case('N') - mini = min(field(i,j), field(i,j+1)) - case default - call abort_ice(subname // ' unknown grid_location: ' // grid_location) - end select - - end function grid_neighbor_min - !======================================================================= ! Shifts quantities from one grid to another ! State masked version, simple weighted averager @@ -3451,7 +3598,7 @@ end function grid_neighbor_min ! ! author: T. Craig - subroutine grid_average_X2YS_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,work2) + subroutine grid_average_X2Y_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,work2) use ice_constants, only: c0 @@ -3478,7 +3625,7 @@ subroutine grid_average_X2YS_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,wor type (block) :: & this_block ! block information for current block - character(len=*), parameter :: subname = '(grid_average_X2YS_2)' + character(len=*), parameter :: subname = '(grid_average_X2Y_2)' work2(:,:,:) = c0 @@ -3534,11 +3681,91 @@ subroutine grid_average_X2YS_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,wor enddo !$OMP END PARALLEL DO + case('NE2UA') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1a(i ,j ,iblk) & + + wght1a(i+1,j ,iblk) & + + wght1b(i ,j ,iblk) & + + wght1b(i ,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + work1a(i+1,j ,iblk)*wght1a(i+1,j ,iblk) & + + work1b(i ,j ,iblk)*wght1b(i ,j ,iblk) & + + work1b(i ,j+1,iblk)*wght1b(i ,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('NE2TA') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1a(i ,j-1,iblk) & + + wght1a(i ,j ,iblk) & + + wght1b(i-1,j ,iblk) & + + wght1b(i ,j ,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1a(i ,j-1,iblk)*wght1a(i ,j-1,iblk) & + + work1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + work1b(i-1,j ,iblk)*wght1b(i-1,j ,iblk) & + + work1b(i ,j ,iblk)*wght1b(i ,j ,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + case default call abort_ice(subname//'ERROR: unknown option '//trim(dir)) end select - end subroutine grid_average_X2YS_2 + end subroutine grid_average_X2Y_2 + +!======================================================================= +! Compute the minimum of adjacent values of a field at specific indices, +! depending on the grid location (U, E, N) +! + real(kind=dbl_kind) function grid_neighbor_min(field, i, j, grid_location) result(mini) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + field ! 
field defined at T point + + integer (kind=int_kind), intent(in) :: & + i, j + + character(len=*), intent(in) :: & + grid_location ! grid location at which to compute the minumum (U, E, N) + + character(len=*), parameter :: subname = '(grid_neighbor_min)' + + select case (trim(grid_location)) + case('U') + mini = min(field(i,j), field(i+1,j), field(i,j+1), field(i+1,j+1)) + case('E') + mini = min(field(i,j), field(i+1,j)) + case('N') + mini = min(field(i,j), field(i,j+1)) + case default + call abort_ice(subname // ' unknown grid_location: ' // grid_location) + end select + + end function grid_neighbor_min !======================================================================= ! Compute the maximum of adjacent values of a field at specific indices, diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index 0ee55d2e5..29477973a 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -150,11 +150,7 @@ subroutine init_restart_write(filename_spec) solve_zsal, skl_bgc, z_tracers logical (kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & -#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow, & tr_bgc_N, tr_bgc_C, tr_bgc_Nit, & tr_bgc_Sil, tr_bgc_DMS, & @@ -190,11 +186,7 @@ subroutine init_restart_write(filename_spec) call icepack_query_tracer_sizes(nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine, & tr_bgc_N_out=tr_bgc_N, tr_bgc_C_out=tr_bgc_C, tr_bgc_Nit_out=tr_bgc_Nit, & @@ -394,13 +386,6 @@ subroutine init_restart_write(filename_spec) call define_rest_field(File,'vlvl',dims) end if -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - call define_rest_field(File,'apnd',dims) - call define_rest_field(File,'hpnd',dims) - end if -#endif - if (tr_pond_topo) then call define_rest_field(File,'apnd',dims) call define_rest_field(File,'hpnd',dims) diff --git a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 index 87dc8d9a1..0b8ed689e 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 @@ -235,17 +235,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_fsd, read_restart_fsd, & @@ -260,11 +253,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! 
block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_fsd, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -283,11 +272,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_fsd_out=tr_fsd) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -346,21 +331,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 index 91f7985bd..b67e1a223 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 @@ -145,11 +145,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_bgc, write_restart_hbrine use ice_restart_driver, only: dumpfile @@ -172,11 +168,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -185,11 +177,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_fsd_out=tr_fsd) call icepack_warnings_flush(nu_diag) @@ -333,9 +321,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_fsd) call write_restart_fsd diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 index 87dc8d9a1..0b8ed689e 100644 --- 
a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 @@ -235,17 +235,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_fsd, read_restart_fsd, & @@ -260,11 +253,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_fsd, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -283,11 +272,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_fsd_out=tr_fsd) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -346,21 +331,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! 
level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 index ea6a65165..c9875d769 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 @@ -145,11 +145,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_bgc, write_restart_hbrine use ice_restart_driver, only: dumpfile @@ -172,11 +168,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -185,11 +177,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_fsd_out=tr_fsd) call icepack_warnings_flush(nu_diag) @@ -333,9 +321,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_fsd) call write_restart_fsd diff --git a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 index cfc5bece9..a8bf96ad2 100644 --- a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 @@ -264,17 +264,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, init_snowtracers, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_snow, read_restart_snow, & @@ -291,11 +284,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! 
block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_snow, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -315,11 +304,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_snow_out=tr_snow, tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -381,21 +366,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 index b96086c6d..5836479b4 100644 --- a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 @@ -137,11 +137,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_iso, write_restart_bgc, write_restart_hbrine, & write_restart_snow @@ -166,11 +162,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, tr_snow, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -188,11 +180,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_iso_out=tr_iso, tr_fsd_out=tr_fsd, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) @@ -403,9 +391,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_snow) call write_restart_snow diff --git a/cicecore/drivers/mct/cesm1/ice_import_export.F90 
b/cicecore/drivers/mct/cesm1/ice_import_export.F90 index 7ac4f0bb7..868ed42b4 100644 --- a/cicecore/drivers/mct/cesm1/ice_import_export.F90 +++ b/cicecore/drivers/mct/cesm1/ice_import_export.F90 @@ -9,7 +9,7 @@ module ice_import_export use ice_constants , only: field_type_vector, c100 use ice_constants , only: p001, p5 use ice_blocks , only: block, get_block, nx_block, ny_block - use ice_flux , only: strairxT, strairyT, strocnxT, strocnyT + use ice_flux , only: strairxT, strairyT, strocnxT_iavg, strocnyT_iavg use ice_flux , only: alvdr, alidr, alvdf, alidf, Tref, Qref, Uref use ice_flux , only: flat, fsens, flwout, evap, fswabs, fhocn, fswthru use ice_flux , only: fresh, fsalt, zlvl, uatm, vatm, potT, Tair, Qa @@ -571,8 +571,8 @@ subroutine ice_export( i2x ) + workx*sin(ANGLET(i,j,iblk)) ! ice/ocean stress (on POP T-grid: convert to lat-lon) - workx = -strocnxT(i,j,iblk) ! N/m^2 - worky = -strocnyT(i,j,iblk) ! N/m^2 + workx = -strocnxT_iavg(i,j,iblk) ! N/m^2 + worky = -strocnyT_iavg(i,j,iblk) ! N/m^2 tauxo(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & - worky*sin(ANGLET(i,j,iblk)) tauyo(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) & diff --git a/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 b/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 index 78b7d15c4..599249083 100644 --- a/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 +++ b/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 @@ -585,11 +585,11 @@ subroutine ice_prescribed_phys ! set non-computed fluxes, ice velocities, ice-ocn stresses to zero !-------------------------------------------------------------------- - frzmlt (:,:,:) = c0 - uvel (:,:,:) = c0 - vvel (:,:,:) = c0 - strocnxT (:,:,:) = c0 - strocnyT (:,:,:) = c0 + frzmlt (:,:,:) = c0 + uvel (:,:,:) = c0 + vvel (:,:,:) = c0 + strocnxT_iavg(:,:,:) = c0 + strocnyT_iavg(:,:,:) = c0 !----------------------------------------------------------------- ! other atm and ocn fluxes diff --git a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 index 3d5e5cc2a..5fbde9cce 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 @@ -222,17 +222,10 @@ subroutine init_restart() use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, init_snowtracers, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_snow, read_restart_snow, & @@ -249,11 +242,7 @@ subroutine init_restart() i, j , & ! horizontal indices iblk ! 
block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, tr_snow, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -274,11 +263,7 @@ subroutine init_restart() call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_snow_out=tr_snow, tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -340,21 +325,6 @@ subroutine init_restart() enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 index 1dc2328f7..3929fd6ad 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 @@ -123,11 +123,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_iso, write_restart_bgc, write_restart_hbrine, & write_restart_snow @@ -152,11 +148,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, tr_snow, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -174,11 +166,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_iso_out=tr_iso, tr_fsd_out=tr_fsd, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) @@ -384,9 +372,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_snow) call write_restart_snow diff --git a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 
b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 index 182308973..afdee5590 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 @@ -17,12 +17,13 @@ module ice_comp_nuopc use NUOPC_Model , only : NUOPC_ModelGet, SetVM use ice_constants , only : ice_init_constants, c0 use ice_shr_methods , only : chkerr, state_setscalar, state_getscalar, state_diagnose, alarmInit - use ice_shr_methods , only : set_component_logging, get_component_instance, state_flddebug + use ice_shr_methods , only : get_component_instance, state_flddebug + use ice_import_export , only : ice_import, ice_export, ice_advertise_fields, ice_realize_fields use ice_domain_size , only : nx_global, ny_global use ice_grid , only : grid_type, init_grid2 use ice_communicate , only : init_communicate, my_task, master_task, mpi_comm_ice - use ice_calendar , only : force_restart_now, write_ic + use ice_calendar , only : force_restart_now, write_ic, init_calendar use ice_calendar , only : idate, mday, mmonth, myear, year_init use ice_calendar , only : msec, dt, calendar, calendar_type, nextsw_cday, istep use ice_calendar , only : ice_calendar_noleap, ice_calendar_gregorian @@ -40,7 +41,10 @@ module ice_comp_nuopc #ifdef CESMCOUPLED use shr_const_mod use shr_orb_mod , only : shr_orb_decl, shr_orb_params, SHR_ORB_UNDEF_REAL, SHR_ORB_UNDEF_INT - use ice_scam , only : scmlat, scmlon, scol_mask, scol_frac, scol_ni, scol_nj + use ice_scam , only : scmlat, scmlon, scol_mask, scol_frac, scol_ni, scol_nj, scol_area + use nuopc_shr_methods , only : set_component_logging +#else + use ice_shr_methods , only : set_component_logging #endif use ice_timers use CICE_InitMod , only : cice_init1, cice_init2 @@ -87,6 +91,15 @@ module ice_comp_nuopc integer :: dbug = 0 logical :: profile_memory = .false. + logical :: mastertask + integer :: start_ymd ! Start date (YYYYMMDD) + integer :: start_tod ! start time of day (s) + integer :: curr_ymd ! Current date (YYYYMMDD) + integer :: curr_tod ! Current time of day (s) + integer :: stop_ymd ! stop date (YYYYMMDD) + integer :: stop_tod ! stop time of day (sec) + integer :: ref_ymd ! Reference date (YYYYMMDD) + integer :: ref_tod ! reference time of day (s) integer , parameter :: debug_import = 0 ! internal debug level integer , parameter :: debug_export = 0 ! internal debug level character(*), parameter :: modName = "(ice_comp_nuopc)" @@ -201,12 +214,8 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) real(kind=dbl_kind) :: atmiter_conv_driver integer (kind=int_kind) :: natmiter integer (kind=int_kind) :: natmiter_driver - character(len=char_len) :: tfrz_option_driver ! tfrz_option from driver attributes - character(len=char_len) :: tfrz_option ! tfrz_option from cice namelist - integer(int_kind) :: ktherm integer :: localPet integer :: npes - logical :: mastertask type(ESMF_VM) :: vm integer :: lmpicom ! local communicator type(ESMF_Time) :: currTime ! Current time @@ -215,14 +224,6 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) type(ESMF_Time) :: refTime ! Ref time type(ESMF_TimeInterval) :: timeStep ! Model timestep type(ESMF_CalKind_Flag) :: esmf_caltype ! esmf calendar type - integer :: start_ymd ! Start date (YYYYMMDD) - integer :: start_tod ! start time of day (s) - integer :: curr_ymd ! Current date (YYYYMMDD) - integer :: curr_tod ! Current time of day (s) - integer :: stop_ymd ! stop date (YYYYMMDD) - integer :: stop_tod ! stop time of day (sec) - integer :: ref_ymd ! 
Reference date (YYYYMMDD) - integer :: ref_tod ! reference time of day (s) integer :: yy,mm,dd ! Temporaries for time query integer :: dtime ! time step integer :: shrlogunit ! original log unit @@ -232,12 +233,15 @@ integer :: iblk, jblk ! indices integer :: ig, jg ! indices integer :: ilo, ihi, jlo, jhi ! beginning and end of physical domain + character(len=char_len_long) :: diag_filename = 'unset' character(len=char_len_long) :: logmsg character(len=char_len_long) :: single_column_lnd_domainfile real(dbl_kind) :: scol_lon real(dbl_kind) :: scol_lat real(dbl_kind) :: scol_spval + character(len=char_len) :: tfrz_option ! tfrz_option from cice namelist + character(len=char_len) :: tfrz_option_driver ! tfrz_option from driver attributes character(len=*), parameter :: subname=trim(modName)//':(InitializeAdvertise) ' !-------------------------------- @@ -541,12 +545,20 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) call NUOPC_CompAttributeGet(gcomp, name='scol_nj', value=cvalue, rc=rc) if (chkerr(rc,__LINE__,u_FILE_u)) return read(cvalue,*) scol_nj + call NUOPC_CompAttributeGet(gcomp, name='scol_area', value=cvalue, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + read(cvalue,*) scol_area call ice_mesh_create_scolumn(scmlon, scmlat, ice_mesh, rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return scol_valid = (scol_mask == 1) if (.not. scol_valid) then + ! Read the cice namelist as part of the call to cice_init1 + ! Note that if single_column is true and scol_valid is not - will never get here + call t_startf ('cice_init1') + call cice_init1 + call t_stopf ('cice_init1') ! Advertise fields call ice_advertise_fields(gcomp, importState, exportState, flds_scalar_name, rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return @@ -559,13 +571,18 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) RETURN end if end if - ! Read the cice namelist as part of the call to cice_init1 ! Note that if single_column is true and scol_valid is not - will never get here - call t_startf ('cice_init1') call cice_init1 call t_stopf ('cice_init1') + + !----------------------------------------------------------------- + ! Advertise fields + !----------------------------------------------------------------- + call ice_advertise_fields(gcomp, importState, exportState, flds_scalar_name, rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + ! Form of ocean freezing temperature ! 'minus1p8' = -1.8 C @@ -624,6 +641,12 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) call cice_init1 call t_stopf ('cice_init1') + !----------------------------------------------------------------- + ! Advertise fields + !----------------------------------------------------------------- + call ice_advertise_fields(gcomp, importState, exportState, flds_scalar_name, rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + #endif !---------------------------------------------------------------------------- @@ -675,6 +698,43 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) end if end if + call t_stopf ('cice_init_total') + + end subroutine InitializeAdvertise + + !=============================================================================== + + subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) + + ! Arguments + type(ESMF_GridComp) :: gcomp + type(ESMF_State) :: importState + type(ESMF_State) :: exportState + type(ESMF_Clock) :: clock + integer, intent(out) :: rc + + ! 
Local variables + integer :: n + integer :: fieldcount + type(ESMF_Field) :: lfield + character(len=char_len_long) :: cvalue + real(dbl_kind) :: scol_lon + real(dbl_kind) :: scol_lat + real(dbl_kind) :: scol_spval + real(dbl_kind), pointer :: fldptr1d(:) + real(dbl_kind), pointer :: fldptr2d(:,:) + integer :: rank + character(len=char_len) :: tfrz_option ! tfrz_option from cice namelist + integer(int_kind) :: ktherm + + character(len=char_len_long) :: single_column_lnd_domainfile + character(len=char_len_long) , pointer :: lfieldnamelist(:) => null() + character(len=*), parameter :: subname=trim(modName)//':(InitializeRealize) ' + !-------------------------------- + + rc = ESMF_SUCCESS + if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) + !---------------------------------------------------------------------------- ! Second cice initialization phase -after initializing grid info !---------------------------------------------------------------------------- @@ -685,29 +745,6 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) call t_startf ('cice_init2') call cice_init2() call t_stopf ('cice_init2') - - !---------------------------------------------------------------------------- - ! reset shr logging to my log file - !---------------------------------------------------------------------------- - - call icepack_query_parameters(ktherm_out=ktherm) - call icepack_query_parameters(tfrz_option_out=tfrz_option) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - ! Now write output to nu_diag - this must happen AFTER call to cice_init - if (mastertask) then - write(nu_diag,'(a,d21.14)') trim(subname)//' cice init nextsw_cday = ',nextsw_cday - write(nu_diag,'(a)') trim(subname)//' tfrz_option = '//trim(tfrz_option) - if (ktherm == 2 .and. trim(tfrz_option) /= 'mushy') then - write(nu_diag,*) trim(subname),' Warning: Using ktherm = 2 and tfrz_option = ', trim(tfrz_option) - endif - write(nu_diag,'(a )') trim(subname)//' inst_name = '//trim(inst_name) - write(nu_diag,'(a,i8 )') trim(subname)//' inst_index = ',inst_index - write(nu_diag,'(a )') trim(subname)//' inst_suffix = ',trim(inst_suffix) - endif - !--------------------------------------------------------------------------- ! use EClock to reset calendar information on initial start !--------------------------------------------------------------------------- @@ -758,6 +795,30 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) end if call calendar() ! update calendar info + + !---------------------------------------------------------------------------- + ! reset shr logging to my log file + !---------------------------------------------------------------------------- + + call icepack_query_parameters(ktherm_out=ktherm) + call icepack_query_parameters(tfrz_option_out=tfrz_option) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + ! Now write output to nu_diag - this must happen AFTER call to cice_init + if (mastertask) then + write(nu_diag,'(a,d21.14)') trim(subname)//' cice init nextsw_cday = ',nextsw_cday + write(nu_diag,'(a)') trim(subname)//' tfrz_option = '//trim(tfrz_option) + if (ktherm == 2 .and. 
trim(tfrz_option) /= 'mushy') then + write(nu_diag,*) trim(subname),' Warning: Using ktherm = 2 and tfrz_option = ', trim(tfrz_option) + endif + write(nu_diag,'(a )') trim(subname)//' inst_name = '//trim(inst_name) + write(nu_diag,'(a,i8 )') trim(subname)//' inst_index = ',inst_index + write(nu_diag,'(a )') trim(subname)//' inst_suffix = ',trim(inst_suffix) + endif + + if (write_ic) then call accum_hist(dt) ! write initial conditions end if @@ -769,50 +830,6 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) call ice_prescribed_init(clock, ice_mesh, rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return - !----------------------------------------------------------------- - ! Advertise fields - !----------------------------------------------------------------- - - ! NOTE: the advertise phase needs to be called after the ice - ! initialization since the number of ice categories is needed for - ! ice_fraction_n and mean_sw_pen_to_ocn_ifrac_n - call ice_advertise_fields(gcomp, importState, exportState, flds_scalar_name, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call t_stopf ('cice_init_total') - - end subroutine InitializeAdvertise - - !=============================================================================== - - subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) - - ! Arguments - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState - type(ESMF_State) :: exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! Local variables - integer :: n - integer :: fieldcount - type(ESMF_Field) :: lfield - character(len=char_len_long) :: cvalue - real(dbl_kind) :: scol_lon - real(dbl_kind) :: scol_lat - real(dbl_kind) :: scol_spval - real(dbl_kind), pointer :: fldptr1d(:) - real(dbl_kind), pointer :: fldptr2d(:,:) - integer :: rank - character(len=char_len_long) :: single_column_lnd_domainfile - character(len=char_len_long) , pointer :: lfieldnamelist(:) => null() - character(len=*), parameter :: subname=trim(modName)//':(InitializeRealize) ' - !-------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - #ifdef CESMCOUPLED ! if single column is not valid - set all export state fields to zero and return if (single_column .and. .not. scol_valid) then @@ -848,7 +865,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) ! *** RETURN HERE *** ! 
******************* RETURN - else + else if(single_column) then write(nu_diag,'(a,3(f10.5,2x))')' (ice_comp_nuopc) single column mode lon/lat/frac is ',& scmlon,scmlat,scol_frac end if diff --git a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 index fdf7c80c3..2597dd88c 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 @@ -11,7 +11,7 @@ module ice_import_export use ice_domain_size , only : nx_global, ny_global, block_size_x, block_size_y, max_blocks, ncat use ice_domain_size , only : nfreq, nfsd use ice_exit , only : abort_ice - use ice_flux , only : strairxT, strairyT, strocnxT, strocnyT + use ice_flux , only : strairxT, strairyT, strocnxT_iavg, strocnyT_iavg use ice_flux , only : alvdr, alidr, alvdf, alidf, Tref, Qref, Uref use ice_flux , only : flat, fsens, flwout, evap, fswabs, fhocn, fswthru use ice_flux , only : fswthru_vdr, fswthru_vdf, fswthru_idr, fswthru_idf @@ -36,9 +36,9 @@ module ice_import_export use ice_shr_methods , only : chkerr, state_reset use icepack_intfc , only : icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc , only : icepack_query_parameters, icepack_query_tracer_flags - use icepack_intfc , only : icepack_query_tracer_indices use icepack_intfc , only : icepack_liquidus_temperature use icepack_intfc , only : icepack_sea_freezing_temperature + use icepack_intfc , only : icepack_query_tracer_indices use icepack_parameters , only : puny, c2 use cice_wrapper_mod , only : t_startf, t_stopf, t_barrierf #ifdef CESMCOUPLED @@ -307,6 +307,7 @@ end subroutine ice_advertise_fields !============================================================================== subroutine ice_realize_fields(gcomp, mesh, flds_scalar_name, flds_scalar_num, rc) + use ice_scam, only : single_column ! input/output variables type(ESMF_GridComp) :: gcomp @@ -320,7 +321,7 @@ subroutine ice_realize_fields(gcomp, mesh, flds_scalar_name, flds_scalar_num, rc type(ESMF_State) :: exportState type(ESMF_Field) :: lfield integer :: numOwnedElements - integer :: i, j, iblk, n, k + integer :: i, j, iblk, n integer :: ilo, ihi, jlo, jhi ! beginning and end of physical domain type(block) :: this_block ! block information for current block real(dbl_kind), allocatable :: mesh_areas(:) @@ -361,10 +362,10 @@ subroutine ice_realize_fields(gcomp, mesh, flds_scalar_name, flds_scalar_num, rc tag=subname//':CICE_Import',& mesh=mesh, rc=rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return - #ifdef CESMCOUPLED ! Get mesh areas from second field - using second field since the ! first field is the scalar field + if (single_column) return call ESMF_MeshGet(mesh, numOwnedElements=numOwnedElements, rc=rc) if (chkerr(rc,__LINE__,u_FILE_u)) return @@ -897,6 +898,8 @@ end subroutine ice_import !=============================================================================== subroutine ice_export( exportState, rc ) + use ice_scam, only : single_column + ! input/output variables type(ESMF_State), intent(inout) :: exportState integer , intent(out) :: rc @@ -919,12 +922,13 @@ subroutine ice_export( exportState, rc ) real (kind=dbl_kind) :: ailohi(nx_block,ny_block,max_blocks) ! fractional ice area real (kind=dbl_kind) :: floediam(nx_block,ny_block,max_blocks) real (kind=dbl_kind) :: floethick(nx_block,ny_block,max_blocks) ! 
ice thickness - real (kind=dbl_kind) :: Tffresh logical (kind=log_kind) :: tr_fsd integer (kind=int_kind) :: nt_fsd + real (kind=dbl_kind) :: Tffresh real (kind=dbl_kind), allocatable :: tempfld(:,:,:) real (kind=dbl_kind), pointer :: dataptr_ifrac_n(:,:) real (kind=dbl_kind), pointer :: dataptr_swpen_n(:,:) + logical (kind=log_kind), save :: first_call = .true. character(len=*),parameter :: subname = 'ice_export' !----------------------------------------------------- @@ -971,6 +975,9 @@ subroutine ice_export( exportState, rc ) ! ice fraction ailohi(i,j,iblk) = min(aice(i,j,iblk), c1) + ! surface temperature + Tsrf(i,j,iblk) = Tffresh + trcr(i,j,1,iblk) !Kelvin (original ???) + if (tr_fsd) then ! floe thickness (m) if (aice(i,j,iblk) > puny) then @@ -992,9 +999,6 @@ subroutine ice_export( exportState, rc ) floediam(i,j,iblk) = MAX(c2*floe_rad_c(1),workx) endif - ! surface temperature - Tsrf(i,j,iblk) = Tffresh + trcr(i,j,1,iblk) !Kelvin (original ???) - ! wind stress (on POP T-grid: convert to lat-lon) workx = strairxT(i,j,iblk) ! N/m^2 worky = strairyT(i,j,iblk) ! N/m^2 @@ -1002,8 +1006,8 @@ subroutine ice_export( exportState, rc ) tauya(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) + workx*sin(ANGLET(i,j,iblk)) ! ice/ocean stress (on POP T-grid: convert to lat-lon) - workx = -strocnxT(i,j,iblk) ! N/m^2 - worky = -strocnyT(i,j,iblk) ! N/m^2 + workx = -strocnxT_iavg(i,j,iblk) ! N/m^2 + worky = -strocnyT_iavg(i,j,iblk) ! N/m^2 tauxo(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) - worky*sin(ANGLET(i,j,iblk)) tauyo(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) + workx*sin(ANGLET(i,j,iblk)) enddo @@ -1050,8 +1054,11 @@ subroutine ice_export( exportState, rc ) !--------------------------------- ! Zero out fields with tmask for proper coupler accumulation in ice free areas - call state_reset(exportState, c0, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (first_call .or. .not.single_column) then + call state_reset(exportState, c0, rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + first_call = .false. + endif ! Create a temporary field allocate(tempfld(nx_block,ny_block,nblocks)) diff --git a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 index 17dfb86ac..688b6066d 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 @@ -427,7 +427,7 @@ subroutine ice_mesh_create_scolumn(scol_lon, scol_lat, ice_mesh, rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return ! Allocate module variable ocn_gridcell_frac - allocate(ocn_gridcell_frac(nx_block,ny_block,max_blocks)) + allocate(ocn_gridcell_frac(2,2,1)) ocn_gridcell_frac(:,:,:) = scol_frac end subroutine ice_mesh_create_scolumn @@ -560,7 +560,7 @@ subroutine ice_mesh_check(gcomp, ice_mesh, rc) ! Check CICE mesh use ice_constants, only : c1,c0,c360 - use ice_grid , only : tlon, tlat + use ice_grid , only : tlon, tlat, hm ! input/output parameters type(ESMF_GridComp) , intent(inout) :: gcomp @@ -569,7 +569,8 @@ subroutine ice_mesh_check(gcomp, ice_mesh, rc) ! local variables type(ESMF_DistGrid) :: distGrid - integer :: n,c,g,i,j,m ! indices + type(ESMF_Array) :: elemMaskArray + integer :: n,i,j ! indices integer :: iblk, jblk ! indices integer :: ilo, ihi, jlo, jhi ! beginning and end of physical domain type(block) :: this_block ! 
block information for current block @@ -578,11 +579,15 @@ subroutine ice_mesh_check(gcomp, ice_mesh, rc) real(dbl_kind), pointer :: ownedElemCoords(:) real(dbl_kind), pointer :: lat(:), latMesh(:) real(dbl_kind), pointer :: lon(:), lonMesh(:) + integer , pointer :: model_mask(:) real(dbl_kind) :: diff_lon real(dbl_kind) :: diff_lat real(dbl_kind) :: rad_to_deg real(dbl_kind) :: tmplon, eps_imesh logical :: isPresent, isSet + logical :: mask_error + integer :: mask_internal + integer :: mask_file character(len=char_len_long) :: cvalue character(len=char_len_long) :: logmsg character(len=*), parameter :: subname = ' ice_mesh_check: ' @@ -606,7 +611,7 @@ subroutine ice_mesh_check(gcomp, ice_mesh, rc) allocate(ownedElemCoords(spatialDim*numownedelements)) allocate(lonmesh(numOwnedElements)) allocate(latmesh(numOwnedElements)) - call ESMF_MeshGet(ice_mesh, ownedElemCoords=ownedElemCoords) + call ESMF_MeshGet(ice_mesh, ownedElemCoords=ownedElemCoords, rc=rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return do n = 1,numOwnedElements lonMesh(n) = ownedElemCoords(2*n-1) @@ -650,8 +655,45 @@ subroutine ice_mesh_check(gcomp, ice_mesh, rc) enddo enddo -100 format('ERROR: CICE n, lonmesh, lon, diff_lon = ',i6,2(f21.13,3x),d21.5) -101 format('ERROR: CICE n, latmesh, lat, diff_lat = ',i6,2(f21.13,3x),d21.5) + ! obtain internally generated ice mask for error checks + allocate(model_mask(numOwnedElements)) + call ESMF_MeshGet(ice_mesh, elementdistGrid=distGrid, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + elemMaskArray = ESMF_ArrayCreate(distGrid, model_mask, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call ESMF_MeshGet(ice_mesh, elemMaskArray=elemMaskArray, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + mask_error = .false. + n=0 + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + n = n+1 + mask_internal = nint(hm(i,j,iblk),kind=dbl_kind) + mask_file = model_mask(n) + if (mask_internal /= mask_file) then + write(6,102) n,mask_internal,mask_file + mask_error = .true. + end if + enddo !i + enddo !j + enddo !iblk + if (mask_error) then + call abort_ice(error_message=subname, file=__FILE__, line=__LINE__) + end if + + call ESMF_ArrayDestroy(elemMaskArray, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + +100 format('ERROR: CICE n, mesh_lon , lon, diff_lon = ',i8,2(f21.13,3x),d21.5) +101 format('ERROR: CICE n, mesh_lat , lat, diff_lat = ',i8,2(f21.13,3x),d21.5) +102 format('ERROR: CICE n, mask_internal, mask_file = ',i8,2(i2,2x)) ! deallocate memory deallocate(ownedElemCoords) diff --git a/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90 b/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90 index 84973e9dd..06b090ece 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90 @@ -472,11 +472,11 @@ subroutine ice_prescribed_phys() ! set non-computed fluxes, ice velocities, ice-ocn stresses to zero !-------------------------------------------------------------------- - frzmlt (:,:,:) = c0 - uvel (:,:,:) = c0 - vvel (:,:,:) = c0 - strocnxT (:,:,:) = c0 - strocnyT (:,:,:) = c0 + frzmlt (:,:,:) = c0 + uvel (:,:,:) = c0 + vvel (:,:,:) = c0 + strocnxT_iavg(:,:,:) = c0 + strocnyT_iavg(:,:,:) = c0 !----------------------------------------------------------------- ! 
other atm and ocn fluxes diff --git a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 index 78d462d4c..22596429d 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 @@ -271,17 +271,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, init_snowtracers, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_snow, read_restart_snow, & @@ -298,11 +291,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_snow, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -322,11 +311,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_snow_out=tr_snow, tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -388,21 +373,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! 
level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 index 6e799723e..77bb7738e 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 @@ -157,11 +157,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_iso, write_restart_bgc, write_restart_hbrine, & write_restart_snow @@ -184,11 +180,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, tr_snow, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -206,11 +198,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_iso_out=tr_iso, tr_fsd_out=tr_fsd, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) @@ -412,9 +400,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_snow) call write_restart_snow diff --git a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 index f9c8f30ae..f4a7a2ef1 100644 --- a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 @@ -255,17 +255,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, init_snowtracers, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_snow, read_restart_snow, & @@ -282,11 +275,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! 
block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_snow, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -306,11 +295,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_snow_out=tr_snow, tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -372,21 +357,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 index 275846d55..7f38cd014 100644 --- a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 @@ -149,11 +149,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_iso, write_restart_bgc, write_restart_hbrine, & write_restart_snow @@ -176,11 +172,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, tr_snow, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -198,11 +190,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_iso_out=tr_iso, tr_fsd_out=tr_fsd, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) @@ -403,9 +391,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_snow) call write_restart_snow diff --git 
a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 index ad355d783..84d1a3a60 100644 --- a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 @@ -241,17 +241,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_fsd, read_restart_fsd, & @@ -267,11 +260,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -290,11 +279,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -354,21 +339,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 index ad355d783..84d1a3a60 100644 --- a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 @@ -241,17 +241,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_fsd, read_restart_fsd, & @@ -267,11 +260,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! 
block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -290,11 +279,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -354,21 +339,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/shared/ice_arrays_column.F90 b/cicecore/shared/ice_arrays_column.F90 index c9e8be8db..b4727d3fd 100644 --- a/cicecore/shared/ice_arrays_column.F90 +++ b/cicecore/shared/ice_arrays_column.F90 @@ -24,6 +24,7 @@ module ice_arrays_column public :: alloc_arrays_column ! icepack_atmo.F90 + ! Cdn variables on the T-grid real (kind=dbl_kind), public, & dimension (:,:,:), allocatable :: & Cdn_atm , & ! atm drag coefficient diff --git a/cicecore/shared/ice_init_column.F90 b/cicecore/shared/ice_init_column.F90 index 5339aa6ec..06ab79cdb 100644 --- a/cicecore/shared/ice_init_column.F90 +++ b/cicecore/shared/ice_init_column.F90 @@ -44,11 +44,7 @@ module ice_init_column private public :: init_thermo_vertical, init_shortwave, & init_age, init_FY, init_lvl, init_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_aerosol, init_bgc, init_hbrine, init_zbgc, input_zbgc, & count_tracers, init_isotope, init_snowtracers @@ -547,23 +543,6 @@ subroutine init_lvl(iblk, alvl, vlvl) end subroutine init_lvl -#ifdef UNDEPRECATE_CESMPONDS -!======================================================================= - -! Initialize melt ponds. - - subroutine init_meltponds_cesm(apnd, hpnd) - - real(kind=dbl_kind), dimension(:,:,:), intent(out) :: & - apnd , & ! melt pond area fraction - hpnd ! melt pond depth - character(len=*),parameter :: subname='(init_meltponds_cesm)' - - apnd(:,:,:) = c0 - hpnd(:,:,:) = c0 - - end subroutine init_meltponds_cesm -#endif !======================================================================= ! Initialize melt ponds. 
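For context, the deleted init_meltponds_cesm above is a plain zero-fill of the pond tracers, and the surviving pond initializers follow the same pattern. A minimal sketch of the level-ice variant is below; the argument list is assumed by analogy with the deleted routine (the two extra tracers being the refrozen pond lid and the snow-depth-change field), so treat it as illustrative rather than the actual ice_init_column.F90 code.

      ! Illustrative sketch only: zero-fill initialization in the style
      ! of the deleted init_meltponds_cesm, extended to the level-ice
      ! pond tracers. Argument names are assumptions, not the real API.
      subroutine init_meltponds_lvl_sketch(apnd, hpnd, ipnd, dhsn)

      use ice_kinds_mod, only: dbl_kind
      use ice_constants, only: c0

      real(kind=dbl_kind), dimension(:,:,:), intent(out) :: &
         apnd , & ! melt pond area fraction
         hpnd , & ! melt pond depth
         ipnd , & ! refrozen pond (lid) ice thickness
         dhsn     ! change in snow depth over pond ice
      character(len=*),parameter :: subname='(init_meltponds_lvl_sketch)'

      apnd(:,:,:) = c0
      hpnd(:,:,:) = c0
      ipnd(:,:,:) = c0
      dhsn(:,:,:) = c0

      end subroutine init_meltponds_lvl_sketch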
@@ -1818,11 +1797,7 @@ subroutine count_tracers integer (kind=int_kind) :: ntrcr logical (kind=log_kind) :: tr_iage, tr_FY, tr_lvl, tr_pond, tr_aero, tr_fsd logical (kind=log_kind) :: tr_snow -#ifdef UNDEPRECATE_CESMPONDS - logical (kind=log_kind) :: tr_iso, tr_pond_cesm, tr_pond_lvl, tr_pond_topo -#else logical (kind=log_kind) :: tr_iso, tr_pond_lvl, tr_pond_topo -#endif integer (kind=int_kind) :: nt_Tsfc, nt_sice, nt_qice, nt_qsno, nt_iage, nt_FY integer (kind=int_kind) :: nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd, nt_aero integer (kind=int_kind) :: nt_fsd, nt_isosno, nt_isoice @@ -1907,11 +1882,7 @@ subroutine count_tracers call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & tr_lvl_out=tr_lvl, tr_aero_out=tr_aero, tr_pond_out=tr_pond, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_fsd_out=tr_fsd, & tr_snow_out=tr_snow, tr_iso_out=tr_iso, & tr_bgc_Nit_out=tr_bgc_Nit, tr_bgc_Am_out =tr_bgc_Am, tr_bgc_Sil_out=tr_bgc_Sil, & diff --git a/cicecore/shared/ice_restart_column.F90 b/cicecore/shared/ice_restart_column.F90 index f8426cbea..d0a4ef515 100644 --- a/cicecore/shared/ice_restart_column.F90 +++ b/cicecore/shared/ice_restart_column.F90 @@ -29,9 +29,6 @@ module ice_restart_column public :: write_restart_age, read_restart_age, & write_restart_FY, read_restart_FY, & write_restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_pond_cesm, read_restart_pond_cesm, & -#endif write_restart_pond_lvl, read_restart_pond_lvl, & write_restart_pond_topo, read_restart_pond_topo, & write_restart_snow, read_restart_snow, & @@ -45,9 +42,6 @@ module ice_restart_column restart_age , & ! if .true., read age tracer restart file restart_FY , & ! if .true., read FY tracer restart file restart_lvl , & ! if .true., read lvl tracer restart file -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, & ! if .true., read meltponds restart file -#endif restart_pond_lvl , & ! if .true., read meltponds restart file restart_pond_topo, & ! if .true., read meltponds restart file restart_snow , & ! if .true., read snow tracer restart file @@ -260,74 +254,6 @@ subroutine read_restart_lvl() end subroutine read_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS -!======================================================================= -! -! Dumps all values needed for restarting -! -! authors Elizabeth C. Hunke, LANL -! David A. Bailey, NCAR - - subroutine write_restart_pond_cesm() - - use ice_fileunits, only: nu_dump_pond - use ice_state, only: trcrn - - ! local variables - - logical (kind=log_kind) :: diag - integer (kind=int_kind) :: nt_apnd, nt_hpnd - character(len=*),parameter :: subname='(write_restart_pond_cesm)' - - call icepack_query_tracer_indices(nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - diag = .true. - - call write_restart_field(nu_dump_pond,0,trcrn(:,:,nt_apnd,:,:),'ruf8', & - 'apnd',ncat,diag) - call write_restart_field(nu_dump_pond,0,trcrn(:,:,nt_hpnd,:,:),'ruf8', & - 'hpnd',ncat,diag) - - end subroutine write_restart_pond_cesm - -!======================================================================= - -! Reads all values needed for a meltpond volume restart -! -! authors Elizabeth C. Hunke, LANL -! David A. 
Bailey, NCAR - - subroutine read_restart_pond_cesm() - - use ice_fileunits, only: nu_restart_pond - use ice_state, only: trcrn - - ! local variables - - logical (kind=log_kind) :: & - diag - integer (kind=int_kind) :: nt_apnd, nt_hpnd - character(len=*),parameter :: subname='(read_restart_pond_cesm)' - - call icepack_query_tracer_indices(nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - diag = .true. - - if (my_task == master_task) write(nu_diag,*) subname,'min/max cesm ponds' - - call read_restart_field(nu_restart_pond,0,trcrn(:,:,nt_apnd,:,:),'ruf8', & - 'apnd',ncat,diag,field_loc_center,field_type_scalar) - call read_restart_field(nu_restart_pond,0,trcrn(:,:,nt_hpnd,:,:),'ruf8', & - 'hpnd',ncat,diag,field_loc_center,field_type_scalar) - - end subroutine read_restart_pond_cesm -#endif !======================================================================= ! ! Dumps all values needed for restarting diff --git a/configuration/scripts/cice.batch.csh b/configuration/scripts/cice.batch.csh index fbe172f51..1cf23da45 100755 --- a/configuration/scripts/cice.batch.csh +++ b/configuration/scripts/cice.batch.csh @@ -33,6 +33,19 @@ cat >> ${jobfile} << EOFB #PBS -l walltime=${batchtime} EOFB +else if (${ICE_MACHINE} =~ gust*) then +cat >> ${jobfile} << EOFB +#PBS -q ${queue} +#PBS -l job_priority=regular +#PBS -N ${ICE_CASENAME} +#PBS -A ${acct} +#PBS -l select=${nnodes}:ncpus=${corespernode}:mpiprocs=${taskpernodelimit}:ompthreads=${nthrds} +#PBS -l walltime=${batchtime} +#PBS -j oe +#PBS -W umask=022 +#PBS -o ${ICE_CASEDIR} +EOFB + else if (${ICE_MACHINE} =~ hobart*) then cat >> ${jobfile} << EOFB #PBS -j oe @@ -90,8 +103,8 @@ else if (${ICE_MACHINE} =~ nrlssc*) then # nrlssc queue system has nodes with different task per node if (${taskpernode} <= 12) set tpnstr = 'twelve' if (${taskpernode} == 20) set tpnstr = 'twenty' -if (${taskpernode} == 24) set tpnstr = 'twentyfour' -if (${taskpernode} == 28) set tpnstr = 'twentyeight' +if (${taskpernode} >= 24) set tpnstr = 'twentyfour' +#if (${taskpernode} == 28) set tpnstr = 'twentyeight' cat >> ${jobfile} <> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -23,7 +23,19 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ hobart* || ${ICE_MACHINE} =~ izumi*) then +else if (${ICE_MACHCOMP} =~ gust*) then +if (${ICE_COMMDIR} =~ serial*) then +cat >> ${jobfile} << EOFR +./cice >&! \$ICE_RUNLOG_FILE +EOFR +else +cat >> ${jobfile} << EOFR +mpiexec --cpu-bind depth -n ${ntasks} -ppn ${taskpernodelimit} -d ${nthrds} ./cice >&! \$ICE_RUNLOG_FILE +EOFR +endif + +#======= +else if (${ICE_MACHCOMP} =~ hobart* || ${ICE_MACHCOMP} =~ izumi*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -35,7 +47,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ gaffney* || ${ICE_MACHINE} =~ koehr* || ${ICE_MACHINE} =~ mustang*) then +else if (${ICE_MACHCOMP} =~ gaffney* || ${ICE_MACHCOMP} =~ koehr* || ${ICE_MACHCOMP} =~ mustang*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -47,13 +59,31 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ onyx* || ${ICE_MACHINE} =~ narwhal) then +else if (${ICE_MACHCOMP} =~ nrlssc*) then +if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR -aprun -n ${ntasks} -N ${taskpernodelimit} -d ${nthrds} ./cice >&! \$ICE_RUNLOG_FILE +./cice >&! 
\$ICE_RUNLOG_FILE +EOFR +else +cat >> ${jobfile} << EOFR +mpirun -np ${ntasks} ./cice >&! \$ICE_RUNLOG_FILE +EOFR +endif + +#======= +else if (${ICE_MACHCOMP} =~ narwhal_*hpcx*) then +cat >> ${jobfile} << EOFR +mpirun -np ${ntasks} -hostfile \$PBS_NODEFILE \${EXTRA_OMPI_SETTINGS} ./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ cori*) then +else if (${ICE_MACHCOMP} =~ onyx* || ${ICE_MACHCOMP} =~ narwhal*) then +cat >> ${jobfile} << EOFR +aprun -q -n ${ntasks} -N ${taskpernodelimit} -d ${nthrds} ./cice >&! \$ICE_RUNLOG_FILE +EOFR + +#======= +else if (${ICE_MACHCOMP} =~ cori*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -65,7 +95,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ compy*) then +else if (${ICE_MACHCOMP} =~ compy*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -77,7 +107,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ badger*) then +else if (${ICE_MACHCOMP} =~ badger*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -89,7 +119,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ fram*) then +else if (${ICE_MACHCOMP} =~ fram*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -101,7 +131,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ cesium*) then +else if (${ICE_MACHCOMP} =~ cesium*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -113,7 +143,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ millikan*) then +else if (${ICE_MACHCOMP} =~ millikan*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -125,7 +155,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ daley* || ${ICE_MACHINE} =~ banting*) then +else if (${ICE_MACHCOMP} =~ daley* || ${ICE_MACHCOMP} =~ banting*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -137,7 +167,44 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ freya*) then +else if (${ICE_MACHCOMP} =~ ppp5* || ${ICE_MACHCOMP} =~ ppp6* || ${ICE_MACHCOMP} =~ robert* || ${ICE_MACHCOMP} =~ underhill*) then +if (${ICE_COMMDIR} =~ serial*) then +cat >> ${jobfile} << EOFR +./cice >&! \$ICE_RUNLOG_FILE +EOFR +else +cat >> ${jobfile} << EOFR +mpirun -np ${ntasks} ./cice >&! \$ICE_RUNLOG_FILE +EOFR +endif + +#======= +else if (${ICE_MACHCOMP} =~ ppp3*) then +if (${ICE_COMMDIR} =~ serial*) then +cat >> ${jobfile} << EOFR +./cice >&! \$ICE_RUNLOG_FILE +EOFR +else +cat >> ${jobfile} << EOFR +rumpirun -np ${ntasks} ./cice >&! \$ICE_RUNLOG_FILE +EOFR +endif + +#======= +else if (${ICE_MACHCOMP} =~ gpsc3*) then +if (${ICE_COMMDIR} =~ serial*) then +cat >> ${jobfile} << EOFR +./cice >&! \$ICE_RUNLOG_FILE +EOFR +else +cat >> ${jobfile} << EOFR +mpirun -np ${ntasks} ./cice >&! \$ICE_RUNLOG_FILE +EOFR +endif + + +#======= +else if (${ICE_MACHCOMP} =~ freya*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR aprun -n 1 -N 1 -d 1 ./cice >&! \$ICE_RUNLOG_FILE @@ -149,45 +216,45 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ gaea*) then +else if (${ICE_MACHCOMP} =~ gaea*) then cat >> ${jobfile} << EOFR srun -n ${ntasks} -c ${nthrds} ./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ hera*) then +else if (${ICE_MACHCOMP} =~ hera*) then cat >> ${jobfile} << EOFR srun -n ${ntasks} -c ${nthrds} ./cice >&! 
\$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ orion*) then +else if (${ICE_MACHCOMP} =~ orion*) then cat >> ${jobfile} << EOFR srun -n ${ntasks} -c ${nthrds} ./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ high_Sierra*) then +else if (${ICE_MACHCOMP} =~ high_Sierra*) then cat >> ${jobfile} << EOFR mpirun -np ${ntasks} ./cice >&! \$ICE_RUNLOG_FILE #./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ phase3*) then +else if (${ICE_MACHCOMP} =~ phase3*) then cat >> ${jobfile} << EOFR mpirun -np ${ntasks} ./cice >&! \$ICE_RUNLOG_FILE #./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ testmachine*) then +else if (${ICE_MACHCOMP} =~ testmachine*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ travisCI*) then +else if (${ICE_MACHCOMP} =~ travisCI*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -202,7 +269,7 @@ endif #EOFR #======= -else if (${ICE_MACHINE} =~ conda*) then +else if (${ICE_MACHCOMP} =~ conda*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -216,7 +283,7 @@ endif #======= else - echo "${0} ERROR ${ICE_MACHINE} unknown" + echo "${0} ERROR ${ICE_MACHCOMP} unknown" exit -1 endif #======= diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index ec582873a..8262f34ec 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -75,6 +75,11 @@ kcatbound = 0 dxrect = 30.e5 dyrect = 30.e5 + lonrefrect = -156.50d0 + latrefrect = 71.35d0 + scale_dxdy = .false. + dxscale = 1.d0 + dyscale = 1.d0 close_boundaries = .false. ncat = 5 nfsd = 1 @@ -162,7 +167,7 @@ kridge = 1 ktransport = 1 ssh_stress = 'geostrophic' - maxits_nonlin = 4 + maxits_nonlin = 10 precond = 'pgmres' dim_fgmres = 50 dim_pgmres = 5 @@ -173,7 +178,7 @@ monitor_pgmres = .false. ortho_type = 'mgs' reltol_nonlin = 1e-8 - reltol_fgmres = 1e-2 + reltol_fgmres = 1e-1 reltol_pgmres = 1e-6 algo_nonlin = 'picard' use_mean_vrel = .true. 
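The ice_in changes above retune the implicit (VP) solver defaults: the nonlinear (Picard) outer loop now runs up to 10 iterations instead of 4, while each inner FGMRES solve is allowed a looser relative tolerance (1e-1 instead of 1e-2), shifting work from the inner solver to the outer loop. The sketch below is a self-contained stand-in, not ice_dyn_vp.F90 code: it replaces the momentum solve with the scalar fixed-point problem x = cos(x) purely to show how maxits_nonlin and reltol_nonlin bound the outer iteration.

      program picard_demo
      ! Sketch only: one cos(x) update stands in for one inner
      ! (preconditioned FGMRES) solve of the VP momentum equations.
      implicit none
      integer, parameter :: maxits_nonlin = 10      ! new ice_in default
      real(8), parameter :: reltol_nonlin = 1.0d-8  ! ice_in default
      real(8) :: x, res, res0
      integer :: it
      x = 0.5d0
      res0 = abs(cos(x) - x)            ! initial nonlinear residual
      do it = 1, maxits_nonlin
         x = cos(x)                     ! stand-in for the inner solve
         res = abs(cos(x) - x)          ! updated nonlinear residual
         write(*,'(a,i3,a,es10.3)') ' it =', it, '  res =', res
         if (res < reltol_nonlin*res0) exit   ! relative stopping test
      end do
      end program picard_demo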
diff --git a/configuration/scripts/machines/Macros.narwhal_aocc b/configuration/scripts/machines/Macros.narwhal_aocc index 44b1dc2f6..7cd71218d 100644 --- a/configuration/scripts/machines/Macros.narwhal_aocc +++ b/configuration/scripts/machines/Macros.narwhal_aocc @@ -49,7 +49,7 @@ INCLDIR := $(INCLDIR) ifeq ($(ICE_THREADED), true) LDFLAGS += -mp - CFLAGS += -mp +# CFLAGS += -mp FFLAGS += -mp else LDFLAGS += -nomp diff --git a/configuration/scripts/machines/Macros.narwhal_intelhpcx b/configuration/scripts/machines/Macros.narwhal_intelhpcx new file mode 100644 index 000000000..0e2a94b20 --- /dev/null +++ b/configuration/scripts/machines/Macros.narwhal_intelhpcx @@ -0,0 +1,57 @@ +#============================================================================== +# Macros file for NAVYDSRC narwhal, intel compiler with hpcx hcoll openmpi +#============================================================================== + +CPP := fpp +CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CFLAGS := -c -O2 -fp-model precise -fcommon + +FIXEDFLAGS := -132 +FREEFLAGS := -FR +FFLAGS := -fp-model precise -convert big_endian -assume byterecl -ftz -traceback +# -mcmodel medium -shared-intel +FFLAGS_NOOPT:= -O0 + +ifeq ($(ICE_BLDDEBUG), true) + FFLAGS += -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created +# FFLAGS += -O0 -g -check all -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -init=snan,arrays +else + FFLAGS += -O2 +endif + +SCC := cc +SFC := ftn +MPICC := mpicc +MPIFC := mpif90 + +ifeq ($(ICE_COMMDIR), mpi) + FC := $(MPIFC) + CC := $(MPICC) +else + FC := $(SFC) + CC := $(SCC) +endif +LD:= $(FC) + +# defined by module +NETCDF_PATH := $(NETCDF_DIR) +#PNETCDF_PATH := $(PNETCDF) +#PNETCDF_PATH := /glade/apps/opt/pnetcdf/1.3.0/intel/default +#LAPACK_LIBDIR := /glade/apps/opt/lapack/3.4.2/intel/12.1.5/lib + +#PIO_CONFIG_OPTS:= --enable-filesystem-hints=gpfs + +INCLDIR := $(INCLDIR) +INCLDIR += -I$(NETCDF_PATH)/include + +LIB_NETCDF := $(NETCDF_PATH)/lib +#LIB_PNETCDF := $(PNETCDF_PATH)/lib +#LIB_MPI := $(IMPILIBDIR) +SLIBS := $(HCOLL_LIBS) -L$(NETCDF_PATH)/lib -lnetcdf -lnetcdff + +ifeq ($(ICE_THREADED), true) + LDFLAGS += -qopenmp + CFLAGS += -qopenmp + FFLAGS += -qopenmp +endif + diff --git a/configuration/scripts/machines/env.badger_intel b/configuration/scripts/machines/env.badger_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.cheyenne_gnu b/configuration/scripts/machines/env.cheyenne_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.cheyenne_intel b/configuration/scripts/machines/env.cheyenne_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.cheyenne_pgi b/configuration/scripts/machines/env.cheyenne_pgi old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.compy_intel b/configuration/scripts/machines/env.compy_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.cori_intel b/configuration/scripts/machines/env.cori_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.gaea_intel b/configuration/scripts/machines/env.gaea_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.narwhal_aocc b/configuration/scripts/machines/env.narwhal_aocc old mode 100755 new mode 100644 index 6d6822f46..875296520 --- a/configuration/scripts/machines/env.narwhal_aocc +++ b/configuration/scripts/machines/env.narwhal_aocc @@ -14,21 +14,18 @@ module unload 
PrgEnv-cray module unload PrgEnv-gnu module unload PrgEnv-intel module unload PrgEnv-nvidia -module load PrgEnv-aocc/8.1.0 -module load cray-pals/1.0.17 -module load bct-env/0.1 -module unload aocc -module load aocc/2.2.0.1 -module unload cray-mpich -module load cray-mpich/8.1.5 - +module load PrgEnv-aocc +module load cpe/22.03 +module load aocc/3.0.0 +module unload cray-pals +module load cray-pals/1.2.2 module unload cray-hdf5 module unload cray-hdf5-parallel module unload cray-netcdf-hdf5parallel module unload cray-parallel-netcdf module unload netcdf -module load cray-netcdf/4.7.4.4 -module load cray-hdf5/1.12.0.4 +module load cray-netcdf/4.8.1.1 +module load cray-hdf5/1.12.1.1 setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited @@ -39,9 +36,9 @@ setenv OMP_WAIT_POLICY PASSIVE endif setenv ICE_MACHINE_MACHNAME narwhal -setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12" +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12 2.6GHz, Slingshot-10 Interconnect" setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "aocc_3.0.0-Build#78 2020_12_10 clang/flang 12.0.0, cray-mpich/8.1.9, netcdf/4.7.4.4" +setenv ICE_MACHINE_ENVINFO "aocc_3.0.0-Build#78 2020_12_10 clang/flang 12.0.0, cray-mpich/8.1.14, netcdf/4.8.1.1" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium diff --git a/configuration/scripts/machines/env.narwhal_cray b/configuration/scripts/machines/env.narwhal_cray old mode 100755 new mode 100644 index d0fcc9ba7..a9e5bd14a --- a/configuration/scripts/machines/env.narwhal_cray +++ b/configuration/scripts/machines/env.narwhal_cray @@ -14,21 +14,17 @@ module unload PrgEnv-cray module unload PrgEnv-gnu module unload PrgEnv-intel module unload PrgEnv-nvidia -module load PrgEnv-cray/8.1.0 -module load cray-pals/1.0.17 -module load bct-env/0.1 -module unload cce -module load cce/12.0.3 -module unload cray-mpich -module load cray-mpich/8.1.9 - +module load PrgEnv-cray +module load cpe/22.03 +module unload cray-pals +module load cray-pals/1.2.2 module unload cray-hdf5 module unload cray-hdf5-parallel module unload cray-netcdf-hdf5parallel module unload cray-parallel-netcdf module unload netcdf -module load cray-netcdf/4.7.4.4 -module load cray-hdf5/1.12.0.4 +module load cray-netcdf/4.8.1.1 +module load cray-hdf5/1.12.1.1 setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited @@ -39,9 +35,9 @@ setenv OMP_WAIT_POLICY PASSIVE endif setenv ICE_MACHINE_MACHNAME narwhal -setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12" +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12 2.6GHz, Slingshot-10 Interconnect" setenv ICE_MACHINE_ENVNAME cray -setenv ICE_MACHINE_ENVINFO "cce 12.0.3, cray-mpich/8.1.9, netcdf/4.7.4.4" +setenv ICE_MACHINE_ENVINFO "cce 13.0.2, cray-mpich/8.1.14, netcdf/4.8.1.1" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium diff --git a/configuration/scripts/machines/env.narwhal_gnu b/configuration/scripts/machines/env.narwhal_gnu old mode 100755 new mode 100644 index 51a272f4e..701920161 --- a/configuration/scripts/machines/env.narwhal_gnu +++ b/configuration/scripts/machines/env.narwhal_gnu @@ -14,21 +14,17 @@ module unload PrgEnv-cray module unload PrgEnv-gnu module unload PrgEnv-intel module unload PrgEnv-nvidia -module load PrgEnv-gnu/8.1.0 -module load cray-pals/1.0.17 -module load bct-env/0.1 -module unload gcc -module load gcc/11.2.0 -module unload cray-mpich -module load 
cray-mpich/8.1.9 - +module load PrgEnv-gnu +module load cpe/22.03 +module unload cray-pals +module load cray-pals/1.2.2 module unload cray-hdf5 module unload cray-hdf5-parallel module unload cray-netcdf-hdf5parallel module unload cray-parallel-netcdf module unload netcdf -module load cray-netcdf/4.7.4.4 -module load cray-hdf5/1.12.0.4 +module load cray-netcdf/4.8.1.1 +module load cray-hdf5/1.12.1.1 setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited @@ -39,9 +35,9 @@ setenv OMP_WAIT_POLICY PASSIVE endif setenv ICE_MACHINE_MACHNAME narwhal -setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12" -setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "gnu fortran/c 11.2.0, cray-mpich/8.1.9, netcdf/4.7.4.4" +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12 2.6GHz, Slingshot-10 Interconnect" +setenv ICE_MACHINE_ENVNAME gnu +setenv ICE_MACHINE_ENVINFO "gnu c/fortran 11.2.0 20210728, cray-mpich/8.1.14, netcdf/4.8.1.1" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium diff --git a/configuration/scripts/machines/env.narwhal_intel b/configuration/scripts/machines/env.narwhal_intel old mode 100755 new mode 100644 index f79d962ff..4cc60acac --- a/configuration/scripts/machines/env.narwhal_intel +++ b/configuration/scripts/machines/env.narwhal_intel @@ -14,21 +14,17 @@ module unload PrgEnv-cray module unload PrgEnv-gnu module unload PrgEnv-intel module unload PrgEnv-nvidia -module load PrgEnv-intel/8.0.0 -module load cray-pals/1.0.17 -module load bct-env/0.1 -module unload intel -module load intel/2021.1 -module unload cray-mpich -module load cray-mpich/8.1.9 - +module load PrgEnv-intel +module load cpe/22.03 +module unload cray-pals +module load cray-pals/1.2.2 module unload cray-hdf5 module unload cray-hdf5-parallel module unload cray-netcdf-hdf5parallel module unload cray-parallel-netcdf module unload netcdf -module load cray-netcdf/4.7.4.4 -module load cray-hdf5/1.12.0.4 +module load cray-netcdf/4.8.1.1 +module load cray-hdf5/1.12.1.1 setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited @@ -39,9 +35,9 @@ setenv OMP_WAIT_POLICY PASSIVE endif setenv ICE_MACHINE_MACHNAME narwhal -setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12" +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12 2.6GHz, Slingshot-10 Interconnect" setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "ifort 2021.1 Beta 20201112, cray-mpich/8.1.9, netcdf/4.7.4.4" +setenv ICE_MACHINE_ENVINFO "ifort 2021.4.0 20210910, cray-mpich/8.1.14, netcdf/4.8.1.1" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium diff --git a/configuration/scripts/machines/env.onyx_cray b/configuration/scripts/machines/env.onyx_cray old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.onyx_gnu b/configuration/scripts/machines/env.onyx_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.onyx_intel b/configuration/scripts/machines/env.onyx_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.ppp6_gnu b/configuration/scripts/machines/env.ppp6_gnu index 69ed6ff8b..94f478774 100644 --- a/configuration/scripts/machines/env.ppp6_gnu +++ b/configuration/scripts/machines/env.ppp6_gnu @@ -8,7 +8,7 @@ endif if ("$inp" != "-nomodules") then # OpenMPI -source /usr/mpi/gcc/openmpi-4.1.2a1/bin/mpivars.csh +setenv PATH "/home/phb001/.local_rhel-8-icelake-64_gcc/bin:$PATH" # 
OpenMP setenv OMP_STACKSIZE 64M @@ -21,6 +21,7 @@ setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/site6/cice/runs/ setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/ setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baselines/ +setenv ICE_MACHINE_MAXRUNLENGTH 6 setenv ICE_MACHINE_SUBMIT qsub setenv ICE_MACHINE_TPNODE 80 setenv ICE_MACHINE_ACCT unused diff --git a/configuration/scripts/machines/env.ppp6_gnu-impi b/configuration/scripts/machines/env.ppp6_gnu-impi index 461e09a43..61cd46ed9 100644 --- a/configuration/scripts/machines/env.ppp6_gnu-impi +++ b/configuration/scripts/machines/env.ppp6_gnu-impi @@ -18,6 +18,12 @@ setenv I_MPI_F90 gfortran setenv I_MPI_FC gfortran setenv I_MPI_CC gcc setenv I_MPI_CXX g++ +# Reproducible collectives +if (${ICE_BASEGEN} != ${ICE_SPVAL} || ${ICE_BASECOM} != ${ICE_SPVAL}) then + setenv I_MPI_CBWR 1 +endif +# Stop being buggy +setenv I_MPI_FABRICS ofi # OpenMP setenv OMP_STACKSIZE 64M @@ -30,6 +36,7 @@ setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/site6/cice/runs/ setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/ setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baselines/ +setenv ICE_MACHINE_MAXRUNLENGTH 6 setenv ICE_MACHINE_SUBMIT qsub setenv ICE_MACHINE_TPNODE 80 setenv ICE_MACHINE_ACCT unused diff --git a/configuration/scripts/machines/env.ppp6_intel19 b/configuration/scripts/machines/env.ppp6_intel19 index 6cdd9a036..554b31e42 100644 --- a/configuration/scripts/machines/env.ppp6_intel19 +++ b/configuration/scripts/machines/env.ppp6_intel19 @@ -17,6 +17,12 @@ setenv FOR_DUMP_CORE_FILE 1 source $ssmuse -d /fs/ssm/hpco/exp/intelpsxe-impi-19.0.3.199 setenv FI_PROVIDER verbs setenv I_MPI_DEBUG_COREDUMP 1 +# Reproducible collectives +if (${ICE_BASEGEN} != ${ICE_SPVAL} || ${ICE_BASECOM} != ${ICE_SPVAL}) then + setenv I_MPI_CBWR 1 +endif +# Stop being buggy +setenv I_MPI_FABRICS ofi # NetCDF source $ssmuse -d hpco/exp/hdf5-netcdf4/serial/static/intel-19.0.3.199/02 @@ -31,6 +37,7 @@ setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/site6/cice/runs/ setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/ setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baselines/ +setenv ICE_MACHINE_MAXRUNLENGTH 6 setenv ICE_MACHINE_SUBMIT "qsub" setenv ICE_MACHINE_TPNODE 80 setenv ICE_MACHINE_ACCT P0000000 diff --git a/configuration/scripts/options/set_nml.bgczm b/configuration/scripts/options/set_nml.bgczm index 53513ca87..2644576cd 100644 --- a/configuration/scripts/options/set_nml.bgczm +++ b/configuration/scripts/options/set_nml.bgczm @@ -28,4 +28,3 @@ tr_bgc_DON = .true. tr_bgc_Fe = .true. modal_aero = .true. # dEdd_algae = .true. -tfrz_option = 'mushy' diff --git a/configuration/scripts/options/set_nml.boxchan1e b/configuration/scripts/options/set_nml.boxchan1e deleted file mode 100644 index cf8b0d314..000000000 --- a/configuration/scripts/options/set_nml.boxchan1e +++ /dev/null @@ -1,56 +0,0 @@ -days_per_year = 360 -use_leap_years = .false. -npt_unit = 'd' -npt = 5 -ice_ic = 'internal' -restart_ext = .true. -histfreq = 'd','1','x','x','x' -grid_type = 'rectangular' -kmt_type = 'channel_oneeast' -dxrect = 16.e5 -dyrect = 16.e5 -close_boundaries = .false. -ew_boundary_type = 'cyclic' -ns_boundary_type = 'open' -tr_iage = .false. -tr_FY = .false. -tr_lvl = .false. -tr_pond_lvl = .false. -ktherm = -1 -atmbndy = 'constant' -atm_data_type = 'uniform_east' -ocn_data_type = 'calm' -ice_data_type = 'block' -ice_data_conc = 'p5' -ice_data_dist = 'uniform' -calc_strair = .false. 
-rotate_wind = .false. -restore_ice = .false. -tfrz_option = 'mushy' -f_aice = 'd1' -f_hi = 'd1' -f_hs = 'd' -f_Tsfc = 'd' -f_uvel = 'd1' -f_vvel = 'd1' -f_uatm = 'd' -f_vatm = 'd' -f_uocn = 'd' -f_vocn = 'd' -f_strairx = 'd1' -f_strairy = 'd1' -f_strtltx = 'd1' -f_strtlty = 'd1' -f_strcorx = 'd1' -f_strcory = 'd1' -f_strocnx = 'd1' -f_strocny = 'd1' -f_strintx = 'd1' -f_strinty = 'd1' -f_taubx = 'd1' -f_tauby = 'd1' -f_divu = 'd1' -f_sig1 = 'd1' -f_sig2 = 'd1' -f_sigP = 'd1' -f_dvidtd = 'd1' diff --git a/configuration/scripts/options/set_nml.boxchan1n b/configuration/scripts/options/set_nml.boxchan1n deleted file mode 100644 index f90d4da0c..000000000 --- a/configuration/scripts/options/set_nml.boxchan1n +++ /dev/null @@ -1,56 +0,0 @@ -days_per_year = 360 -use_leap_years = .false. -npt_unit = 'd' -npt = 5 -ice_ic = 'internal' -restart_ext = .true. -histfreq = 'd','1','x','x','x' -grid_type = 'rectangular' -kmt_type = 'channel_onenorth' -dxrect = 16.e5 -dyrect = 16.e5 -close_boundaries = .false. -ew_boundary_type = 'open' -ns_boundary_type = 'cyclic' -tr_iage = .false. -tr_FY = .false. -tr_lvl = .false. -tr_pond_lvl = .false. -ktherm = -1 -atmbndy = 'constant' -atm_data_type = 'uniform_north' -ocn_data_type = 'calm' -ice_data_type = 'block' -ice_data_conc = 'p5' -ice_data_dist = 'uniform' -calc_strair = .false. -rotate_wind = .false. -restore_ice = .false. -tfrz_option = 'mushy' -f_aice = 'd1' -f_hi = 'd1' -f_hs = 'd' -f_Tsfc = 'd' -f_uvel = 'd1' -f_vvel = 'd1' -f_uatm = 'd' -f_vatm = 'd' -f_uocn = 'd' -f_vocn = 'd' -f_strairx = 'd1' -f_strairy = 'd1' -f_strtltx = 'd1' -f_strtlty = 'd1' -f_strcorx = 'd1' -f_strcory = 'd1' -f_strocnx = 'd1' -f_strocny = 'd1' -f_strintx = 'd1' -f_strinty = 'd1' -f_taubx = 'd1' -f_tauby = 'd1' -f_divu = 'd1' -f_sig1 = 'd1' -f_sig2 = 'd1' -f_sigP = 'd1' -f_dvidtd = 'd1' diff --git a/configuration/scripts/options/set_nml.boxclosed b/configuration/scripts/options/set_nml.boxclosed index d55faa302..f5ff6369c 100644 --- a/configuration/scripts/options/set_nml.boxclosed +++ b/configuration/scripts/options/set_nml.boxclosed @@ -5,27 +5,31 @@ npt = 5 ice_ic = 'internal' restart_ext = .true. histfreq = 'd','1','x','x','x' -grid_type = 'rectangular' -kmt_type = 'default' +grid_type = 'rectangular' +kmt_type = 'channel_oneeast' dxrect = 16.e5 dyrect = 16.e5 -close_boundaries = .true. -ew_boundary_type = 'open' +close_boundaries = .false. +ew_boundary_type = 'cyclic' ns_boundary_type = 'open' tr_iage = .false. tr_FY = .false. tr_lvl = .false. tr_pond_lvl = .false. ktherm = -1 -kstrength = 0 -kdyn = 1 -kridge = -1 -ktransport = -1 -coriolis = 'zero' atmbndy = 'constant' calc_strair = .false. rotate_wind = .false. restore_ice = .false. 
+tfrz_option = 'mushy' f_aice = 'd1' f_hi = 'd1' f_hs = 'd' @@ -38,16 +42,18 @@ f_uocn = 'd' f_vocn = 'd' f_strairx = 'd1' f_strairy = 'd1' -f_strtltx = 'd1' -f_strtlty = 'd1' -f_strcorx = 'd1' -f_strcory = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' f_strocnx = 'd1' f_strocny = 'd1' -f_strintx = 'd1' +f_strintx = 'd1' f_strinty = 'd1' f_taubx = 'd1' f_tauby = 'd1' f_divu = 'd1' f_sig1 = 'd1' f_sig2 = 'd1' +f_sigP = 'd1' +f_dvidtd = 'd1' diff --git a/configuration/scripts/options/set_nml.gbox180 b/configuration/scripts/options/set_nml.gbox180 index 7b139f94a..b8a07840b 100644 --- a/configuration/scripts/options/set_nml.gbox180 +++ b/configuration/scripts/options/set_nml.gbox180 @@ -1,4 +1,8 @@ -ice_ic = 'default' +ice_ic = 'internal' grid_type = 'rectangular' +kmt_type = 'default' atm_data_type = 'box2001' +ocn_data_type = 'box2001' ice_data_type = 'box2001' +ice_data_conc = 'box2001' +ice_data_dist = 'box2001' diff --git a/configuration/scripts/tests/base_suite.ts b/configuration/scripts/tests/base_suite.ts index 4da4dd110..3007380ab 100644 --- a/configuration/scripts/tests/base_suite.ts +++ b/configuration/scripts/tests/base_suite.ts @@ -21,6 +21,7 @@ restart gx3 12x2 alt03,maskhalo,droundrobin restart gx3 4x4 alt04 restart gx3 4x4 alt05 restart gx3 8x2 alt06 +restart gx3 8x3 alt07 restart gx3 18x2 debug,maskhalo restart gx3 6x2 alt01,debug,short restart gx3 8x2 alt02,debug,short @@ -29,6 +30,7 @@ smoke gx3 12x2 alt03,debug,short,maskhalo,droundrobin smoke gx3 4x4 alt04,debug,short smoke gx3 4x4 alt05,debug,short smoke gx3 8x2 alt06,debug,short +smoke gx3 8x3 alt07,debug,short smoke gx3 10x2 debug,diag1,run5day,gx3sep2 smoke gx3 7x2x5x29x12 diag1,bigdiag,run1day,debug restart gbox128 4x2 short @@ -40,8 +42,10 @@ restart gbox128 4x4 boxrestore,short smoke gbox128 4x4 boxrestore,short,debug restart gbox80 1x1 box2001 smoke gbox80 1x1 boxslotcyl +smoke gbox12 1x1x12x12x1 boxchan,diag1,debug +restart gx3 8x2 modal smoke gx3 8x2 bgcz -smoke gx3 8x2 bgcz,debug +smoke gx3 8x2 bgczm,debug smoke gx3 8x1 bgcskl,debug #smoke gx3 4x1 bgcz,thread smoke_gx3_8x2_bgcz restart gx1 4x2 bgcsklclim,medium @@ -58,9 +62,9 @@ restart gx3 4x2 fsd12,debug,short smoke gx3 8x2 fsd12ww3,diag24,run1day smoke gx3 4x1 isotope,debug restart gx3 8x2 isotope -smoke gx3 4x1 snwITDrdg,snwgrain,icdefault,debug +smoke gx3 4x1 snwitdrdg,snwgrain,icdefault,debug smoke gx3 4x1 snw30percent,icdefault,debug -restart gx3 8x2 snwITDrdg,icdefault,snwgrain +restart gx3 8x2 snwitdrdg,icdefault,snwgrain restart gx3 4x4 gx3ncarbulk,iobinary restart gx3 4x4 histall,precision8,cdf64 smoke gx3 30x1 bgcz,histall @@ -70,5 +74,6 @@ smoke gx3 8x2 diag24,run5day,zsal,debug restart gx3 8x2 zsal restart gx3 8x2 gx3ncarbulk,debug restart gx3 4x4 gx3ncarbulk,diag1 -restart gx1 24x1 gx1coreii,short smoke gx3 4x1 calcdragio +restart gx3 4x2 atmbndyconstant +restart gx3 4x2 atmbndymixed diff --git a/configuration/scripts/tests/baseline.script b/configuration/scripts/tests/baseline.script index bb8f50a1f..2700fe71f 100644 --- a/configuration/scripts/tests/baseline.script +++ b/configuration/scripts/tests/baseline.script @@ -29,7 +29,7 @@ if (${ICE_BASECOM} != ${ICE_SPVAL}) then set base_file = `ls -1t ${ICE_BASELINE}/${ICE_BASECOM}/${ICE_TESTNAME}/cice.runlog* | head -1` echo "" - echo "bfb Log Compare Mode:" + echo "Regression Log Compare Mode:" echo "base_file: ${base_file}" echo "test_file: ${test_file}" @@ -58,7 +58,7 @@ if (${ICE_BASECOM} != ${ICE_SPVAL}) then endif echo "" - echo "Regression Compare Mode:" + echo 
"Regression Restart Compare Mode:" echo "base_dir: ${base_dir}" echo "test_dir: ${test_dir}" @@ -71,7 +71,7 @@ if (${ICE_BASECOM} != ${ICE_SPVAL}) then set base_file = `ls -1t ${ICE_BASELINE}/${ICE_BASECOM}/${ICE_TESTNAME}/cice.runlog* | head -1` echo "" - echo "bfb Log Compare Mode:" + echo "Regression Log Compare Mode:" echo "base_file: ${base_file}" echo "test_file: ${test_file}" diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts index e2731dd39..d9752073f 100644 --- a/configuration/scripts/tests/gridsys_suite.ts +++ b/configuration/scripts/tests/gridsys_suite.ts @@ -1,30 +1,22 @@ # Test Grid PEs Sets BFB-compare -smoke gx3 1x1x100x116x1 reprosum,run10day -smoke gx1 32x1x16x16x32 reprosum,run10day -smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd -smoke gx1 32x1x16x16x32 reprosum,run10day,gridcd -smoke gx3 1x1x100x116x1 reprosum,run10day,gridc -smoke gx1 32x1x16x16x32 reprosum,run10day,gridc - smoke gx3 8x2 diag1,run5day smoke gx3 8x4 diag1,run5day,debug restart gx3 4x2 debug,diag1 restart2 gx1 16x2 debug,diag1 -restart tx1 40x2 diag1 smoke gbox12 1x1x12x12x1 boxchan -smoke gbox80 4x2 boxchan1e,debug -smoke gbox80 8x1 boxchan1n smoke gbox80 1x1 box2001 smoke gbox80 2x2 boxwallblock smoke gbox80 1x1 boxslotcyl smoke gbox80 2x4 boxnodyn -smoke gbox80 4x2 boxclosed,boxforcee,run1day -smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands -smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day +smoke gbox80 4x2 boxclosed,boxforcee,run1day +smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands +smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid +smoke gx3 1x1x100x116x1 reprosum,run10day smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day smoke_gx3_1x1x100x116x1_reprosum_run10day +smoke gx1 32x1x16x16x32 reprosum,run10day smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall smoke_gx1_32x1x16x16x32_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall smoke_gx1_32x1x16x16x32_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest smoke_gx1_32x1x16x16x32_reprosum_run10day @@ -33,21 +25,20 @@ smoke gx3 8x2 diag1,run5day,gridcd smoke gx3 8x4 diag1,run5day,debug,gridcd restart gx3 4x2 debug,diag1,gridcd restart2 gx1 16x2 debug,diag1,gridcd -restart tx1 40x2 diag1,gridcd smoke gbox12 1x1x12x12x1 boxchan,gridcd -smoke gbox80 4x2 boxchan1e,debug,gridcd -smoke gbox80 8x1 boxchan1n,gridcd smoke gbox80 1x1 box2001,gridcd smoke gbox80 2x2 boxwallblock,gridcd smoke gbox80 1x1 boxslotcyl,gridcd smoke gbox80 2x4 boxnodyn,gridcd -smoke gbox80 4x2 boxclosed,boxforcee,run1day,gridcd -smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands,gridcd -smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day,gridcd +smoke gbox80 4x2 boxclosed,boxforcee,run1day,gridcd +smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands,gridcd +smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day,gridcd smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid,gridcd +smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,gridcd 
smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day +smoke gx1 32x1x16x16x32 reprosum,run10day,gridcd smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day @@ -56,21 +47,20 @@ smoke gx3 8x2 diag1,run5day,gridc smoke gx3 8x4 diag1,run5day,debug,gridc restart gx3 4x2 debug,diag1,gridc restart2 gx1 16x2 debug,diag1,gridc -restart tx1 40x2 diag1,gridc smoke gbox12 1x1x12x12x1 boxchan,gridc -smoke gbox80 4x2 boxchan1e,debug,gridc -smoke gbox80 8x1 boxchan1n,gridc smoke gbox80 1x1 box2001,gridc smoke gbox80 2x2 boxwallblock,gridc smoke gbox80 1x1 boxslotcyl,gridc smoke gbox80 2x4 boxnodyn,gridc -smoke gbox80 4x2 boxclosed,boxforcee,run1day,gridc -smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands,gridc -smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day,gridc +smoke gbox80 4x2 boxclosed,boxforcee,run1day,gridc +smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands,gridc +smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day,gridc smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid,gridc +smoke gx3 1x1x100x116x1 reprosum,run10day,gridc smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day +smoke gx1 32x1x16x16x32 reprosum,run10day,gridc smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day diff --git a/configuration/scripts/tests/io_suite.ts b/configuration/scripts/tests/io_suite.ts index 4d5129578..84d064f32 100644 --- a/configuration/scripts/tests/io_suite.ts +++ b/configuration/scripts/tests/io_suite.ts @@ -8,6 +8,7 @@ restart gx3 16x2 gx3ncarbulk,alt02,histall,iobinary,precision8 restart gx3 8x4 gx3ncarbulk,alt04,histall,iobinary,precision8 restart gx3 4x4 gx3ncarbulk,alt05,histall,iobinary restart gx3 14x2 gx3ncarbulk,alt06,histall,iobinary,precision8 +restart gx3 14x2 gx3ncarbulk,alt07,histall,iobinary,precision8 restart gx3 32x1 gx3ncarbulk,bgcz,histall,iobinary,precision8 restart gx3 16x2 gx3ncarbulk,bgcskl,histall,iobinary restart gx3 14x2 gx3ncarbulk,isotope,histall,iobinary,precision8 @@ -21,7 +22,8 @@ restart gx3 24x1 alt03,histall,ionetcdf,precision8 restart gx3 8x4 alt04,histall,ionetcdf,cdf64 restart gx3 8x4 alt05,histall,ionetcdf,precision8,cdf64 restart gx3 16x2 alt06,histall,ionetcdf -restart gx3 30x1 bgcz,histall,ionetcdf +restart gx3 16x2 alt07,histall,ionetcdf +restart gx3 30x1 bgczm,histall,ionetcdf restart gx3 15x2 bgcskl,histall,ionetcdf,precision8 restart gx3 31x1 isotope,histall,ionetcdf,cdf64 restart gx3 14x2 fsd12,histall,ionetcdf,precision8 @@ -34,7 +36,8 @@ restart gx3 24x1 alt03,histall,iopio1 restart gx3 8x4 alt04,histall,iopio1,precision8,cdf64 restart gx3 8x4 alt05,histall,iopio1,cdf64 restart gx3 32x1 alt06,histall,iopio1,precision8 -restart gx3 16x2 bgcz,histall,iopio1,precision8 +restart gx3 32x1 alt07,histall,iopio1,precision8 
+restart gx3 16x2 bgczm,histall,iopio1,precision8 restart gx3 30x1 bgcskl,histall,iopio1 restart gx3 8x4 isotope,histall,iopio1,precision8,cdf64 restart gx3 12x2 fsd12,histall,iopio1,cdf64 @@ -47,7 +50,8 @@ restart gx3 24x1 alt03,histall,iopio2,precision8 restart gx3 8x4 alt04,histall,iopio2 restart gx3 8x4 alt05,histall,iopio2,precision8,cdf64 restart gx3 16x2 alt06,histall,iopio2,cdf64 -restart gx3 16x2 bgcz,histall,iopio2,cdf64 +restart gx3 16x2 alt07,histall,iopio2,cdf64 +restart gx3 16x2 bgczm,histall,iopio2,cdf64 restart gx3 30x1 bgcskl,histall,iopio2,precision8 restart gx3 8x4 isotope,histall,iopio2 restart gx3 12x2 fsd12,histall,iopio2,precision8,cdf64 @@ -60,7 +64,8 @@ restart gx3 24x1 alt03,histall,iopio1p,cdf64 restart gx3 8x4 alt04,histall,iopio1p,precision8 restart gx3 8x4 alt05,histall,iopio1p restart gx3 6x4 alt06,histall,iopio1p,precision8,cdf64 -restart gx3 16x2 bgcz,histall,iopio1p,precision8,cdf64 +restart gx3 6x4 alt07,histall,iopio1p,precision8,cdf64 +restart gx3 16x2 bgczm,histall,iopio1p,precision8,cdf64 restart gx3 30x1 bgcskl,histall,iopio1p,cdf64 restart gx3 8x4 isotope,histall,iopio1p,precision8 restart gx3 12x2 fsd12,histall,iopio1p @@ -73,7 +78,8 @@ restart gx3 24x1 alt03,histall,iopio2p,precision8,cdf64 restart gx3 8x4 alt04,histall,iopio2p,cdf64 restart gx3 8x4 alt05,histall,iopio2p,precision8 restart gx3 24x1 alt06,histall,iopio2p -restart gx3 16x2 bgcz,histall,iopio2p +restart gx3 24x1 alt07,histall,iopio2p +restart gx3 16x2 bgczm,histall,iopio2p restart gx3 30x1 bgcskl,histall,iopio2p,precision8,cdf64 restart gx3 8x4 isotope,histall,iopio2p,cdf64 restart gx3 12x2 fsd12,histall,iopio2p,precision8 diff --git a/configuration/scripts/tests/nothread_suite.ts b/configuration/scripts/tests/nothread_suite.ts index 1878e8c29..fb263a945 100644 --- a/configuration/scripts/tests/nothread_suite.ts +++ b/configuration/scripts/tests/nothread_suite.ts @@ -52,7 +52,7 @@ restart gx3 12x1 jra55_gx3,short #restart gx1 24x1 jra55_gx1,short smoke gx3 16x1 bgcz -smoke gx3 16x1 bgcz,debug +smoke gx3 16x1 bgczm,debug smoke gx3 24x1 bgcskl,debug #tcraig, hangs nodes intermittently on izumi #restart gx1 128x1 bgcsklclim,medium diff --git a/configuration/scripts/tests/omp_suite.ts b/configuration/scripts/tests/omp_suite.ts index 5d5e18376..686fa72db 100644 --- a/configuration/scripts/tests/omp_suite.ts +++ b/configuration/scripts/tests/omp_suite.ts @@ -8,7 +8,7 @@ smoke gx3 4x4 alt04,reprosum,run10day smoke gx3 4x4 alt05,reprosum,run10day smoke gx3 8x2 alt06,reprosum,run10day smoke gx3 7x2 alt07,reprosum,run10day -smoke gx3 8x2 bgcz,reprosum,run10day +smoke gx3 8x2 bgczm,reprosum,run10day smoke gx1 15x2 reprosum,run10day smoke gx1 15x2 seabedprob,reprosum,run10day smoke gx3 14x2 fsd12,reprosum,run10day @@ -34,7 +34,7 @@ smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread smoke_gx3_4x4_alt05_reprosum_run10day smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_alt06_reprosum_run10day smoke gx3 8x1 alt07,reprosum,run10day,cmplogrest,thread smoke_gx3_7x2_alt07_reprosum_run10day -smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_bgcz_reprosum_run10day +smoke gx3 8x1 bgczm,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_bgczm_reprosum_run10day smoke gx1 18x1 reprosum,run10day,cmplogrest,thread smoke_gx1_15x2_reprosum_run10day smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread smoke_gx1_15x2_reprosum_run10day_seabedprob smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread 
smoke_gx3_14x2_fsd12_reprosum_run10day @@ -62,7 +62,7 @@ smoke gx3 4x4 alt04,reprosum,run10day,gridc smoke gx3 4x4 alt05,reprosum,run10day,gridc smoke gx3 8x2 alt06,reprosum,run10day,gridc smoke gx3 7x2 alt07,reprosum,run10day,gridc -smoke gx3 8x2 bgcz,reprosum,run10day,gridc +smoke gx3 8x2 bgczm,reprosum,run10day,gridc smoke gx1 15x2 reprosum,run10day,gridc smoke gx1 15x2 seabedprob,reprosum,run10day,gridc smoke gx3 14x2 fsd12,reprosum,run10day,gridc @@ -88,7 +88,7 @@ smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread,grid smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_4x4_alt05_gridc_reprosum_run10day smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_alt06_gridc_reprosum_run10day smoke gx3 8x1 alt07,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_7x2_alt07_gridc_reprosum_run10day -smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_bgcz_gridc_reprosum_run10day +smoke gx3 8x1 bgczm,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_bgczm_gridc_reprosum_run10day smoke gx1 18x1 reprosum,run10day,cmplogrest,thread,gridc smoke_gx1_15x2_gridc_reprosum_run10day smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread,gridc smoke_gx1_15x2_gridc_reprosum_run10day_seabedprob smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_14x2_fsd12_gridc_reprosum_run10day @@ -116,7 +116,7 @@ smoke gx3 4x4 alt04,reprosum,run10day,gridcd smoke gx3 4x4 alt05,reprosum,run10day,gridcd smoke gx3 8x2 alt06,reprosum,run10day,gridcd smoke gx3 7x2 alt07,reprosum,run10day,gridcd -smoke gx3 8x2 bgcz,reprosum,run10day,gridcd +smoke gx3 8x2 bgczm,reprosum,run10day,gridcd smoke gx1 15x2 reprosum,run10day,gridcd smoke gx1 15x2 seabedprob,reprosum,run10day,gridcd smoke gx3 14x2 fsd12,reprosum,run10day,gridcd @@ -142,7 +142,7 @@ smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread,grid smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_4x4_alt05_gridcd_reprosum_run10day smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_alt06_gridcd_reprosum_run10day smoke gx3 8x1 alt07,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_7x2_alt07_gridcd_reprosum_run10day -smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_bgcz_gridcd_reprosum_run10day +smoke gx3 8x1 bgczm,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_bgczm_gridcd_reprosum_run10day smoke gx1 18x1 reprosum,run10day,cmplogrest,thread,gridcd smoke_gx1_15x2_gridcd_reprosum_run10day smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx1_15x2_gridcd_reprosum_run10day_seabedprob smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_14x2_fsd12_gridcd_reprosum_run10day diff --git a/configuration/tools/cice4_restart_conversion/convert_restarts.f90 b/configuration/tools/cice4_restart_conversion/convert_restarts.f90 index 30c952510..3717f7d86 100644 --- a/configuration/tools/cice4_restart_conversion/convert_restarts.f90 +++ b/configuration/tools/cice4_restart_conversion/convert_restarts.f90 @@ -39,12 +39,8 @@ program convert_restarts logical (kind=log_kind), parameter :: & oceanmixed_ice = .true., & ! if true, read/write ocean mixed layer fields heat_capacity = .true., & ! if true, ice has nonzero heat capacity -#ifdef UNDEPRECATE_0LAYER - ! if false, use zero-layer thermodynamics -#else ! heat_capacity = .false. (zero-layer thermodynamics) ! has been deprecated in CICE and Icepack -#endif diag = .true. ! write min/max diagnostics for fields ! 
file names diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index 99679e791..a8b9d08f1 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -9,7 +9,7 @@ This index defines many of the symbols used frequently in the CICE model code. Namelist variables are partly included here but also documented elsewhere, see Section :ref:`tabnamelist`. All quantities in the code are expressed in MKS units (temperatures may take -either Celsius or Kelvin units). +either Celsius or Kelvin units). Deprecated parameters are listed at the end. .. csv-table:: *Alphabetical Index* :header: " ", " ", " " @@ -72,7 +72,7 @@ either Celsius or Kelvin units). "awtvdf", "weighting factor for visible, diffuse albedo", "0.63282" "awtvdr", "weighting factor for visible, direct albedo", "0.00318" "**B**", "", "" - "bfb_flag", "for bit-for-bit reproducible diagnostics", "" + "bfbflag", "for bit-for-bit reproducible diagnostics, and reproducible outputs when using the VP solver", "" "bgc_data_dir", "data directory for bgc", "" "bgc_data_type", "source of silicate, nitrate data", "" "bgc_flux_type", "ice–ocean flux velocity", "" @@ -330,7 +330,7 @@ either Celsius or Kelvin units). "hpmin", "minimum melt pond depth (shortwave)", "0.005 m" "hpondn", "melt pond depth", "m" "hs_min", "minimum thickness for which :math:`T_s` is computed", "1.\ :math:`\times`\ 10\ :math:`^{-4}` m" - "hs0", "snow depth at which transition to ice occurs (dEdd)", "0.03 m" + "hs0", "snow depth at which transition to ice occurs (dEdd)", "m" "hs1", "snow depth of transition to pond ice", "0.03 m" "hs_ssl", "snow surface scattering layer thickness", "0.04 m" "Hstar", "determines mean thickness of ridged ice", "25. m" @@ -354,10 +354,10 @@ either Celsius or Kelvin units). "icells", "number of grid cells with specified property (for vectorization)", "" "iceruf", "ice surface roughness at atmosphere interface", "5.\ :math:`\times`\ 10\ :math:`^{-4}` m" "iceruf_ocn", "under-ice roughness (at ocean interface)", "0.03 m" - "iceemask", "ice extent mask (E-cell)", "" - "icenmask", "ice extent mask (N-cell)", "" - "icetmask", "ice extent mask (T-cell)", "" - "iceumask", "ice extent mask (U-cell)", "" + "iceEmask", "dynamics ice extent mask (E-cell)", "" + "iceNmask", "dynamics ice extent mask (N-cell)", "" + "iceTmask", "dynamics ice extent mask (T-cell)", "" + "iceUmask", "dynamics ice extent mask (U-cell)", "" "idate", "the date at the end of the current time step (yyyymmdd)", "" "idate0", "initial date", "" "ierr", "general-use error flag", "" @@ -699,7 +699,6 @@ either Celsius or Kelvin units). "tr_FY", "if true, use first-year area tracer", "" "tr_iage", "if true, use ice age tracer", "" "tr_lvl", "if true, use level ice area and volume tracers", "" - "tr_pond_cesm", "if true, use CESM melt pond scheme", "" "tr_pond_lvl", "if true, use level-ice melt pond scheme", "" "tr_pond_topo", "if true, use topo melt pond scheme", "" "trcr", "ice tracers", "" @@ -772,9 +771,11 @@ either Celsius or Kelvin units). "zref", "reference height for stability", "10. m" "zTrf", "reference height for :math:`T_{ref}`, :math:`Q_{ref}`, :math:`U_{ref}`", "2. m" "zvir", "gas constant (water vapor)/gas constant (air) - 1", "0.606" - -.. 
- ktherm=0 has been deprecated + "**Deprecated options and parameters**", "", "" "heat_capacity", "if true, use salinity-dependent thermodynamics", "T" "kseaice", "thermal conductivity of ice for zero-layer thermodynamics", "2.0 W/m/deg" "ktherm", "thermodynamic formulation (0 = zero-layer, 1 = :cite:`Bitz99`, 2 = mushy)", "" + "tr_pond_cesm", "if true, use CESM melt pond scheme", "" + +.. + new deprecation comments diff --git a/doc/source/science_guide/sg_dynamics.rst b/doc/source/science_guide/sg_dynamics.rst index d4e209d8a..e6b918538 100644 --- a/doc/source/science_guide/sg_dynamics.rst +++ b/doc/source/science_guide/sg_dynamics.rst @@ -5,56 +5,6 @@ Dynamics ======== -There are different approaches in the CICE code for representing sea ice -rheology and for solving the sea ice momentum equation. The viscous-plastic (VP) originally developed by :cite:`Hibler79`, -the elastic-viscous-plastic (EVP) :cite:`Hunke97` model represents a modification of the -standard viscous-plastic (VP) model for sea ice dynamics. The elastic-anisotropic-plastic (EAP) model, -on the other hand, explicitly accounts for the observed sub-continuum -anisotropy of the sea ice cover :cite:`Wilchinsky06,Weiss09`. If -``kdyn`` = 1 in the namelist then the EVP model is used (module -**ice\_dyn\_evp.F90**), while ``kdyn`` = 2 is associated with the EAP -model (**ice\_dyn\_eap.F90**), and ``kdyn`` = 3 is associated with the -VP model (**ice\_dyn\_vp.F90**). - -At times scales associated with the -wind forcing, the EVP model reduces to the VP model while the EAP model -reduces to the anisotropic rheology described in detail in -:cite:`Wilchinsky06,Tsamados13`. At shorter time scales the -adjustment process takes place in both models by a numerically more -efficient elastic wave mechanism. While retaining the essential physics, -this elastic wave modification leads to a fully explicit numerical -scheme which greatly improves the model’s computational efficiency. - -The EVP sea ice dynamics model is thoroughly documented in -:cite:`Hunke97`, :cite:`Hunke01`, -:cite:`Hunke02` and :cite:`Hunke03` and the EAP -dynamics in :cite:`Tsamados13`. Simulation results and -performance of the EVP and EAP models have been compared with the VP -model and with each other in realistic simulations of the Arctic -respectively in :cite:`Hunke99` and -:cite:`Tsamados13`. - -The EVP numerical -implementation in this code release is that of :cite:`Hunke02` -and :cite:`Hunke03`, with revisions to the numerical solver as -in :cite:`Bouillon13`. The implementation of the EAP sea ice -dynamics into CICE is described in detail in -:cite:`Tsamados13`. - -The VP solver implementation mostly follows :cite:`Lemieux08`, with -FGMRES :cite:`Saad93` as the linear solver and GMRES as the preconditioner. -Note that the VP solver has not yet been tested on the ``tx1`` grid or with -threading enabled. - -Here we summarize the equations and -direct the reader to the above references for details. - -.. _momentum: - -******** -Momentum -******** - The force balance per unit area in the ice pack is given by a two-dimensional momentum equation :cite:`Hibler79`, obtained by integrating the 3D equation through the thickness of the ice in the @@ -93,19 +43,72 @@ For clarity, the two components of Equation :eq:`vpmom` are -C_bv-mfu - mg{\partial H_\circ\over\partial y}. \end{aligned} :label: momsys +On the B grid, the equations above are solved at the U point for the collocated u and v components (see figure :ref:`fig-Bgrid`). 
On the C grid, however, the two components are not collocated: the u component is at the E point while the v component is at the N point. -A bilinear discretization is used for the stress terms +The B grid spatial discretization is based on a variational method described in :cite:`Hunke97` and :cite:`Hunke02`. A bilinear discretization is used for the stress terms :math:`\partial\sigma_{ij}/\partial x_j`, which enables the discrete equations to be derived from the continuous equations written in curvilinear coordinates. In this manner, metric terms associated with the curvature of the grid are incorporated into the discretization explicitly. Details pertaining to -the spatial discretization are found in :cite:`Hunke02`. +the spatial discretization are found in :cite:`Hunke02`. + +On the C grid, however, a finite difference approach is used for the spatial discretization. The C grid discretization is based on :cite:`Bouillon09`, :cite:`Bouillon13` and :cite:`Kimmritz16`. + +There are different approaches in the CICE code for representing sea ice +rheology and for solving the sea ice momentum equation: the viscous-plastic (VP) rheology :cite:`Hibler79` with an implicit method, +the elastic-viscous-plastic (EVP) :cite:`Hunke97` model, which represents a modification of the +VP model, the revised EVP (rEVP) approach :cite:`Lemieux12,Bouillon13`, and the elastic-anisotropic-plastic (EAP) model, which explicitly accounts for the sub-continuum +anisotropy of the sea ice cover :cite:`Wilchinsky06,Weiss09`. If +``kdyn`` = 1 in the namelist then the EVP model is used (module +**ice\_dyn\_evp.F90**), while ``kdyn`` = 2 is associated with the EAP +model (**ice\_dyn\_eap.F90**), and ``kdyn`` = 3 is associated with the +VP model (**ice\_dyn\_vp.F90**). The rEVP approach can be used by setting ``kdyn`` = 1 and ``revised_evp`` = true in the namelist. + +At time scales associated with the +wind forcing, the EVP model reduces to the VP model while the EAP model +reduces to the anisotropic rheology described in detail in +:cite:`Wilchinsky06,Tsamados13`. At shorter time scales the +adjustment process takes place in both models by a numerically more +efficient elastic wave mechanism. While retaining the essential physics, +this elastic wave modification leads to a fully explicit numerical +scheme which greatly improves the model’s computational efficiency. The rEVP is also a fully explicit scheme which by construction should lead to the VP solution. + +The EVP sea ice dynamics model is thoroughly documented in +:cite:`Hunke97`, :cite:`Hunke01`, +:cite:`Hunke02` and :cite:`Hunke03` and the EAP +dynamics in :cite:`Tsamados13`. Simulation results and +performance of the EVP and EAP models have been compared with the VP +model and with each other in realistic simulations of the Arctic +respectively in :cite:`Hunke99` and +:cite:`Tsamados13`. + +The EVP numerical +implementation in this code release is that of :cite:`Hunke02` +and :cite:`Hunke03`, with revisions to the numerical solver as +in :cite:`Bouillon13`. Details about the rEVP solver can be found in :cite:`Lemieux12`, :cite:`Bouillon13`, :cite:`Kimmritz15` and :cite:`Koldunov19`. The implementation of the EAP sea ice +dynamics into CICE is described in detail in +:cite:`Tsamados13`. + +The VP solver implementation mostly follows :cite:`Lemieux08`, with +FGMRES :cite:`Saad93` as the linear solver and GMRES as the preconditioner. +Note that the VP solver has not yet been tested on the ``tx1`` grid.
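+
+An illustrative namelist sketch of the solver selection described above (example values only; ``kdyn`` and ``revised_evp`` are set in the ``dynamics_nml`` group, see :ref:`dynamics_nml`)::
+
+   &dynamics_nml
+     kdyn        = 1        ! 1 = EVP, 2 = EAP, 3 = VP
+     revised_evp = .true.   ! together with kdyn = 1, selects rEVP
+   /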
+ +The EVP, rEVP, EAP and VP approaches are all available with the B grid. However, at the moment, only the EVP and rEVP schemes are possible with the C grid. + +Here we summarize the equations and +direct the reader to the above references for details. + +.. _momentumTS: + +********************** +Momentum time stepping +********************** .. _evp-momentum: -Elastic-Viscous-Plastic -~~~~~~~~~~~~~~~~~~~~~~~ +EVP time discretization and solution +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The momentum equation is discretized in time as follows, for the classic EVP approach. @@ -118,24 +121,23 @@ variables used in the code. .. math:: \underbrace{\left({m\over\Delta t_e}+{\tt vrel} \cos\theta\ + C_b \right)}_{\tt cca} u^{k+1} - - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb}v^{k+1} + - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb}v^{l} = &\underbrace{{\partial\sigma_{1j}^{k+1}\over\partial x_j}}_{\tt strintx} + \underbrace{\tau_{ax} - mg{\partial H_\circ\over\partial x} }_{\tt forcex} \\ &+ {\tt vrel}\underbrace{\left(U_w\cos\theta-V_w\sin\theta\right)}_{\tt waterx} + {m\over\Delta t_e}u^k, :label: umom .. math:: - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb} u^{k+1} + \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb} u^{l} + \underbrace{\left({m\over\Delta t_e}+{\tt vrel} \cos\theta + C_b \right)}_{\tt cca}v^{k+1} = &\underbrace{{\partial\sigma_{2j}^{k+1}\over\partial x_j}}_{\tt strinty} + \underbrace{\tau_{ay} - mg{\partial H_\circ\over\partial y} }_{\tt forcey} \\ &+ {\tt vrel}\underbrace{\left(U_w\sin\theta+V_w\cos\theta\right)}_{\tt watery} + {m\over\Delta t_e}v^k, :label: vmom -and :math:`{\tt vrel}\ \cdot\ {\tt waterx(y)}= {\tt taux(y)}`. +where :math:`{\tt vrel}\ \cdot\ {\tt waterx(y)}= {\tt taux(y)}` and the definitions of :math:`u^{l}` and :math:`v^{l}` vary depending on the grid. -We solve this system of equations analytically for :math:`u^{k+1}` and -:math:`v^{k+1}`. Define +As :math:`u` and :math:`v` are collocated on the B grid, :math:`u^{l}` and :math:`v^{l}` are respectively :math:`u^{k+1}` and :math:`v^{k+1}` such that this system of equations can be solved as follows. Define .. math:: \hat{u} = F_u + \tau_{ax} - mg{\partial H_\circ\over\partial x} + {\tt vrel} \left(U_w\cos\theta - V_w\sin\theta\right) + {m\over\Delta t_e}u^k @@ -169,10 +171,67 @@ where b = mf + {\tt vrel}\sin\theta. :label: cevpb +Note that the time discretization and solution method for the EAP is exactly the same as for the B grid EVP. More details on the EAP model are given in Section :ref:`stress-eap`. + +However, on the C grid, :math:`u` and :math:`v` are not collocated. When solving the :math:`u` momentum equation for :math:`u^{k+1}` (at the E point), :math:`v^{l}=v^{k}_{int}` where :math:`v^{k}_{int}` is :math:`v^{k}` from the surrounding N points interpolated to the E point. The same approach is used for the :math:`v` momentum equation. With this explicit treatment of the off-diagonal terms :cite:`Kimmritz16`, :math:`u^{k+1}` and :math:`v^{k+1}` are obtained by solving + +.. math:: + \begin{aligned} + u^{k+1} = {\hat{u} + b v^{k}_{int} \over a} \\ + v^{k+1} = {\hat{v} - b u^{k}_{int} \over a}. \end{aligned} + +.. _revp-momentum: + +Revised EVP time discretization and solution +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The revised EVP approach is based on a pseudo-time iterative scheme :cite:`Lemieux12`, :cite:`Bouillon13`, :cite:`Kimmritz15`. 
By construction, the revised EVP approach should lead to the VP solution +(given the right numerical parameters and a sufficiently large number of iterations). To do so, the inertial term is formulated such that it matches the backward Euler approach of +implicit solvers and there is an additional term for the pseudo-time iteration. Hence, with the revised approach, the discretized momentum equations :eq:`umom` and :eq:`vmom` become + +.. math:: + {\beta^*(u^{k+1}-u^k)\over\Delta t_e} + {m(u^{k+1}-u^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)} u^{k+1} + - & {\left(mf+{\tt vrel}\sin\theta\right)} v^{l} + = {{\partial\sigma_{1j}^{k+1}\over\partial x_j}} + + {\tau_{ax}} \\ + & - {mg{\partial H_\circ\over\partial x} } + + {\tt vrel} {\left(U_w\cos\theta-V_w\sin\theta\right)}, + :label: umomr + +.. math:: + {\beta^*(v^{k+1}-v^k)\over\Delta t_e} + {m(v^{k+1}-v^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)}v^{k+1} + + & {\left(mf+{\tt vrel}\sin\theta\right)} u^{l} + = {{\partial\sigma_{2j}^{k+1}\over\partial x_j}} + + {\tau_{ay}} \\ + & - {mg{\partial H_\circ\over\partial y} } + + {\tt vrel}{\left(U_w\sin\theta+V_w\cos\theta\right)}, + :label: vmomr + +where :math:`\beta^*` is a numerical parameter and :math:`u^n, v^n` are the components of the previous time level solution. +With :math:`\beta=\beta^* \Delta t \left( m \Delta t_e \right)^{-1}` :cite:`Bouillon13`, these equations can be written as + +.. math:: + \underbrace{\left((\beta+1){m\over\Delta t}+{\tt vrel} \cos\theta\ + C_b \right)}_{\tt cca} u^{k+1} + - \underbrace{\left(mf+{\tt vrel} \sin\theta\right)}_{\tt ccb} & v^{l} + = \underbrace{{\partial\sigma_{1j}^{k+1}\over\partial x_j}}_{\tt strintx} + + \underbrace{\tau_{ax} - mg{\partial H_\circ\over\partial x} }_{\tt forcex} \\ + & + {\tt vrel}\underbrace{\left(U_w\cos\theta-V_w\sin\theta\right)}_{\tt waterx} + {m\over\Delta t}(\beta u^k + u^n), + :label: umomr2 + +.. math:: + \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb} u^{l} + + \underbrace{\left((\beta+1){m\over\Delta t}+{\tt vrel} \cos\theta + C_b \right)}_{\tt cca} & v^{k+1} + = \underbrace{{\partial\sigma_{2j}^{k+1}\over\partial x_j}}_{\tt strinty} + + \underbrace{\tau_{ay} - mg{\partial H_\circ\over\partial y} }_{\tt forcey} \\ + & + {\tt vrel}\underbrace{\left(U_w\sin\theta+V_w\cos\theta\right)}_{\tt watery} + {m\over\Delta t}(\beta v^k + v^n), + :label: vmomr2 + +At this point, the solutions :math:`u^{k+1}` and :math:`v^{k+1}` for the B or the C grids are obtained in the same manner as for the standard EVP approach (see Section :ref:`evp-momentum` for details). + .. _vp-momentum: -Viscous-Plastic -~~~~~~~~~~~~~~~ +Implicit (VP) time discretization and solution +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the VP approach, equation :eq:`momsys` is discretized implicitly using a Backward Euler approach, and stresses are not computed explicitly: @@ -218,6 +277,15 @@ The Picard iterative process stops when :math:`\left\lVert \mathbf{u}_{k} \right Parameters for the FGMRES linear solver and the preconditioner can be controlled using additional namelist flags (see :ref:`dynamics_nml`). + +.. _surfstress: + +******************** +Surface stress terms +******************** + +The formulation for the wind stress is described in `Icepack Documentation `_. Below, some details about the ice-ocean stress and the seabed stress are given. + Ice-Ocean stress ~~~~~~~~~~~~~~~~ @@ -231,9 +299,8 @@ pending further testing. .. 
_seabed-stress: -*************** Seabed stress -*************** +~~~~~~~~~~~~~ CICE includes two options for calculating the seabed stress, i.e. the term in the momentum equation that represents the interaction @@ -254,49 +321,64 @@ grounding schemes. It is suggested to use a bathymetry field with water depths larger than 5 m that accurately represents shallow water (less than 30 m) regions such as the Laptev Sea and the East Siberian Sea. -Seabed stress based on linear keel draft (LKD) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +**Seabed stress based on linear keel draft (LKD)** This parameterization for the seabed stress is described in :cite:`Lemieux16`. It assumes that the largest keel draft varies linearly with the mean thickness in a grid cell (i.e. sea ice volume). The :math:`C_b` coefficients are expressed as .. math:: - C_b= k_2 \max [0,(h_u - h_{cu})] e^{-\alpha_b * (1 - a_u)} (\sqrt{u^2+v^2}+u_0)^{-1}, \\ + C_b= k_2 \max [0,(h - h_{c})] e^{-\alpha_b (1 - a)} (\sqrt{u^2+v^2}+u_0)^{-1}, \\ :label: Cb where :math:`k_2` determines the maximum seabed stress that can be sustained by the grounded parameterized ridge(s), :math:`u_0` is a small residual velocity and :math:`\alpha_b` is a parameter to ensure that the seabed stress quickly drops when -the ice concentration is smaller than 1. In the code, :math:`k_2 \max [0,(h_u - h_{cu})] e^{-\alpha_b * (1 - a_u)}` is defined as -:math:`T_b`. The quantities :math:`h_u`, :math:`a_{u}` and :math:`h_{cu}` are calculated at -the 'u' point based on local ice conditions (surrounding tracer points). They are respectively given by +the ice concentration is smaller than 1. In the code, :math:`k_2 \max [0,(h - h_{c})] e^{-\alpha_b (1 - a)}` is defined as +:math:`T_b`. + +On the B grid, the quantities :math:`h`, :math:`a` and :math:`h_{c}` are calculated at +the U point and are referred to as :math:`h_u`, :math:`a_{u}` and :math:`h_{cu}`. They are respectively given by .. math:: h_u=\max[v_i(i,j),v_i(i+1,j),v_i(i,j+1),v_i(i+1,j+1)], \\ :label: hu .. math:: - a_u=\max[a_i(i,j),a_i(i+1,j),a_i(i,j+1),a_i(i+1,j+1)]. \\ + a_u=\max[a_i(i,j),a_i(i+1,j),a_i(i,j+1),a_i(i+1,j+1)], \\ :label: au .. math:: h_{cu}=a_u h_{wu} / k_1, \\ :label: hcu -where the :math:`a_i` and :math:`v_i` are the total ice concentrations and ice volumes around the :math:`u` point :math:`i,j` and +where the :math:`a_i` and :math:`v_i` are the total ice concentrations and ice volumes around the U point :math:`i,j` and :math:`k_1` is a parameter that defines the critical ice thickness :math:`h_{cu}` at which the parameterized ridge(s) reaches the seafloor for a water depth :math:`h_{wu}=\min[h_w(i,j),h_w(i+1,j),h_w(i,j+1),h_w(i+1,j+1)]`. Given the formulation of :math:`C_b` in equation :eq:`Cb`, the seabed stress components are non-zero only when :math:`h_u > h_{cu}`. +As :math:`u` and :math:`v` are not collocated on the C grid, :math:`T_b` is calculated at E and N points. For example, at the E point, :math:`h_e`, :math:`a_{e}` and :math:`h_{ce}` are respectively + +.. math:: + h_e=\max[v_i(i,j),v_i(i+1,j)], \\ :label: he + +.. math:: + a_e=\max[a_i(i,j),a_i(i+1,j)], \\ :label: ae + +.. math:: + h_{ce}=a_e h_{we} / k_1, \\ :label: hce + +where :math:`h_{we}=\min[h_w(i,j),h_w(i+1,j)]`. Similar calculations are done at the N points. + +To prevent unrealistic grounding, :math:`T_b` is set to zero when :math:`h_{wu}` +is larger than 30 m (same idea on the C grid depending on :math:`h_{we}` and :math:`h_{wn}`).
This maximum value is chosen based on observations of large keels in the Arctic Ocean :cite:`Amundrud04`. + The maximum seabed stress depends on the weight of the ridge above hydrostatic balance and the value of :math:`k_2`. It is, however, the parameter :math:`k_1` that has the most notable impact on the simulated extent of landfast ice. The value of :math:`k_1` can be changed at runtime using the namelist variable ``k1``. -To prevent unrealistic grounding, :math:`T_b` is set to zero when :math:`h_{wu}` -is larger than 30 m. This maximum value is chosen based on observations of large -keels in the Arctic Ocean :cite:`Amundrud04`. - -Seabed stress based on probabilistic approach -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +**Seabed stress based on probabilistic approach** This more sophisticated grounding parameterization computes the seabed stress based on the probability of contact between the ice thickness distribution @@ -325,7 +407,7 @@ ITD and the seabed is given by .. math:: P_c=\int_{0}^{\infty} \int_{0}^{D(x)} g(x)b(y) dy dx \label{prob_contact}. -:math:`T_b` is first calculated at the 't' point (referred to as :math:`T_{bt}`). :math:`T_{bt}` depends on the weight of the ridge in excess of hydrostatic balance. The parameterization first calculates +:math:`T_b` is first calculated at the T point (referred to as :math:`T_{bt}`). :math:`T_{bt}` depends on the weight of the ridge in excess of hydrostatic balance. The parameterization first calculates .. math:: T_{bt}^*=\mu_s g \int_{0}^{\infty} \int_{0}^{D(x)} (\rho_i x - \rho_w @@ -336,23 +418,35 @@ and then obtains :math:`T_{bt}` by multiplying :math:`T_{bt}^*` by :math:`e^{-\a To calculate :math:`T_{bt}^*` in equation :eq:`Tbt`, :math:`f(x)` and :math:`b(y)` are discretized using many small categories (100). :math:`f(x)` is discretized between 0 and 50 m while :math:`b(y)` is truncated at plus and minus three :math:`\sigma_b`. :math:`f(x)` is also modified by setting it to zero after a certain percentile of the log-normal distribution. This percentile, which is currently set to 99.7%, notably affects the simulation of landfast ice and is used as a tuning parameter. Its impact is similar to the one of the parameter :math:`k_1` for the LKD method. -:math:`T_b` at the 'u' point is calculated from the 't' point values around it according to +On the B grid, :math:`T_b` at the U point is calculated from the T point values around it according to .. math:: - T_b=\max[T_{bt}(i,j),T_{bt}(i+1,j),T_{bt}(i,j+1),T_{bt}(i+1,j+1)]. \\ + T_{bu}=\max[T_{bt}(i,j),T_{bt}(i+1,j),T_{bt}(i,j+1),T_{bt}(i+1,j+1)]. \\ :label: Tb Following again the LKD method, the seabed stress coefficients are finally expressed as .. math:: - C_b= T_b (\sqrt{u^2+v^2}+u_0)^{-1}, \\ + C_b= T_{bu} (\sqrt{u^2+v^2}+u_0)^{-1}. \\ :label: Cb2 +On the C grid, :math:`T_b` needs to be calculated at the E and N points. :math:`T_{be}` and :math:`T_{bn}` are respectively given by + +.. math:: + T_{be}=\max[T_{bt}(i,j),T_{bt}(i+1,j)], \\ + :label: Tbe + +.. math:: + T_{bn}=\max[T_{bt}(i,j),T_{bt}(i,j+1)]. \\ + :label: Tbn + +The :math:`C_b` coefficients are different at the E and N points and are respectively :math:`T_{be} (\sqrt{u^2+v^2_{int}}+u_0)^{-1}` and :math:`T_{bn} (\sqrt{u^2_{int} + v^2}+u_0)^{-1}` where :math:`v_{int}` (:math:`u_{int}`) is :math:`v` (:math:`u`) interpolated to the E (N) point. + ..
_internal-stress: -*************** -Internal stress -*************** +******** +Rheology +******** For convenience we formulate the stress tensor :math:`\bf \sigma` in terms of :math:`\sigma_1=\sigma_{11}+\sigma_{22}`, @@ -378,15 +472,6 @@ CICE can output the internal ice pressure which is an important field to support The internal ice pressure (``sigP``) is the average of the normal stresses multiplied by :math:`-1` and is therefore simply equal to :math:`-\sigma_1/2`. -Following the approach of :cite:`Konig10` (see also :cite:`Lemieux16`), the -elliptical yield curve can be modified such that the ice has isotropic tensile strength. -The tensile strength :math:`T_p` is expressed as a fraction of the ice strength :math:`P`, that is :math:`T_p=k_t P` -where :math:`k_t` should be set to a value between 0 and 1 (this can -be changed at runtime with the namelist parameter ``Ktens``). The ice -strength :math:`P` is a function of the ice thickness distribution as -described in the `Icepack -Documentation`_. - .. _stress-vp: Viscous-Plastic @@ -395,28 +480,50 @@ Viscous-Plastic The VP constitutive law is given by .. math:: - \sigma_{ij} = 2 \eta \dot{\epsilon}_{ij} + (\zeta - \eta) D_D - P_R(1 - k_t)\frac{\delta_{ij}}{2} + \sigma_{ij} = 2 \eta \dot{\epsilon}_{ij} + (\zeta - \eta) D_D - P_R\frac{\delta_{ij}}{2} :label: vp-const -where :math:`\eta` and :math:`\zeta` are the bulk and shear viscosities. +where :math:`\eta` and :math:`\zeta` are the bulk and shear viscosities and +:math:`P_R` is a “replacement pressure” (see :cite:`Geiger98`, for example), +which serves to prevent residual ice motion due to spatial +variations of the ice strength :math:`P` when the strain rates are exactly zero. + An elliptical yield curve is used, with the viscosities given by .. math:: \zeta = {P(1+k_t)\over 2\Delta}, + :label: zeta .. math:: - \eta = {P(1+k_t)\over {2\Delta e^2}}, + \eta = e_g^{-2} \zeta, + :label: eta where .. math:: - \Delta = \left[D_D^2 + {1\over e^2}\left(D_T^2 + D_S^2\right)\right]^{1/2} + \Delta = \left[D_D^2 + {e_f^2\over e_g^4}\left(D_T^2 + D_S^2\right)\right]^{1/2}. + :label: Delta + +When the deformation :math:`\Delta` tends toward zero, the viscosities tend toward infinity. To avoid this issue, :math:`\Delta` needs to be limited and is replaced by :math:`\Delta^*` in equation :eq:`zeta`. Two methods for limiting :math:`\Delta` (or for capping the viscosities) are available in the code. If the namelist parameter ``capping_method`` is set to ``max``, :math:`\Delta^*=\max(\Delta, \Delta_{min})` :cite:`Hibler79`, while with ``capping_method`` set to ``sum``, the smoother formulation :math:`\Delta^*=(\Delta + \Delta_{min})` of :cite:`Kreyscher00` is used. -and :math:`P_R` is a “replacement pressure” (see :cite:`Geiger98`, for -example), which serves to prevent residual ice motion due to spatial -variations of :math:`P` when the rates of strain are exactly zero. +The ice strength :math:`P` is a function of the ice thickness distribution as +described in the `Icepack Documentation `_. + +Two other modifications to the standard VP rheology of :cite:`Hibler79` are available. +First, following the approach of :cite:`Konig10` (see also :cite:`Lemieux16`), the +elliptical yield curve can be modified such that the ice has isotropic tensile strength. +The tensile strength is expressed as a fraction of :math:`P`, that is :math:`k_t P` +where :math:`k_t` should be set to a value between 0 and 1 (this can +be changed at runtime with the namelist parameter ``Ktens``).
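+For example, with an illustrative ice strength of :math:`P = 2.75\times 10^{4}` N/m and ``Ktens`` :math:`= 0.2`, the ice would have an isotropic tensile strength of :math:`k_t P = 5.5\times 10^{3}` N/m.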
-The parameter :math:`e` is the ratio of the major and minor axes of the elliptical yield curve, also called the ellipse aspect ratio. It can be changed using the namelist parameter ``e_ratio``. +Second, while :math:`e_f` is the ratio of the major and minor axes of the elliptical yield curve, the parameter +:math:`e_g` characterizes the plastic potential, i.e. another ellipse that decouples the flow rule from the +yield curve (:cite:`Ringeisen21`). :math:`e_f` and :math:`e_g` are respectively called ``e_yieldcurve`` and ``e_plasticpot`` in the code and +can be set in the namelist. The plastic potential can lead to more realistic fracture angles between linear kinematic features. :cite:`Ringeisen21` suggest setting :math:`e_f` to a value larger than 1 and choosing :math:`e_g < e_f`. + +By default, the namelist parameters are set to :math:`e_f=e_g=2` and :math:`k_t=0`, which corresponds to the standard VP rheology. + +There are four options in the code for solving the sea ice momentum equation with a VP formulation: the standard EVP approach, a 1d EVP solver, the revised EVP approach, and an implicit Picard solver. The choice of the capping method for the viscosities and the modifications to the yield curve and to the flow rule described above are available for these four different solution methods. Note that only the EVP and revised EVP methods are currently available if one chooses the C grid. .. _stress-evp: @@ -428,7 +535,7 @@ regularized version of the VP constitutive law :eq:`vp-const`. The constitutive .. math:: {1\over E}{\partial\sigma_1\over\partial t} + {\sigma_1\over 2\zeta} - + {P_R(1-k_t)\over 2\zeta} = D_D, \\ + + {P_R\over 2\zeta} = D_D, \\ :label: sig1 .. math:: @@ -449,38 +556,72 @@ for elastic waves, :math:`\Delta t_e < T < \Delta t`, as .. math:: E = {\zeta\over T}, -where :math:`T=E_\circ\Delta t` and :math:`E_\circ` (eyc) is a tunable +where :math:`T=E_\circ\Delta t` and :math:`E_\circ` (elasticDamp) is a tunable parameter less than one. Including the modification proposed by :cite:`Bouillon13` for equations :eq:`sig2` and :eq:`sig12` in order to improve numerical convergence, the stress equations become .. math:: \begin{aligned} {\partial\sigma_1\over\partial t} + {\sigma_1\over 2T} - + {P_R(1-k_t)\over 2T} &=& {P(1+k_t)\over 2T\Delta} D_D, \\ - {\partial\sigma_2\over\partial t} + {\sigma_2\over 2T} &=& {P(1+k_t)\over - 2Te^2\Delta} D_T,\\ + + {P_R\over 2T} &=& {\zeta \over T} D_D, \\ + {\partial\sigma_2\over\partial t} + {\sigma_2\over 2T} &=& {\eta \over + T} D_T,\\ {\partial\sigma_{12}\over\partial t} + {\sigma_{12}\over 2T} &=& - {P(1+k_t)\over 4Te^2\Delta}D_S.\end{aligned} + {\eta \over 2T}D_S.\end{aligned} Once discretized in time, these last three equations are written as .. math:: \begin{aligned} {(\sigma_1^{k+1}-\sigma_1^{k})\over\Delta t_e} + {\sigma_1^{k+1}\over 2T} - + {P_R^k(1-k_t)\over 2T} &=& {P(1+k_t)\over 2T\Delta^k} D_D^k, \\ - {(\sigma_2^{k+1}-\sigma_2^{k})\over\Delta t_e} + {\sigma_2^{k+1}\over 2T} &=& {P(1+k_t)\over - 2Te^2\Delta^k} D_T^k,\\ + + {P_R^k\over 2T} &=& {\zeta^k\over T} D_D^k, \\ + {(\sigma_2^{k+1}-\sigma_2^{k})\over\Delta t_e} + {\sigma_2^{k+1}\over 2T} &=& {\eta^k \over + T} D_T^k,\\ {(\sigma_{12}^{k+1}-\sigma_{12}^{k})\over\Delta t_e} + {\sigma_{12}^{k+1}\over 2T} &=& - {P(1+k_t)\over 4Te^2\Delta^k}D_S^k,\end{aligned} + {\eta^k \over 2T}D_S^k,\end{aligned} :label: sigdisc where :math:`k` denotes again the subcycling step. All coefficients on the left-hand side are constant except for :math:`P_R`.
This modification compensates for the decreased efficiency of including -the viscosity terms in the subcycling. (Note that the viscosities do not -appear explicitly.) Choices of the parameters used to define :math:`E`, +the viscosity terms in the subcycling. Choices of the parameters used to define :math:`E`, :math:`T` and :math:`\Delta t_e` are discussed in Sections :ref:`revp` and :ref:`parameters`. +On the B grid, the stresses :math:`\sigma_{1}`, :math:`\sigma_{2}` and :math:`\sigma_{12}` are collocated at the U point. To calculate these stresses, the viscosities :math:`\zeta` and :math:`\eta` and the replacement pressure :math:`P_R` are also defined at the U point. + +However, on the C grid, :math:`\sigma_{1}` and :math:`\sigma_{2}` are collocated at the T point while :math:`\sigma_{12}` is defined at the U point. During a subcycling step, :math:`\zeta`, :math:`\eta` and :math:`P_R` are first calculated at the T point. To do so, :math:`\Delta` given by equation :eq:`Delta` is calculated following the approach of :cite:`Bouillon13` (see also :cite:`Kimmritz16` for details). With this approach, :math:`D_S^2` at the T point is obtained by calculating :math:`D_S^2` at the U points and interpolating these values to the T point. As :math:`\sigma_{12}` is calculated at the U point, :math:`\eta` also needs to be computed at these locations. If ``visc_method`` in the namelist is set to ``avg_zeta`` (the default value), :math:`\eta` at the U point is obtained by interpolating T point values to this location. This corresponds to the approach used by :cite:`Bouillon13` and the one associated with the C1 configuration of :cite:`Kimmritz16`. On the other hand, if ``visc_method = avg_strength``, the strength :math:`P` calculated at T points is interpolated to the U point and :math:`\Delta` is calculated at the U point in order to obtain :math:`\eta` following equations :eq:`zeta` and :eq:`eta`. This latter approach is the one used in the C2 configuration of :cite:`Kimmritz16`. + +.. _evp1d: + +1d EVP solver +~~~~~~~~~~~~~ + +The standard EVP solver iterates hundreds of times, where each iteration includes a communication through MPI and a limited number of calculations. This limits how much the solver can be optimized as the speed is primarily determined by the communication. The 1d EVP solver avoids the communication by utilizing shared memory, which removes the requirement for calls to the MPI communicator. As a consequence of this, the potential scalability of the code is improved. The performance is best on shared memory but the solver is also functional on MPI and hybrid MPI/OpenMP setups as it will run on the master processor alone. + +The scalability of geophysical models is in general terms limited by the memory usage. In order to optimize this, the 1d EVP solver solves the same equations that are outlined in the section :ref:`stress-evp` but it transforms all matrices to vectors (1d matrices) as this maps better onto the computer hardware. The vectorization and the contiguous placement of arrays in the memory make it easier for the compiler to optimize the code and pass pointers instead of copying the vectors. The 1d solver is not supported for tripole grids and the code will abort if this combination is attempted. + +.. _revp: + +Revised EVP approach +~~~~~~~~~~~~~~~~~~~~ + +Introducing the numerical parameter :math:`\alpha=2T \Delta t_e ^{-1}` :cite:`Bouillon13`, the stress equations in :eq:`sigdisc` become + +..
math:: + \begin{aligned} + {\alpha (\sigma_1^{k+1}-\sigma_1^{k})} + {\sigma_1^{k}} + + {P_R^k} &=& 2 \zeta^k D_D^k, \\ + {\alpha (\sigma_2^{k+1}-\sigma_2^{k})} + {\sigma_2^{k}} &=& 2 \eta^k D_T^k,\\ + {\alpha (\sigma_{12}^{k+1}-\sigma_{12}^{k})} + {\sigma_{12}^{k}} &=& + \eta^k D_S^k,\end{aligned} + +where as opposed to the classic EVP, the second term in each equation is at iteration :math:`k` :cite:`Bouillon13`. Also, contrary to the classic EVP, +:math:`\Delta t_e` times the number of subcycles (or iterations) does not need to be equal to the advective time step :math:`\Delta t`. +Finally, as with the classic EVP approach, the stresses are initialized using the previous time level values. +The revised EVP is activated by setting the namelist parameter ``revised_evp = true``. +In the code :math:`\alpha` is ``arlx`` and :math:`\beta` is ``brlx`` (introduced in Section :ref:`revp-momentum`). The values of ``arlx`` and ``brlx`` can be set in the namelist. +It is recommended to use large values of these parameters and to set :math:`\alpha=\beta` :cite:`Kimmritz15`. + .. _stress-eap: Elastic-Anisotropic-Plastic @@ -668,78 +809,3 @@ rheology we compute the area loss rate due to ridging as Both ridging rate and sea ice strength are computed in the outer loop of the dynamics. - -.. _revp: - -**************** -Revised approach -**************** - -The revised EVP approach is based on a pseudo-time iterative scheme :cite:`Lemieux12`, :cite:`Bouillon13`, :cite:`Kimmritz15`. By construction, the revised EVP approach should lead to the VP solution -(given the right numerical parameters and a sufficiently large number of iterations). To do so, the inertial term is formulated such that it matches the backward Euler approach of -implicit solvers and there is an additional term for the pseudo-time iteration. Hence, with the revised approach, the discretized momentum equations :eq:`umom` and :eq:`vmom` become - -.. math:: - {\beta^*(u^{k+1}-u^k)\over\Delta t_e} + {m(u^{k+1}-u^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)} u^{k+1} - - {\left(mf+{\tt vrel}\sin\theta\right)} v^{k+1} - = {{\partial\sigma_{1j}^{k+1}\over\partial x_j}} - + {\tau_{ax} - mg{\partial H_\circ\over\partial x} } - + {\tt vrel} {\left(U_w\cos\theta-V_w\sin\theta\right)}, - :label: umomr - -.. math:: - {\beta^*(v^{k+1}-v^k)\over\Delta t_e} + {m(v^{k+1}-v^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)}v^{k+1} - + {\left(mf+{\tt vrel}\sin\theta\right)} u^{k+1} - = {{\partial\sigma_{2j}^{k+1}\over\partial x_j}} - + {\tau_{ay} - mg{\partial H_\circ\over\partial y} } - + {\tt vrel}{\left(U_w\sin\theta+V_w\cos\theta\right)}, - :label: vmomr - -where :math:`\beta^*` is a numerical parameter and :math:`u^n, v^n` are the components of the previous time level solution. -With :math:`\beta=\beta^* \Delta t \left( m \Delta t_e \right)^{-1}` :cite:`Bouillon13`, these equations can be written as - -.. math:: - \underbrace{\left((\beta+1){m\over\Delta t}+{\tt vrel} \cos\theta\ + C_b \right)}_{\tt cca} u^{k+1} - - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb}v^{k+1} - = \underbrace{{\partial\sigma_{1j}^{k+1}\over\partial x_j}}_{\tt strintx} - + \underbrace{\tau_{ax} - mg{\partial H_\circ\over\partial x} }_{\tt forcex} - + {\tt vrel}\underbrace{\left(U_w\cos\theta-V_w\sin\theta\right)}_{\tt waterx} + {m\over\Delta t}(\beta u^k + u^n), - :label: umomr2 - -.. 
math:: - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb} u^{k+1} - + \underbrace{\left((\beta+1){m\over\Delta t}+{\tt vrel} \cos\theta + C_b \right)}_{\tt cca}v^{k+1} - = \underbrace{{\partial\sigma_{2j}^{k+1}\over\partial x_j}}_{\tt strinty} - + \underbrace{\tau_{ay} - mg{\partial H_\circ\over\partial y} }_{\tt forcey} - + {\tt vrel}\underbrace{\left(U_w\sin\theta+V_w\cos\theta\right)}_{\tt watery} + {m\over\Delta t}(\beta v^k + v^n), - :label: vmomr2 - -At this point, the solutions :math:`u^{k+1}` and :math:`v^{k+1}` are obtained in the same manner as for the standard EVP approach (see equations :eq:`cevpuhat` to :eq:`cevpb`). - -Introducing another numerical parameter :math:`\alpha=2T \Delta t_e ^{-1}` :cite:`Bouillon13`, the stress equations in :eq:`sigdisc` become - -.. math:: - \begin{aligned} - {\alpha (\sigma_1^{k+1}-\sigma_1^{k})} + {\sigma_1^{k}} - + {P_R^k(1-k_t)} &=& {P(1+k_t)\over \Delta^k} D_D^k, \\ - {\alpha (\sigma_2^{k+1}-\sigma_2^{k})} + {\sigma_2^{k}} &=& {P(1+k_t)\over - e^2\Delta^k} D_T^k,\\ - {\alpha (\sigma_{12}^{k+1}-\sigma_{12}^{k})} + {\sigma_{12}^{k}} &=& - {P(1+k_t)\over 2e^2\Delta^k}D_S^k,\end{aligned} - -where as opposed to the classic EVP, the second term in each equation is at iteration :math:`k` :cite:`Bouillon13`. Also, as opposed to the classic EVP, -:math:`\Delta t_e` times the number of subcycles (or iterations) does not need to be equal to the advective time step :math:`\Delta t`. -Finally, as with the classic EVP approach, the stresses are initialized using the previous time level values. -The revised EVP is activated by setting the namelist parameter ``revised_evp = true``. -In the code :math:`\alpha` is ``arlx`` and :math:`\beta` is ``brlx``. The values of ``arlx`` and ``brlx`` can be set in the namelist. -It is recommended to use large values of these parameters and to set :math:`\alpha=\beta` :cite:`Kimmritz15`. - -.. _evp1d: - -**************** -1d EVP solver -**************** - -The standard EVP solver iterates hundreds of times, where each iteration includes a communication through MPI and a limited number of calculations. This limits how much the solver can be optimized as the speed is primarily determined by the communication. The 1d EVP solver avoids the communication by utilizing shared memory, which removes the requirement for calls to the MPI communicator. As a consequence of this the potential scalability of the code is improved. The performance is best on shared memory but the solver is also functional on MPI and hybrid MPI/OpenMP setups as it will run on the master processor alone. - -The scalability of geophysical models is in general terms limited by the memory usage. In order to optimize this the 1d EVP solver solves the same equations that are outlined in the section :ref:`stress-evp` but it transforms all matrices to vectors (1d matrices) as this compiles better with the computer hardware. The vectorization and the contiguous placement of arrays in the memory makes it easier for the compiler to optimize the code and pass pointers instead of copying the vectors. The 1d solver is not supported for tripole grids and the code will abort if this combination is attempted. 
diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 64264613c..a34c69822 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -247,7 +247,9 @@ grid_nml "", "``pop``", "pop thickness file in cm in ascii format", "" "``close_boundaries``", "logical", "force two gridcell wide land mask on boundaries for rectangular grids", "``.false.``" "``dxrect``", "real", "x-direction grid spacing for rectangular grid in cm", "0.0" + "``dxscale``", "real", "user defined rectgrid x-grid scale factor", "1.0" "``dyrect``", "real", "y-direction grid spacing for rectangular grid in cm", "0.0" + "``dyscale``", "real", "user defined rectgrid y-grid scale factor", "1.0" "``gridcpl_file``", "string", "input file for coupling grid info", "'unknown_gridcpl_file'" "``grid_atm``", "``A``", "atm forcing/coupling grid, all fields on T grid", "``A``" "", "``B``", "atm forcing/coupling grid, thermo fields on T grid, dyn fields on U grid", "" @@ -277,12 +279,15 @@ grid_nml "", "default", "ocean/land mask set internally, land in upper left and lower right of domain, ", "" "", "file", "ocean/land mask setup read from file, see kmt_file", "" "", "wall", "ocean/land mask set at right edge of domain", "" + "``latrefrect``","real","lower left corner lat for rectgrid in deg", "71.35" + "``lonrefrect``","real","lower left corner lon for rectgrid in deg", "-156.5" "``nblyr``", "integer", "number of zbgc layers", "0" "``ncat``", "integer", "number of ice thickness categories", "0" "``nfsd``", "integer", "number of floe size categories", "1" "``nilyr``", "integer", "number of vertical layers in ice", "0" "``nslyr``", "integer", "number of vertical layers in snow", "0" "``orca_halogrid``", "logical", "use orca haloed grid for data/grid read", "``.false.``" + "``scale_dxdy``", "logical", "apply dxscale, dyscale to rectgrid", "``false``" "``use_bathymetry``", "logical", "use read in bathymetry file for seabedstress option", "``.false.``" "", "", "", "" @@ -398,9 +403,6 @@ thermo_nml "``Rac_rapid_mode``", "real", "critical Rayleigh number", "10.0" "", "", "", "" -.. - ktherm=0 has been deprecated - "", "``0``", "zero-layer thermodynamic model", "" .. 
_dynamics_nml: @@ -464,7 +466,7 @@ dynamics_nml "``k1``", "real", "1st free parameter for landfast parameterization", "7.5" "``k2``", "real", "2nd free parameter (N/m\ :math:`^3`) for landfast parameterization", "15.0" "``maxits_fgmres``", "integer", "maximum number of restarts for FGMRES solver", "1" - "``maxits_nonlin``", "integer", "maximum number of nonlinear iterations for VP solver", "1000" + "``maxits_nonlin``", "integer", "maximum number of nonlinear iterations for VP solver", "10" "``maxits_pgmres``", "integer", "maximum number of restarts for PGMRES preconditioner", "1" "``monitor_fgmres``", "logical", "write velocity norm at each FGMRES iteration", "``.false.``" "``monitor_nonlin``", "logical", "write velocity norm at each nonlinear iteration", "``.false.``" @@ -477,7 +479,7 @@ dynamics_nml "", "``ident``", "Don't use a preconditioner for the FGMRES solver", "" "", "``pgmres``", "Use GMRES as preconditioner for FGMRES solver", "" "``Pstar``", "real", "constant in Hibler strength formula (N/m\ :math:`^2`)", "2.75e4" - "``reltol_fgmres``", "real", "relative tolerance for FGMRES solver", "1e-2" + "``reltol_fgmres``", "real", "relative tolerance for FGMRES solver", "1e-1" "``reltol_nonlin``", "real", "relative tolerance for nonlinear solver", "1e-8" "``reltol_pgmres``", "real", "relative tolerance for PGMRES preconditioner", "1e-6" "``revised_evp``", "logical", "use revised EVP formulation", "``.false.``" @@ -533,7 +535,7 @@ ponds_nml "``frzpnd``", "``cesm``", "CESM pond refreezing forumulation", "``cesm``" "", "``hlid``", "Stefan refreezing with pond ice thickness", "" "``hp1``", "real", "critical ice lid thickness for topo ponds in m", "0.01" - "``hs0``", "real", "snow depth of transition to bare sea ice in m", "0.03" + "``hs0``", "real", "snow depth of transition to bare sea ice in m", "" "``hs1``", "real", "snow depth of transition to pond ice in m", "0.03" "``pndaspect``", "real", "aspect ratio of pond changes (depth:area)", "0.8" "``rfracmax``", ":math:`0 \le r_{max} \le 1`", "maximum melt water added to ponds", "0.85" diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index 5d6460126..a7cc66948 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -81,8 +81,8 @@ this tool. Grid, boundary conditions and masks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The spatial discretization is specialized for a generalized orthogonal -B-grid as in :cite:`Murray96` or +The spatial discretization of the original implementation is specialized +for a generalized orthogonal B-grid as in :cite:`Murray96` or :cite:`Smith95`. Figure :ref:`fig-Bgrid` is a schematic of CICE B-grid. This cell with the tracer point :math:`t(i,j)` in the middle is referred to as T-cell. The ice and snow area, volume and energy are @@ -92,18 +92,17 @@ corner. The other corners of the T-cell are northwest (NW), southwest (SW) and southeast (SE). The lengths of the four edges of the T-cell are respectively HTN, HTW, HTS and HTE for the northern, western, southern and eastern edges. The lengths of the T-cell through the -middle are respectively dxt and dyt along the x and y axis. +middle are respectively dxT and dyT along the x and y axis. We also occasionally refer to “U-cells,” which are centered on the northeast corner of the corresponding T-cells and have velocity in the center of each. The velocity components are aligned along grid lines. 
The internal ice stress tensor takes four different values within a grid -cell; bilinear approximations are used for the stress tensor and the ice +cell with the B-grid implementation; bilinear approximations are used for the stress tensor and the ice velocity across the cell, as described in :cite:`Hunke02`. This tends to avoid the grid decoupling problems associated with the -B-grid. EVP is available on the C-grid through the MITgcm code -distribution, http://mitgcm.org/viewvc/MITgcm/MITgcm/pkg/seaice/. +B-grid. .. _fig-Bgrid: @@ -113,7 +112,24 @@ distribution, http://mitgcm.org/viewvc/MITgcm/MITgcm/pkg/seaice/. Schematic of CICE B-grid. -The user has several choices of grid routines: *popgrid* reads grid +The ability to solve on the C and CD grids was added later. With the C-grid, +the u velocity points are located on the E edges and the v velocity points +are located on the N edges of the T cell rather than at the T cell corners. +On the CD-grid, the u and v velocity points are located on both the N and E edges. +To support this capability, N and E grids were added to the existing T and U grids, +and the N and E grids are defined at the northern and eastern edges of the T cell. +This is shown in Figure :ref:`fig-Cgrid`. + +.. _fig-Cgrid: + +.. figure:: ./figures/CICE_Cgrid.png + :align: center + :scale: 55% + + Schematic of CICE CD-grid. + + +The user has several ways to initialize the grid: *popgrid* reads grid lengths and other parameters for a nonuniform grid (including tripole and regional grids), and *rectgrid* creates a regular rectangular grid. The input files **global\_gx3.grid** and **global\_gx3.kmt** contain the @@ -124,6 +140,35 @@ and **global\_tx1.kmt** contain the :math:`\left<1^\circ\right>` POP tripole grid and land mask. These are binary unformatted, direct access, Big Endian files. +The same input grid file is used for the B-grid and the CD-grid. That file +contains each cell's HTN, HTE, ULON, ULAT, and kmt value. From those +variables, the longitude, latitude, grid lengths (dx and dy), areas, +and masks can be derived for all grids. Table :ref:`tab-gridvars` lists +the primary prognostic grid variable names on the different grids. + +.. _tab-gridvars: + +.. table:: Primary CICE Prognostic Grid Variable Names + + +----------------+-------+-------+-------+-------+ + | variable | T | U | N | E | + +================+=======+=======+=======+=======+ + | longitude | TLON | ULON | NLON | ELON | + +----------------+-------+-------+-------+-------+ + | latitude | TLAT | ULAT | NLAT | ELAT | + +----------------+-------+-------+-------+-------+ + | dx | dxT | dxU | dxN | dxE | + +----------------+-------+-------+-------+-------+ + | dy | dyT | dyU | dyN | dyE | + +----------------+-------+-------+-------+-------+ + | area | tarea | uarea | narea | earea | + +----------------+-------+-------+-------+-------+ + | mask (logical) | tmask | umask | nmask | emask | + +----------------+-------+-------+-------+-------+ + | mask (real) | hm | uvm | npm | epm | + +----------------+-------+-------+-------+-------+ + + In CESM, the sea ice model may exchange coupling fluxes using a different grid than the computational grid. This functionality is activated using the namelist variable ``gridcpl_file``. @@ -267,6 +312,34 @@ Special treatment is also required in the scattering routine, and when computing global sums one of each pair of coincident points has to be excluded. +***************** +Rectangular grids +***************** + +Rectangular test grids can be defined for CICE.
+internally and defined by several namelist
+settings including ``grid_type`` = ``rectangular``, ``nx_global``, ``ny_global``,
+``dx_rect``, ``dy_rect``, ``lonrefrect``, and ``latrefrect``. Forcing and
+initial conditions can be set via the namelist variables ``atm_data_type``, ``ocn_data_type``,
+``ice_data_type``, ``ice_data_conc``, and ``ice_data_dist``. Variable grid spacing
+is also supported with the namelist settings ``scale_dxdy``, which turns on
+the option, and ``dxscale`` and ``dyscale``, which set the variable grid scaling
+factors. Values of 1.0 will produce constant grid spacing. For rectangular grids,
+``lonrefrect`` and ``latrefrect`` define the lower left longitude and latitude
+values of the grid, ``dx_rect`` and ``dy_rect`` define the base grid spacing, and
+``dxscale`` and ``dyscale`` provide the grid spacing scaling. The base spacing
+is set in the center of the rectangular domain and the scaling is applied symmetrically
+outward as a multiplicative factor in the x and y directions.
+
+Several predefined rectangular grids are available in CICE with
+**cice.setup --grid** including ``gbox12``, ``gbox80``, ``gbox128``, and ``gbox180``,
+where 12, 80, 128, and 180 are the number of gridcells in each direction.
+Several predefined options also exist, set with **cice.setup --set**, to
+establish varied idealized configurations of box tests including ``box2001``,
+``boxadv``, ``boxchan``, ``boxnodyn``, ``boxrestore``, ``boxslotcyl``,
+``boxopen``, ``boxclosed``, and ``boxforcee``. See **cice.setup --help** for a current
+list of supported settings.
+
 **************
 Vertical Grids
 **************

@@ -287,14 +360,20 @@ routines, is adopted from POP.
 The boundary routines perform boundary communications among processors
 when MPI is in use and among blocks whenever there is more than one
 block per processor.

-Open/cyclic boundary conditions are the default in CICE. Closed boundary
-conditions are not supported currently. The physical
-domain can still be closed using the land mask and this can be done in
-namelist with the ``close_boundaries`` namelist which forces the mask
-on the boundary to land for a two gridcell depth. In our bipolar,
-displaced-pole grids, one row of grid cells along the north and south
-boundaries is located on land, and along east/west domain boundaries not
-masked by land, periodic conditions wrap the domain around the globe.
+Boundary conditions are defined by the ``ns_boundary_type`` and ``ew_boundary_type``
+namelist inputs. Valid values are ``open`` and ``cyclic``. In addition,
+``tripole`` and ``tripoleT`` are options for the ``ns_boundary_type``.
+Closed boundary conditions are not supported currently.
+The domain can be physically closed with the ``close_boundaries``
+namelist option, which forces a land mask on the boundary with a two gridcell depth.
+Where the boundary is land, the boundary type settings play no role.
+For example, in the displaced-pole grids, at least one row of grid cells along the north
+and south boundaries is land. Along the east/west domain boundaries not
+masked by land, periodic conditions wrap the domain around the globe. In
+this example, the appropriate namelist settings are ``ns_boundary_type`` = ``open``,
+``ew_boundary_type`` = ``cyclic``, and ``close_boundaries`` = ``.false.``.
+
 CICE can be run on regional grids with open boundary conditions; except
 for variables describing grid lengths, non-land halo cells along the
 grid edge must be filled by restoring them to specified values. The
@@ -327,27 +406,36 @@ testing.
 Masks
 *****

-A land mask hm (:math:`M_h`) is specified in the cell centers, with 0
-representing land and 1 representing ocean cells. A corresponding mask
-uvm (:math:`M_u`) for velocity and other corner quantities is given by
+A land mask hm (:math:`M_h`) is specified in the cell centers (on the
+T-grid), with 0
+representing land and 1 representing ocean cells. Corresponding masks
+for the U, N, and E grids are given by

 .. math::
    M_u(i,j)=\min\{M_h(l),\,l=(i,j),\,(i+1,j),\,(i,j+1),\,(i+1,j+1)\}.

-The logical masks ``tmask`` and ``umask`` (which correspond to the real masks
-``hm`` and ``uvm``, respectively) are useful in conditional statements.
+.. math::
+   M_n(i,j)=\min\{M_h(l),\,l=(i,j),\,(i,j+1)\}.
+
+.. math::
+   M_e(i,j)=\min\{M_h(l),\,l=(i,j),\,(i+1,j)\}.
+
+The logical masks ``tmask``, ``umask``, ``nmask``, and ``emask``
+(which correspond to the real masks ``hm``, ``uvm``, ``npm``, and ``epm``,
+respectively) are useful in conditional statements.

 In addition to the land masks, two other masks are implemented in
 *dyn\_prep* in order to reduce the dynamics component’s work on a global
-grid. At each time step the logical masks ``ice_tmask`` and ``ice_umask`` are
+grid. At each time step the logical masks ``iceTmask`` and ``iceUmask`` are
 determined from the current ice extent, such that they have the value
 “true” wherever ice exists. They also include a border of cells around
 the ice pack for numerical purposes. These masks are used in the
 dynamics component to prevent unnecessary calculations on grid points
 where there is no ice. They are not used in the thermodynamics
 component, so that ice may form in previously ice-free cells. Like the
-land masks ``hm`` and ``uvm``, the ice extent masks ``ice_tmask`` and ``ice_umask``
-are for T-cells and U-cells, respectively.
+land masks ``hm`` and ``uvm``, the ice extent masks ``iceTmask`` and ``iceUmask``
+are for T-cells and U-cells, respectively. Note that the ice extent masks
+``iceEmask`` and ``iceNmask`` are also defined when using the C or CD grid.

 Improved parallel performance may result from utilizing halo masks for
 boundary updates of the full ice state, incremental remapping transport,
@@ -363,6 +451,122 @@ or southern hemispheres, respectively. Special constants (``spval`` and
 points in the history files and diagnostics.


+.. _interpolation:
+
+****************************
+Interpolating between grids
+****************************
+
+Fields in CICE are generally defined at particular grid locations, such as T cell centers,
+U corners, or N or E edges. These are assigned internally in CICE based on the ``grid_ice``
+namelist variable. Forcing/coupling fields are also associated with a
+specific set of grid locations that may or may not be the same as on the internal CICE model grid.
+The namelist variables ``grid_atm`` and ``grid_ocn`` define the forcing/coupling grids.
+The ``grid_ice``, ``grid_atm``, and ``grid_ocn`` variables are independent and take
+values like ``A``, ``B``, ``C``, or ``CD`` consistent with the Arakawa grid convention :cite:`Arakawa77`.
+The relationship between the grid system and the internal grids is shown in :ref:`tab-gridsys`.
+
+.. _tab-gridsys:
+
+.. table:: Grid System and Type Definitions
+   :align: center
+
+   +--------------+----------------+----------------+----------------+
+   | grid system  | thermo grid    | u dynamic grid | v dynamic grid |
+   +==============+================+================+================+
+   | A            | T              | T              | T              |
+   +--------------+----------------+----------------+----------------+
+   | B            | T              | U              | U              |
+   +--------------+----------------+----------------+----------------+
+   | C            | T              | E              | N              |
+   +--------------+----------------+----------------+----------------+
+   | CD           | T              | N+E            | N+E            |
+   +--------------+----------------+----------------+----------------+
+
+For all grid systems, thermodynamic variables are always defined on the ``T`` grid, for both the model
+and the model forcing/coupling fields. However, the grid locations of the dynamics u and v fields vary.
+On the ``CD`` grid, there are twice as many u and v fields as on the other grids. Within the CICE model,
+the variables ``grid_ice_thrm``, ``grid_ice_dynu``, ``grid_ice_dynv``, ``grid_atm_thrm``,
+``grid_atm_dynu``, ``grid_atm_dynv``, ``grid_ocn_thrm``, ``grid_ocn_dynu``, and ``grid_ocn_dynv`` are
+character strings (``T``, ``U``, ``N``, ``E``, ``NE``) derived from the ``grid_ice``, ``grid_atm``,
+and ``grid_ocn`` namelist values.
+
+The CICE model has several internal methods that will interpolate (a.k.a. map or average) fields on
+(``T``, ``U``, ``N``, ``E``, ``NE``) grids to (``T``, ``U``, ``N``, ``E``). An interpolation
+to an identical grid results in a field copy. The generic interface to this method is ``grid_average_X2Y``,
+and there are several forms.
+
+.. code-block:: fortran
+
+   subroutine grid_average_X2Y(type,work1,grid1,work2,grid2)
+      character(len=*)    , intent(in)  :: type          ! mapping type (S, A, F)
+      real (kind=dbl_kind), intent(in)  :: work1(:,:,:)  ! input field(nx_block, ny_block, max_blocks)
+      character(len=*)    , intent(in)  :: grid1         ! work1 grid (T, U, N, E)
+      real (kind=dbl_kind), intent(out) :: work2(:,:,:)  ! output field(nx_block, ny_block, max_blocks)
+      character(len=*)    , intent(in)  :: grid2         ! work2 grid (T, U, N, E)
+
+where ``type`` is an interpolation type with the following valid values:
+
+``type`` = ``S`` is a normalized, masked, area-weighted interpolation
+
+.. math::
+   work2 = \frac{\sum_{i=1}^{n} (M_{1i}A_{1i}work1_{i})} {\sum_{i=1}^{n} (M_{1i}A_{1i})}
+
+``type`` = ``A`` is a normalized, unmasked, area-weighted interpolation
+
+.. math::
+   work2 = \frac{\sum_{i=1}^{n} (A_{1i}work1_{i})} {\sum_{i=1}^{n} (A_{1i})}
+
+``type`` = ``F`` is a normalized, unmasked, conservative flux interpolation
+
+.. math::
+   work2 = \frac{\sum_{i=1}^{n} (A_{1i}work1_{i})} {n*A_{2}}
+
+with :math:`A` defined as the appropriate gridcell area and :math:`M` as the gridcell mask.
+Another form of the ``grid_average_X2Y`` interface is
+
+.. code-block:: fortran
+
+   subroutine grid_average_X2Y(type,work1,grid1,wght1,mask1,work2,grid2)
+      character(len=*)    , intent(in)  :: type          ! mapping type (S, A, F)
+      real (kind=dbl_kind), intent(in)  :: work1(:,:,:)  ! input field(nx_block, ny_block, max_blocks)
+      real (kind=dbl_kind), intent(in)  :: wght1(:,:,:)  ! input weight(nx_block, ny_block, max_blocks)
+      real (kind=dbl_kind), intent(in)  :: mask1(:,:,:)  ! input mask(nx_block, ny_block, max_blocks)
+      character(len=*)    , intent(in)  :: grid1         ! work1 grid (T, U, N, E)
+      real (kind=dbl_kind), intent(out) :: work2(:,:,:)  ! output field(nx_block, ny_block, max_blocks)
+      character(len=*)    , intent(in)  :: grid2         ! work2 grid (T, U, N, E)
+
+In this case, the input arrays ``wght1`` and ``mask1`` are used in the interpolation equations instead of gridcell
+area and mask. This version allows the user to define the weights and mask
+explicitly. This implementation is supported only for ``type`` = ``S`` or ``A`` interpolations.
+
+A final form of the ``grid_average_X2Y`` interface is
+
+.. code-block:: fortran
+
+   subroutine grid_average_X2Y(type,work1a,grid1a,work1b,grid1b,work2,grid2)
+      character(len=*)    , intent(in)  :: type           ! mapping type (S, A, F)
+      real (kind=dbl_kind), intent(in)  :: work1a(:,:,:)  ! input field(nx_block, ny_block, max_blocks)
+      character(len=*)    , intent(in)  :: grid1a         ! work1a grid (N, E)
+      real (kind=dbl_kind), intent(in)  :: work1b(:,:,:)  ! input field(nx_block, ny_block, max_blocks)
+      character(len=*)    , intent(in)  :: grid1b         ! work1b grid (N, E)
+      real (kind=dbl_kind), intent(out) :: work2(:,:,:)   ! output field(nx_block, ny_block, max_blocks)
+      character(len=*)    , intent(in)  :: grid2          ! work2 grid (T, U)
+
+This version supports mapping from an ``NE`` grid to a ``T`` or ``U`` grid. In this case, the ``1a`` arguments
+are for either the ``N`` or ``E`` field and the ``1b`` arguments are for the complementary field (``E`` or ``N``,
+respectively). At present, only ``S`` type mappings are supported with this interface.
+
+In all cases, the ``work1``, ``wght1``, and ``mask1`` input arrays should have correct halo values when the
+interface is called. Examples of usage
+can be found in the source code, but the following example maps the ``uocn`` and ``vocn`` fields from their native
+forcing/coupling grid to the ``U`` grid using a masked, area-weighted average method.
+
+.. code-block:: fortran
+
+   call grid_average_X2Y('S', uocn, grid_ocn_dynu, uocnU, 'U')
+   call grid_average_X2Y('S', vocn, grid_ocn_dynv, vocnU, 'U')
+

 .. _performance:

 ***************

@@ -462,7 +666,9 @@ block equally.
 This is useful in POP which always has work in
 each block and is written with a lot of array syntax requiring
 calculations over entire blocks (whether or not land is
 present). This option is provided in CICE as well for
-direct-communication compatibility with POP. The ‘latitude’ option
+direct-communication compatibility with POP. Blocks that contain 100%
+land grid cells are eliminated with 'block'. The 'blockall' option is identical
+to 'block' but does not do land block elimination. The ‘latitude’ option
 weights the blocks based on latitude and the number of ocean grid
 cells they contain. Many of the non-cartesian decompositions support
 automatic land block elimination and provide alternative ways to
@@ -587,7 +793,8 @@ The internal variables ``istep``, ``istep0``, and ``istep1`` keep track
 of the number of timesteps. ``istep`` is the counter for the current
 run and is set to 0 at the start of each run. ``istep0`` is the step
 count at the start of a long multi-restart run, and
-``istep1`` is the step count of a long multi-restart run.
+``istep1`` is the step count of a long multi-restart run and
+is continuous across model restarts.

 In general, the time manager should be advanced by calling
 *advance\_timestep*. This subroutine in **ice\_calendar.F90**
@@ -599,10 +806,68 @@ The namelist variable ``use_restart_time`` specifies whether to use the
 time and step numbers saved on a restart file or whether to set the
 initial model time to the namelist values defined by ``year_init``,
 ``month_init``, ``day_init``, and ``sec_init``.
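+For example, a minimal **ice_in** fragment (the values here are hypothetical,
+shown only for illustration) that takes the initial model time from the
+namelist rather than from a restart file:
+
+.. code-block:: fortran
+
+   ! hypothetical example values, not CICE defaults
+   use_restart_time = .false.
+   year_init        = 2005
+   month_init       = 1
+   day_init         = 1
+   sec_init         = 0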
-Normally, ``use_restart_time`` is set to false on the initial run
-and then set to true on subsequent restart runs of the same
-case to allow time to advance thereafter. More information about
-the restart capability can be found here, :ref:`restartfiles`.
+Normally, ``use_restart_time`` is set to false on the initial run.
+In continue mode, ``use_restart_time`` is ignored and the restart
+date is always used to initialize the model run.
+More information about the restart capability can be found in :ref:`restartfiles`.
+
+Several different calendars are supported including noleap (365 days
+per year), 360-day (twelve 30-day months per year), and gregorian
+(leap days every 4 years, except century years, which are leap years
+only if divisible by 400). The gregorian calendar in CICE is formally a proleptic gregorian
+calendar without any discontinuities over time. The calendar is set
+by specifying ``days_per_year`` and ``use_leap_years`` in the
+namelist, and the following combinations are supported,
+
+.. _tab-cal:
+
+.. table:: Supported Calendar Options
+
+   +----------------------+----------------------+------------+
+   | ``days_per_year``    | ``use_leap_years``   | calendar   |
+   +======================+======================+============+
+   | 365                  | false                | noleap     |
+   +----------------------+----------------------+------------+
+   | 365                  | true                 | gregorian  |
+   +----------------------+----------------------+------------+
+   | 360                  | false                | 360-day    |
+   +----------------------+----------------------+------------+
+
+
+The history (:ref:`history`) and restart (:ref:`restartfiles`)
+outputs and frequencies are specified in the namelist and
+are computed relative to a reference date defined by the namelist
+variables ``histfreq_base`` and ``dumpfreq_base``. Valid values for each are
+`zero` and `init`. If set to `zero`, all output will be relative
+to the absolute reference year-month-day date, 0000-01-01. This is the default
+value for ``histfreq_base``, so runs with different initial
+dates will write output at the same absolute times. If ``histfreq_base`` or
+``dumpfreq_base`` is set to `init`, all frequencies
+will be relative to the model initial date specified by ``year_init``,
+``month_init``, and ``day_init``. ``sec_init`` plays no role
+in setting output frequencies. `init` is the default for
+``dumpfreq_base`` and makes it easy to generate restarts
+5 or 10 model days after startup, as is often done in testing.
+
+In general, output is always
+written at the start of the year, month, day, or hour without
+any ability to shift the phase. For instance, monthly output
+is always written on the first of the month. It is not possible,
+for example, to write monthly data once a month on the 10th of the month.
+In the same way, the choice of quarters for quarterly data (Dec-Jan-Feb
+versus Jan-Feb-Mar) is not easily controlled. A better approach is to create monthly
+data and then to aggregate to quarters as a post-processing step.
+The history and restart (``histfreq``, ``dumpfreq``) setting `1`
+indicates output at a frequency of timesteps. This is the character
+`1` as opposed to the integer 1. This frequency output
+is computed using ``istep1``, the model timestep counter, so the output
+times may vary with each run depending on several factors including the
+model timestep, initial date, and value of ``istep0``.
+
+The model year is limited by integer math, in particular the calculation
+of elapsed hours in **ice\_calendar.F90**: the model year is
+limited to the value of ``myear_max`` set in that file, currently
+200,000 years. The time manager was updated in early 2021.
 The standalone model was modified, and some
 tests were done in a coupled framework after
@@ -653,18 +918,70 @@ layers and the ice thickness distribution defined by ``kcatbound`` = 0.
 Restart information for some tracers is also included in
 the netCDF restart files.

-Three namelist variables control model initialization, ``ice_ic``, ``runtype``,
-and ``restart``, as described in :ref:`tab-ic`. It is possible to do an
-initial run from a file **filename** in two ways: (1) set runtype =
-‘initial’, restart = true and ice\_ic = **filename**, or (2) runtype =
-‘continue’ and pointer\_file = **./restart/ice.restart\_file** where
-**./restart/ice.restart\_file** contains the line
-“./restart/[filename]". The first option is convenient when repeatedly
-starting from a given file when subsequent restart files have been
-written. With this arrangement, the tracer restart flags can be set to
-true or false, depending on whether the tracer restart data exist. With
-the second option, tracer restart flags are set to ‘continue’ for all
-active tracers.
+Three namelist variables generally control model initialization, ``runtype``,
+``ice_ic``, and ``use_restart_time``. The valid values for ``runtype``
+are ``initial`` or ``continue``. When ``runtype`` = `continue`, the
+restart filename is stored in a small text (pointer) file, ``use_restart_time``
+is forced to true, and ``ice_ic`` plays no role. When ``runtype`` =
+`initial`, ``ice_ic`` has three options: ``none``, ``internal``,
+or *filename*. These correspond respectively to no ice, a namelist-driven
+initial condition, and an ice state read from a file. If ``ice_ic`` is set
+to ``internal``, the initial state is defined by the namelist values
+``ice_data_type``, ``ice_data_dist``, and ``ice_data_conc``. In `initial` mode,
+``use_restart_time`` should generally be set to false and the initial
+time is then defined by ``year_init``, ``month_init``, ``day_init``,
+and ``sec_init``. These combinations are summarized in
+:ref:`tab-ic`.
+
+Restart files and initial condition files generally have the same format and
+can be the same files.
+They contain the model state from a particular instant in time. In general,
+that state includes the physical and dynamical state as well as the
+state of optional tracers. Reading of various tracer groups can
+be independently controlled by various restart flags. In other
+words, a restart file can be used to initialize a new configuration
+where new tracers are used (e.g., BGC). In that case, the physical
+state of the model will be read, but if BGC tracers do not exist on the
+restart file, they can be initialized from scratch.
+
+In ``continue`` mode, a pointer file is used to restart the model.
+In this mode, the CICE model writes out a small text (pointer) file
+to the run directory that names the most recent restart file. On
+restart, the model reads the pointer file, which defines the
+name of the restart file, and then reads that restart file.
+With this feature, the ice namelist does not need to be constantly
+updated with the latest
+restart filename, and the model can be automatically resubmitted.
+Manually editing the pointer file in the middle of a run will reset
+the restart filename and allow the run to continue.
+
+Table :ref:`tab-ic` shows the ``runtype``, ``ice_ic``, and ``use_restart_time``
+namelist combinations for initializing
+the model. If the namelist defines the start date, it is set with
+``year_init``, ``month_init``, ``day_init``, and ``sec_init``.
+
+.. _tab-ic:
+
+..
table:: Ice Initialization + + +----------------+--------------------------+--------------------------------------+----------------------------------------+ + | ``runtype`` | ``ice_ic`` | ``use_restart_time`` | Note | + +================+==========================+======================================+========================================+ + | `initial` | `none` | not used | no ice, | + | | | | namelist defines start date | + +----------------+--------------------------+--------------------------------------+----------------------------------------+ + | `initial` | `internal` or | not used | set by namelist ice_data_type, | + | | `default` | | ice_data_dist, ice_data_conc | + +----------------+--------------------------+--------------------------------------+----------------------------------------+ + | `initial` | *filename* | false | read ice state from filename, | + | | | | namelist defines start date | + +----------------+--------------------------+--------------------------------------+----------------------------------------+ + | `initial` | *filename* | true | read ice state from filename, | + | | | | restart file defines start date | + +----------------+--------------------------+--------------------------------------+----------------------------------------+ + | `continue` | not used | not used | pointer file defines restart file, | + | | | | restart file defines start date | + +----------------+--------------------------+--------------------------------------+----------------------------------------+ An additional namelist option, ``restart_ext`` specifies whether halo cells are included in the restart files. This option is useful for tripole and @@ -684,27 +1001,6 @@ and are intended merely to provide guidance for the user to write his or her own routines. Whether the code is to be run in stand-alone or coupled mode is determined at compile time, as described below. -Table :ref:`tab-ic` shows ice initial state resulting from combinations of -``ice_ic``, ``runtype`` and ``restart``. :math:`^a`\ If false, restart is reset to -true. :math:`^b`\ restart is reset to false. :math:`^c`\ ice_ic is -reset to ‘none.’ - -.. _tab-ic: - -.. table:: Ice Initial State - - +----------------+--------------------------+--------------------------------------+----------------------------------------+ - | ice\_ic | | | | - +================+==========================+======================================+========================================+ - | | initial/false | initial/true | continue/true (or false\ :math:`^a`) | - +----------------+--------------------------+--------------------------------------+----------------------------------------+ - | none | no ice | no ice\ :math:`^b` | restart using **pointer\_file** | - +----------------+--------------------------+--------------------------------------+----------------------------------------+ - | default | SST/latitude dependent | SST/latitude dependent\ :math:`^b` | restart using **pointer\_file** | - +----------------+--------------------------+--------------------------------------+----------------------------------------+ - | **filename** | no ice\ :math:`^c` | start from **filename** | restart using **pointer\_file** | - +----------------+--------------------------+--------------------------------------+----------------------------------------+ - .. _parameters: ********************************** @@ -763,9 +1059,9 @@ t_e`) is thus .. math:: dte = dt\_dyn/ndte. 
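+As a minimal illustration (a self-contained sketch with hypothetical values,
+not the CICE source), the subcycling step follows directly from the dynamics
+step; with the default ``ndte`` of 240 noted below and a one-hour dynamics
+time step, each subcycle is 15 seconds:
+
+.. code-block:: fortran
+
+   program dte_example
+      ! minimal sketch: EVP subcycling time step, dte = dt_dyn/ndte
+      integer, parameter :: dbl_kind = selected_real_kind(13)
+      real (kind=dbl_kind) :: dt_dyn = 3600.0_dbl_kind  ! dynamics time step (s)
+      integer              :: ndte   = 240              ! number of EVP subcycles
+      real (kind=dbl_kind) :: dte                       ! subcycling time step (s)
+      dte = dt_dyn/real(ndte,dbl_kind)                  ! = 15 s per subcycle
+      print *, 'dte (s) = ', dte
+   end program dte_example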
-A second parameter, :math:`E_\circ` (``eyc``), defines the elastic wave
+A second parameter, :math:`E_\circ` (``elasticDamp``), defines the elastic wave
 damping timescale :math:`T`, described in Section :ref:`dynam`, as
-``eyc * dt_dyn``. The forcing terms are not updated during the subcycling.
+``elasticDamp * dt_dyn``. The forcing terms are not updated during the subcycling.
 Given the small step (``dte``) at which the EVP dynamics model is subcycled,
 the elastic parameter :math:`E` is also limited by stability constraints, as
 discussed in :cite:`Hunke97`. Linear stability
@@ -774,10 +1070,7 @@ stable as long as the subcycling time step :math:`\Delta t_e`
 sufficiently resolves the damping timescale :math:`T`. For the stability
 analysis we had to make several simplifications of the problem; hence
 the location of the boundary between stable and unstable regions is
-merely an estimate. In practice, the ratio
-:math:`\Delta t_e ~:~ T ~:~ \Delta t`  = 1 : 40 : 120 provides both
-stability and acceptable efficiency for time steps (:math:`\Delta t`) on
-the order of 1 hour.
+merely an estimate. The current default parameters for EVP and EAP are
+:math:`ndte=240` and :math:`E_\circ=0.36`. For high-resolution applications,
+however, it is recommended to increase the value of :math:`ndte`
+:cite:`Koldunov19`, :cite:`Bouchat22`.
 Note that only :math:`T` and :math:`\Delta t_e` figure into the stability
 of the dynamics component; :math:`\Delta t` does not. Although
@@ -819,7 +1112,8 @@ format approach or style for some io packages.

 Model output data can be written as instantaneous or average data as specified
 by the ``hist_avg`` namelist flag. The data is written at the period(s) given by ``histfreq`` and
-``histfreq_n``, and written to binary or netCDF files prepended by ``history_file``
+``histfreq_n`` relative to a reference date specified by ``histfreq_base``.
+The files are written to binary or netCDF files prefixed with ``history_file``
 in **ice_in**. These settings for history files are set in the
 **setup_nml** section of **ice_in** (see :ref:`tabnamelist`).
 If ``history_file`` = ‘iceh’ then the
@@ -858,7 +1152,9 @@ files, no matter what the frequency is.) If there are no namelist flags
 with a given ``histfreq`` value, or if an element of ``histfreq_n`` is 0, then
 no file will be written at that frequency. The output period can be
 discerned from the filenames. All history streams will be either instantaneous
-or averaged as specified by the ``hist_avg`` namelist setting.
+or averaged as specified by the ``hist_avg`` namelist setting, and the frequency
+will be relative to a reference date specified by ``histfreq_base``. More
+information about how the frequency is computed is found in :ref:`timemanager`.

 For example, in the namelist:

@@ -866,6 +1162,7 @@ For example, in the namelist:

   histfreq = ’1’, ’h’, ’d’, ’m’, ’y’
   histfreq_n = 1, 6, 0, 1, 1
+  histfreq_base = 'zero'
   hist_avg = .true.
   f_hi = ’1’
   f_hs = ’h’
@@ -947,15 +1244,33 @@ output is written to a log file. The log file unit to which diagnostic
 output is written is set in **ice\_fileunits.F90**. If ``diag_type`` =
 ‘stdout’, then it is written to standard out (or to **ice.log.[ID]** if you
 redirect standard out as in **cice.run**); otherwise it is written
-to the file given by ``diag_file``. In addition to the standard diagnostic
+to the file given by ``diag_file``.
+
+In addition to the standard diagnostic
 output (maximum area-averaged thickness, velocity, average albedo, total ice
 area, and total ice and snow volumes), the namelist options
 ``print_points`` and ``print_global`` cause additional diagnostic information
 to be computed and written. ``print_global`` outputs global sums that are
 useful for checking global conservation of mass and energy.
-``print_points`` writes data for two specific grid points. Currently, one
+``print_points`` writes data for two specific grid points defined by the
+namelist variables ``lonpnt`` and ``latpnt``. By default, one
 point is near the North Pole and the other is in the Weddell Sea; these
-may be changed in **ice\_in**.
+may be changed in **ice\_in**.
+
+The namelist option ``debug_model`` prints detailed
+debug diagnostics for a single point as the model advances. The point is defined
+by the namelist variables ``debug_model_i``, ``debug_model_j``, ``debug_model_iblk``,
+and ``debug_model_task``. These are the local i, j, block, and MPI task index values
+of the point to be diagnosed. This point is defined in local index space
+and can lie in the array halo. If the local point is not defined in
+the namelist, the point associated with ``lonpnt(1)`` and ``latpnt(1)`` is used.
+``debug_model`` is normally used when the model aborts and needs to be debugged
+in detail at a particular (usually failing) grid point.
+
+Memory use diagnostics are controlled by the logical namelist variable ``memory_stats``.
+This feature uses an intrinsic query in C defined in **ice\_memusage\_gptl.c**.
+Memory diagnostics will be written at the frequency defined by
+``diagfreq``.

 Timers are declared and initialized in **ice\_timers.F90**, and the code to
 be timed is wrapped with calls to *ice\_timer\_start* and
@@ -1014,15 +1329,17 @@ The timers use *MPI\_WTIME* for parallel runs and the F90 intrinsic
   +--------------+-------------+----------------------------------------------------+
   | 14           | Bound       | boundary conditions and subdomain communications   |
   +--------------+-------------+----------------------------------------------------+
-  | 15           | BGC         | biogeochemistry, part of Thermo timer              |
+  | 15           | BundBound   | halo update bundle copy                            |
   +--------------+-------------+----------------------------------------------------+
-  | 16           | Forcing     | forcing                                            |
+  | 16           | BGC         | biogeochemistry, part of Thermo timer              |
   +--------------+-------------+----------------------------------------------------+
-  | 17           | 1d-evp      | 1d evp, part of Dynamics timer                     |
+  | 17           | Forcing     | forcing                                            |
   +--------------+-------------+----------------------------------------------------+
-  | 18           | 2d-evp      | 2d evp, part of Dynamics timer                     |
+  | 18           | 1d-evp      | 1d evp, part of Dynamics timer                     |
   +--------------+-------------+----------------------------------------------------+
-  | 19           | UpdState    | update state                                       |
+  | 19           | 2d-evp      | 2d evp, part of Dynamics timer                     |
+  +--------------+-------------+----------------------------------------------------+
+  | 20           | UpdState    | update state                                       |
   +--------------+-------------+----------------------------------------------------+

 .. _restartfiles:

@@ -1043,7 +1360,8 @@ format approach or style for some io packages.

 The restart files created by CICE contain all of the variables needed
 for a full, exact restart. The filename begins with the character string
 ‘iced.’, and the restart dump frequency is given by the namelist
-variables ``dumpfreq`` and ``dumpfreq_n``. The pointer to the filename from
+variables ``dumpfreq`` and ``dumpfreq_n`` relative to a reference date
+specified by ``dumpfreq_base``. The pointer to the filename from
 which the restart data is to be read for a continuation run is set in
 ``pointer_file``. The code assumes that auxiliary binary tracer restart
 files will be identified using the same pointer and file name prefix,
@@ -1055,9 +1373,9 @@ Additional namelist flags provide further control of restart behavior.
 ``dump_last`` = true causes a set of restart files to be written at the end
 of a run when it is otherwise not scheduled to occur. The flag
 ``use_restart_time`` enables the user to choose to use the model date
-provided in the restart files. If ``use_restart_time`` = false then the
+provided in the restart files for initial runs. If ``use_restart_time`` = false then the
 initial model date stamp is determined from the namelist parameters,
-``year_init``, ``month_init``, ``day_init``, and ``sec_init``..
+``year_init``, ``month_init``, ``day_init``, and ``sec_init``.

 lcdf64 = true sets 64-bit netCDF output, allowing larger file sizes.

 Routines for gathering, scattering and (unformatted) reading and writing
@@ -1069,14 +1387,7 @@ restarts on the various tripole grids. They are accessed by setting
 available when using PIO; in this case extra halo update calls fill
 ghost cells for tripole grids (do not use PIO for regional grids).

-Two netCDF restart files are available for the CICE v5 and v6 code distributions
+Restart files are provided with the CICE code distributions
 for the gx3 and gx1 grids (see :ref:`force`
 for information about obtaining these files).
-They were created using the default v5 model
-configuration, but
-initialized with no ice. The gx3 case was run for 1 year using the 1997
-forcing data provided with the code. The gx1 case was run for 20 years,
-so that the date of restart in the file is 1978-01-01. Note that the
-restart dates provided in the restart files can be overridden using the
-namelist variables ``use_restart_time``, ``year_init``, ``month_init``,
-``day_init``, and ``sec_init``. The
-forcing time can also be overridden using ``fyear_init``.
+They were created using the default model
+configuration and run for multiple years using the JRA55 forcing.
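+For example, to start an `initial` run from one of these files (the path and
+filename below are hypothetical), the relevant namelist combination from
+:ref:`tab-ic` would be:
+
+.. code-block:: fortran
+
+   ! hypothetical example: initial run started from a provided restart file
+   runtype          = 'initial'
+   ice_ic           = './restart/iced_gx3_v6.2005-01-01-00000.nc'
+   use_restart_time = .true.   ! take the start date from the restart file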