From 078aab4844983ccbb9c9fddc1a70e060e1d99a34 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Tue, 10 May 2022 10:15:06 -0700 Subject: [PATCH] Merge cgridDEV branch including C grid implementation and other fixes (#715) Merge cgridDEV branch to main Co-authored-by: Jean-Francois Lemieux Co-authored-by: David A. Bailey Co-authored-by: Elizabeth Hunke Co-authored-by: daveh150 Co-authored-by: TRasmussen <33480590+TillRasmussen@users.noreply.github.com> Co-authored-by: Philippe Blain --- .github/workflows/test-cice.yml | 1 + .../cicedynB/analysis/ice_diagnostics.F90 | 172 +- cicecore/cicedynB/analysis/ice_history.F90 | 541 +++- .../cicedynB/analysis/ice_history_shared.F90 | 133 +- cicecore/cicedynB/dynamics/ice_dyn_eap.F90 | 1532 ++++++------ cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 2151 +++++++++++++--- cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 | 76 +- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 2160 +++++++++++++--- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 567 +++-- .../dynamics/ice_transport_driver.F90 | 1157 ++++----- .../cicedynB/dynamics/ice_transport_remap.F90 | 2050 +++++++-------- cicecore/cicedynB/general/ice_flux.F90 | 132 +- cicecore/cicedynB/general/ice_forcing.F90 | 356 ++- cicecore/cicedynB/general/ice_init.F90 | 726 ++++-- cicecore/cicedynB/general/ice_state.F90 | 16 +- cicecore/cicedynB/general/ice_step_mod.F90 | 37 +- .../infrastructure/comm/mpi/ice_boundary.F90 | 2 +- .../infrastructure/comm/mpi/ice_timers.F90 | 2 + .../comm/serial/ice_reprosum.F90 | 32 +- .../infrastructure/comm/serial/ice_timers.F90 | 2 + .../cicedynB/infrastructure/ice_domain.F90 | 42 +- cicecore/cicedynB/infrastructure/ice_grid.F90 | 2208 +++++++++++++++-- .../cicedynB/infrastructure/ice_memusage.F90 | 151 ++ .../infrastructure/ice_memusage_gptl.c | 239 ++ .../infrastructure/ice_read_write.F90 | 3 +- .../infrastructure/ice_restart_driver.F90 | 175 +- .../io/io_binary/ice_restart.F90 | 21 +- .../io/io_netcdf/ice_history_write.F90 | 693 +++--- .../io/io_netcdf/ice_restart.F90 | 61 +- .../io/io_pio2/ice_history_write.F90 | 554 ++--- .../infrastructure/io/io_pio2/ice_restart.F90 | 77 +- .../drivers/direct/hadgem3/CICE_RunMod.F90 | 4 +- .../direct/nemo_concepts/CICE_RunMod.F90 | 4 +- cicecore/drivers/mct/cesm1/CICE_RunMod.F90 | 4 +- cicecore/drivers/mct/cesm1/ice_comp_esmf.F90 | 2 +- cicecore/drivers/mct/cesm1/ice_comp_mct.F90 | 2 +- .../drivers/mct/cesm1/ice_import_export.F90 | 22 +- cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 | 2 +- .../drivers/nuopc/cmeps/ice_import_export.F90 | 22 +- cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 | 4 +- cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 | 4 +- cicecore/drivers/nuopc/dmi/cice_cap.info | 20 +- .../drivers/standalone/cice/CICE_InitMod.F90 | 11 + .../drivers/standalone/cice/CICE_RunMod.F90 | 9 +- .../unittest/gridavgchk/CICE_InitMod.F90 | 486 ++++ .../unittest/gridavgchk/gridavgchk.F90 | 730 ++++++ cicecore/shared/ice_restart_column.F90 | 38 +- configuration/scripts/Makefile | 6 +- configuration/scripts/cice.batch.csh | 16 + configuration/scripts/cice.launch.csh | 12 + configuration/scripts/cice_decomp.csh | 11 + configuration/scripts/ice_in | 23 +- .../scripts/machines/Macros.narwhal_aocc | 2 +- .../scripts/machines/Macros.narwhal_cray | 2 +- .../scripts/machines/Macros.nrlssc_gnu | 59 + .../scripts/machines/env.banting_gnu | 2 + .../scripts/machines/env.banting_intel | 2 +- .../scripts/machines/env.cesium_intel | 2 + .../scripts/machines/env.cheyenne_gnu | 4 +- .../scripts/machines/env.cheyenne_intel | 4 +- .../scripts/machines/env.cheyenne_pgi | 4 
+- .../scripts/machines/env.conda_macos | 5 + configuration/scripts/machines/env.cori_intel | 21 +- configuration/scripts/machines/env.daley_gnu | 2 + configuration/scripts/machines/env.fram_intel | 2 + configuration/scripts/machines/env.gaea_intel | 2 +- configuration/scripts/machines/env.hera_intel | 8 +- configuration/scripts/machines/env.izumi_gnu | 2 +- .../scripts/machines/env.izumi_intel | 2 +- configuration/scripts/machines/env.izumi_nag | 2 +- configuration/scripts/machines/env.izumi_pgi | 2 +- .../scripts/machines/env.millikan_intel | 2 + .../scripts/machines/env.mustang_intel18 | 2 +- .../scripts/machines/env.mustang_intel20 | 2 +- .../scripts/machines/env.narwhal_aocc | 4 +- configuration/scripts/machines/env.nrlssc_gnu | 16 + .../scripts/machines/env.orion_intel | 31 +- .../scripts/options/set_env.gridavgchk | 2 + configuration/scripts/options/set_nml.alt01 | 2 +- configuration/scripts/options/set_nml.alt02 | 2 +- configuration/scripts/options/set_nml.alt03 | 2 +- configuration/scripts/options/set_nml.alt04 | 2 +- configuration/scripts/options/set_nml.alt05 | 2 +- configuration/scripts/options/set_nml.alt06 | 2 +- configuration/scripts/options/set_nml.box2001 | 8 +- configuration/scripts/options/set_nml.boxadv | 8 +- configuration/scripts/options/set_nml.boxchan | 59 + .../scripts/options/set_nml.boxislandse | 46 + .../scripts/options/set_nml.boxislandsn | 46 + .../scripts/options/set_nml.boxislandsne | 46 + .../scripts/options/set_nml.boxnodyn | 36 +- .../scripts/options/set_nml.boxrestore | 8 +- .../scripts/options/set_nml.boxslotcyl | 11 +- configuration/scripts/options/set_nml.boxsyme | 58 + configuration/scripts/options/set_nml.boxsymn | 58 + .../scripts/options/set_nml.boxsymne | 58 + configuration/scripts/options/set_nml.boxsyms | 58 + configuration/scripts/options/set_nml.boxsymw | 58 + configuration/scripts/options/set_nml.boxwall | 58 + .../scripts/options/set_nml.boxwallblock | 58 + .../scripts/options/set_nml.boxwallp5 | 58 + .../scripts/options/set_nml.dwblockall | 1 + .../scripts/options/set_nml.dynanderson | 2 + .../scripts/options/set_nml.dynpicard | 1 + configuration/scripts/options/set_nml.gbox12 | 8 + configuration/scripts/options/set_nml.gbox128 | 8 +- configuration/scripts/options/set_nml.gbox180 | 6 +- configuration/scripts/options/set_nml.gbox80 | 6 +- configuration/scripts/options/set_nml.gridb | 2 + configuration/scripts/options/set_nml.gridc | 2 + configuration/scripts/options/set_nml.gridcd | 2 + configuration/scripts/options/set_nml.histdbg | 225 ++ .../scripts/options/set_nml.icdefault | 2 +- .../scripts/options/set_nml.iobinary | 2 +- .../scripts/options/set_nml.kmtislands | 1 + .../scripts/options/set_nml.run10year | 2 +- configuration/scripts/options/set_nml.tx1 | 2 +- configuration/scripts/options/set_nml.zsal | 2 +- .../scripts/options/test_nml.restart21 | 7 + .../scripts/options/test_nml.restart22 | 6 + configuration/scripts/tests/base_suite.ts | 1 + configuration/scripts/tests/comparelog.csh | 4 +- configuration/scripts/tests/gridsys_suite.ts | 87 + configuration/scripts/tests/omp_suite.ts | 148 +- .../scripts/tests/test_restart2.files | 2 + .../scripts/tests/test_restart2.script | 82 + configuration/scripts/tests/unittest_suite.ts | 5 + doc/source/cice_index.rst | 40 +- doc/source/master_list.bib | 289 ++- doc/source/science_guide/sg_dynamics.rst | 327 ++- doc/source/science_guide/sg_horiztrans.rst | 5 +- doc/source/user_guide/figures/CICE_Cgrid.png | Bin 0 -> 51536 bytes doc/source/user_guide/ug_case_settings.rst | 71 +- 
doc/source/user_guide/ug_implementation.rst | 223 +- doc/source/user_guide/ug_testing.rst | 8 +- 135 files changed, 14907 insertions(+), 5105 deletions(-) mode change 100755 => 100644 cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 mode change 100755 => 100644 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 create mode 100644 cicecore/cicedynB/infrastructure/ice_memusage.F90 create mode 100644 cicecore/cicedynB/infrastructure/ice_memusage_gptl.c create mode 100644 cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 create mode 100644 cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 create mode 100644 configuration/scripts/machines/Macros.nrlssc_gnu create mode 100755 configuration/scripts/machines/env.nrlssc_gnu create mode 100644 configuration/scripts/options/set_env.gridavgchk create mode 100644 configuration/scripts/options/set_nml.boxchan create mode 100644 configuration/scripts/options/set_nml.boxislandse create mode 100644 configuration/scripts/options/set_nml.boxislandsn create mode 100644 configuration/scripts/options/set_nml.boxislandsne create mode 100644 configuration/scripts/options/set_nml.boxsyme create mode 100644 configuration/scripts/options/set_nml.boxsymn create mode 100644 configuration/scripts/options/set_nml.boxsymne create mode 100644 configuration/scripts/options/set_nml.boxsyms create mode 100644 configuration/scripts/options/set_nml.boxsymw create mode 100644 configuration/scripts/options/set_nml.boxwall create mode 100644 configuration/scripts/options/set_nml.boxwallblock create mode 100644 configuration/scripts/options/set_nml.boxwallp5 create mode 100644 configuration/scripts/options/set_nml.dwblockall create mode 100644 configuration/scripts/options/set_nml.gbox12 create mode 100644 configuration/scripts/options/set_nml.gridb create mode 100644 configuration/scripts/options/set_nml.gridc create mode 100644 configuration/scripts/options/set_nml.gridcd create mode 100644 configuration/scripts/options/set_nml.histdbg create mode 100644 configuration/scripts/options/set_nml.kmtislands create mode 100644 configuration/scripts/options/test_nml.restart21 create mode 100644 configuration/scripts/options/test_nml.restart22 create mode 100644 configuration/scripts/tests/gridsys_suite.ts create mode 100644 configuration/scripts/tests/test_restart2.files create mode 100644 configuration/scripts/tests/test_restart2.script create mode 100644 doc/source/user_guide/figures/CICE_Cgrid.png diff --git a/.github/workflows/test-cice.yml b/.github/workflows/test-cice.yml index 32e784564..c60a07721 100644 --- a/.github/workflows/test-cice.yml +++ b/.github/workflows/test-cice.yml @@ -42,6 +42,7 @@ jobs: run: | sudo xcode-select -r sudo xcode-select -s /Library/Developer/CommandLineTools + sudo ln -s /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include/* /usr/local/include/ echo "xcrun --show-sdk-path: $(xcrun --show-sdk-path)" echo "xcode-select -p: $(xcode-select -p)" - name: system info diff --git a/cicecore/cicedynB/analysis/ice_diagnostics.F90 b/cicecore/cicedynB/analysis/ice_diagnostics.F90 index ec041483c..fb9fc5f03 100644 --- a/cicecore/cicedynB/analysis/ice_diagnostics.F90 +++ b/cicecore/cicedynB/analysis/ice_diagnostics.F90 @@ -127,7 +127,7 @@ subroutine runtime_diags (dt) alvdr_init, alvdf_init, alidr_init, alidf_init use ice_flux_bgc, only: faero_atm, faero_ocn, fiso_atm, fiso_ocn use ice_global_reductions, only: global_sum, global_sum_prod, global_maxval - use ice_grid, only: lmask_n, lmask_s, tarean, tareas + use ice_grid, only: lmask_n, lmask_s, tarean, 
tareas, grid_ice use ice_state ! everything ! tcraig, this is likely to cause circular dependency because ice_prescribed_mod is high level routine #ifdef CESMCOUPLED @@ -201,6 +201,17 @@ subroutine runtime_diags (dt) real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & work1, work2 + real (kind=dbl_kind), parameter :: & + maxval_spval = -0.9_dbl_kind*HUGE(0.0_dbl_kind) ! spval to detect + ! undefined values returned from global_maxval. if global_maxval + ! is applied to a region that does not exist (for instance + ! southern hemisphere in box cases), global_maxval + ! returns -HUGE which we want to avoid writing. The + ! return value is checked against maxval_spval before writing. + +! real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & +! uvelT, vvelT + character(len=*), parameter :: subname = '(runtime_diags)' call icepack_query_parameters(ktherm_out=ktherm, calc_Tsfc_out=calc_Tsfc) @@ -228,6 +239,8 @@ subroutine runtime_diags (dt) ! hemispheric quantities ! total ice area + arean = c0 + areas = c0 arean = global_sum(aice, distrb_info, field_loc_center, tarean) areas = global_sum(aice, distrb_info, field_loc_center, tareas) arean = arean * m2_to_km2 @@ -244,6 +257,8 @@ subroutine runtime_diags (dt) enddo enddo !$OMP END PARALLEL DO + extentn = c0 + extents = c0 extentn = global_sum(work1, distrb_info, field_loc_center, & tarean) extents = global_sum(work1, distrb_info, field_loc_center, & @@ -252,10 +267,14 @@ subroutine runtime_diags (dt) extents = extents * m2_to_km2 ! total ice volume + shmaxn = c0 + shmaxs = c0 shmaxn = global_sum(vice, distrb_info, field_loc_center, tarean) shmaxs = global_sum(vice, distrb_info, field_loc_center, tareas) ! total snow volume + snwmxn = c0 + snwmxs = c0 snwmxn = global_sum(vsno, distrb_info, field_loc_center, tarean) snwmxs = global_sum(vsno, distrb_info, field_loc_center, tareas) @@ -293,7 +312,25 @@ subroutine runtime_diags (dt) enddo enddo enddo - !$OMP END PARALLEL DO + ! Eventually do energy diagnostic on T points. +! if (grid_ice == 'CD') then +! !$OMP PARALLEL DO PRIVATE(iblk,i,j) +! do iblk = 1, nblocks +! do j = 1, ny_block +! do i = 1, nx_block +! call grid_average_X2Y('E2TS',uvelE,uvelT) +! call grid_average_X2Y('N2TS',vvelN,vvelT) +! work1(i,j,iblk) = p5 & +! * (rhos*vsno(i,j,iblk) + rhoi*vice(i,j,iblk)) & +! * (uvelT(i,j,iblk)*uvelT(i,j,iblk) & +! + vvelT(i,j,iblk)*vvelT(i,j,iblk)) +! enddo +! enddo +! enddo +! endif +! !$OMP END PARALLEL DO + ketotn = c0 + ketots = c0 ketotn = global_sum(work1, distrb_info, field_loc_center, tarean) ketots = global_sum(work1, distrb_info, field_loc_center, tareas) @@ -370,23 +407,57 @@ subroutine runtime_diags (dt) endif ! maximum ice volume (= mean thickness including open water) + hmaxn = c0 + hmaxs = c0 hmaxn = global_maxval(vice, distrb_info, lmask_n) hmaxs = global_maxval(vice, distrb_info, lmask_s) + if (hmaxn < maxval_spval) hmaxn = c0 + if (hmaxs < maxval_spval) hmaxs = c0 ! 
maximum ice speed - !$OMP PARALLEL DO PRIVATE(iblk,i,j) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = sqrt(uvel(i,j,iblk)**2 & - + vvel(i,j,iblk)**2) + if (grid_ice == 'CD') then + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = max(sqrt(uvelE(i,j,iblk)**2 & + + vvelE(i,j,iblk)**2), & + sqrt(uvelN(i,j,iblk)**2 & + + vvelN(i,j,iblk)**2)) + enddo + enddo enddo + !$OMP END PARALLEL DO + elseif (grid_ice == 'C') then + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = sqrt(uvelE(i,j,iblk)**2 & + + vvelN(i,j,iblk)**2) + enddo + enddo enddo - enddo - !$OMP END PARALLEL DO + !$OMP END PARALLEL DO + else + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = sqrt(uvel(i,j,iblk)**2 & + + vvel(i,j,iblk)**2) + enddo + enddo + enddo + !$OMP END PARALLEL DO + endif + umaxn = c0 + umaxs = c0 umaxn = global_maxval(work1, distrb_info, lmask_n) umaxs = global_maxval(work1, distrb_info, lmask_s) + if (umaxn < maxval_spval) umaxn = c0 + if (umaxs < maxval_spval) umaxs = c0 ! Write warning message if ice speed is too big ! (Ice speeds of ~1 m/s or more usually indicate instability) @@ -427,8 +498,12 @@ subroutine runtime_diags (dt) ! maximum ice strength + pmaxn = c0 + pmaxs = c0 pmaxn = global_maxval(strength, distrb_info, lmask_n) pmaxs = global_maxval(strength, distrb_info, lmask_s) + if (pmaxn < maxval_spval) pmaxn = c0 + if (pmaxs < maxval_spval) pmaxs = c0 pmaxn = pmaxn / c1000 ! convert to kN/m pmaxs = pmaxs / c1000 @@ -437,7 +512,9 @@ subroutine runtime_diags (dt) ! total ice/snow internal energy call total_energy (work1) - + + etotn = c0 + etots = c0 etotn = global_sum(work1, distrb_info, & field_loc_center, tarean) etots = global_sum(work1, distrb_info, & @@ -452,6 +529,8 @@ subroutine runtime_diags (dt) ! evaporation + evpn = c0 + evps = c0 evpn = global_sum_prod(evap, aice, distrb_info, & field_loc_center, tarean) evps = global_sum_prod(evap, aice, distrb_info, & @@ -470,6 +549,8 @@ subroutine runtime_diags (dt) endif ! salt flux + sfsaltn = c0 + sfsalts = c0 sfsaltn = global_sum(fsalt_ai, distrb_info, & field_loc_center, tarean) sfsalts = global_sum(fsalt_ai, distrb_info, & @@ -478,6 +559,8 @@ subroutine runtime_diags (dt) sfsalts = sfsalts*dt ! fresh water flux + sfreshn = c0 + sfreshs = c0 sfreshn = global_sum(fresh_ai, distrb_info, & field_loc_center, tarean) sfreshs = global_sum(fresh_ai, distrb_info, & @@ -499,6 +582,8 @@ subroutine runtime_diags (dt) ! ocean heat ! Note: fswthru not included because it does not heat ice + fhocnn = c0 + fhocns = c0 fhocnn = global_sum(fhocn_ai, distrb_info, & field_loc_center, tarean) fhocns = global_sum(fhocn_ai, distrb_info, & @@ -548,6 +633,8 @@ subroutine runtime_diags (dt) endif ! 
calc_Tsfc + fhatmn = c0 + fhatms = c0 fhatmn = global_sum(work1, distrb_info, & field_loc_center, tarean) fhatms = global_sum(work1, distrb_info, & @@ -564,6 +651,8 @@ subroutine runtime_diags (dt) enddo !$OMP END PARALLEL DO + fswnetn = c0 + fswnets = c0 fswnetn = global_sum(work1, distrb_info, & field_loc_center, tarean) fswnets = global_sum(work1, distrb_info, & @@ -582,6 +671,8 @@ subroutine runtime_diags (dt) enddo !$OMP END PARALLEL DO + fswdnn = c0 + fswdns = c0 fswdnn = global_sum(work1, distrb_info, & field_loc_center, tarean) fswdns = global_sum(work1, distrb_info, & @@ -597,12 +688,17 @@ subroutine runtime_diags (dt) enddo enddo !$OMP END PARALLEL DO + + fhfrzn = c0 + fhfrzs = c0 fhfrzn = global_sum(work1, distrb_info, & field_loc_center, tarean) fhfrzs = global_sum(work1, distrb_info, & field_loc_center, tareas) ! rain + rnn = c0 + rns = c0 rnn = global_sum_prod(frain, aice_init, distrb_info, & field_loc_center, tarean) rns = global_sum_prod(frain, aice_init, distrb_info, & @@ -611,6 +707,8 @@ subroutine runtime_diags (dt) rns = rns*dt ! snow + snn = c0 + sns = c0 snn = global_sum_prod(fsnow, aice_init, distrb_info, & field_loc_center, tarean) sns = global_sum_prod(fsnow, aice_init, distrb_info, & @@ -623,6 +721,8 @@ subroutine runtime_diags (dt) work1(:,:,:) = frazil(:,:,:)*rhoi/dt if (ktherm == 2 .and. .not.update_ocn_f) & work1(:,:,:) = (frazil(:,:,:)-frazil_diag(:,:,:))*rhoi/dt + frzn = c0 + frzs = c0 frzn = global_sum(work1, distrb_info, & field_loc_center, tarean) frzs = global_sum(work1, distrb_info, & @@ -706,6 +806,16 @@ subroutine runtime_diags (dt) ! isotopes if (tr_iso) then + fisoan = c0 + fisoas = c0 + fisoon = c0 + fisoos = c0 + isototn = c0 + isotots = c0 + isomx1n = c0 + isomx1s = c0 + isorn = c0 + isors = c0 do n = 1, n_iso fisoan(n) = global_sum_prod(fiso_atm(:,:,n,:), aice_init, & distrb_info, field_loc_center, tarean) @@ -738,6 +848,8 @@ subroutine runtime_diags (dt) isotots(n) = global_sum(work1, distrb_info, field_loc_center, tareas) isomx1n(n) = global_maxval(work1, distrb_info, lmask_n) isomx1s(n) = global_maxval(work1, distrb_info, lmask_s) + if (isomx1n(n) < maxval_spval) isomx1n(n) = c0 + if (isomx1s(n) < maxval_spval) isomx1s(n) = c0 isorn(n) = (totison(n)-isototn(n)+fisoan(n)-fisoon(n))/(isototn(n)+c1) isors(n) = (totisos(n)-isotots(n)+fisoas(n)-fisoos(n))/(isotots(n)+c1) enddo ! n_iso @@ -745,6 +857,16 @@ subroutine runtime_diags (dt) ! 
aerosols if (tr_aero) then + faeran = c0 + faeras = c0 + faeron = c0 + faeros = c0 + aerototn = c0 + aerotots = c0 + aeromx1n = c0 + aeromx1s = c0 + aerrn = c0 + aerrs = c0 do n = 1, n_aero faeran(n) = global_sum_prod(faero_atm(:,:,n,:), aice_init, & distrb_info, field_loc_center, tarean) @@ -776,6 +898,8 @@ aerotots(n) = global_sum(work1, distrb_info, field_loc_center, tareas) aeromx1n(n) = global_maxval(work1, distrb_info, lmask_n) aeromx1s(n) = global_maxval(work1, distrb_info, lmask_s) + if (aeromx1n(n) < maxval_spval) aeromx1n(n) = c0 + if (aeromx1s(n) < maxval_spval) aeromx1s(n) = c0 aerrn(n) = (totaeron(n)-aerototn(n)+faeran(n)-faeron(n)) & / (aerototn(n) + c1) @@ -1629,10 +1753,12 @@ end subroutine debug_ice subroutine print_state(plabel,i,j,iblk) + use ice_grid, only: grid_ice use ice_blocks, only: block, get_block use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, nfsd - use ice_state, only: aice0, aicen, vicen, vsnon, uvel, vvel, trcrn + use ice_state, only: aice0, aicen, vicen, vsnon, uvel, vvel, & + uvelE, vvelE, uvelN, vvelN, trcrn use ice_flux, only: uatm, vatm, potT, Tair, Qa, flw, frain, fsnow, & fsens, flat, evap, flwout, swvdr, swvdf, swidr, swidf, rhoa, & frzmlt, sst, sss, Tf, Tref, Qref, Uref, uocn, vocn, strtltx, strtlty @@ -1754,6 +1880,15 @@ subroutine print_state(plabel,i,j,iblk) write(nu_diag,*) 'uvel(i,j)',uvel(i,j,iblk) write(nu_diag,*) 'vvel(i,j)',vvel(i,j,iblk) + if (grid_ice == 'C') then + write(nu_diag,*) 'uvelE(i,j)',uvelE(i,j,iblk) + write(nu_diag,*) 'vvelN(i,j)',vvelN(i,j,iblk) + elseif (grid_ice == 'CD') then + write(nu_diag,*) 'uvelE(i,j)',uvelE(i,j,iblk) + write(nu_diag,*) 'vvelE(i,j)',vvelE(i,j,iblk) + write(nu_diag,*) 'uvelN(i,j)',uvelN(i,j,iblk) + write(nu_diag,*) 'vvelN(i,j)',vvelN(i,j,iblk) + endif write(nu_diag,*) ' ' write(nu_diag,*) 'atm states and fluxes' @@ -1801,10 +1936,12 @@ end subroutine print_state subroutine print_points_state(plabel,ilabel) + use ice_grid, only: grid_ice use ice_blocks, only: block, get_block use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr - use ice_state, only: aice0, aicen, vicen, vsnon, uvel, vvel, trcrn + use ice_state, only: aice0, aicen, vicen, vsnon, uvel, vvel, & + uvelE, vvelE, uvelN, vvelN, trcrn use ice_flux, only: uatm, vatm, potT, Tair, Qa, flw, frain, fsnow, & fsens, flat, evap, flwout, swvdr, swvdf, swidr, swidf, rhoa, & frzmlt, sst, sss, Tf, Tref, Qref, Uref, uocn, vocn, strtltx, strtlty @@ -1896,6 +2033,15 @@ subroutine print_points_state(plabel,ilabel) write(nu_diag,*) trim(llabel),'uvel=',uvel(i,j,iblk) write(nu_diag,*) trim(llabel),'vvel=',vvel(i,j,iblk) + if (grid_ice == 'C') then + write(nu_diag,*) trim(llabel),'uvelE=',uvelE(i,j,iblk) + write(nu_diag,*) trim(llabel),'vvelN=',vvelN(i,j,iblk) + elseif (grid_ice == 'CD') then + write(nu_diag,*) trim(llabel),'uvelE=',uvelE(i,j,iblk) + write(nu_diag,*) trim(llabel),'vvelE=',vvelE(i,j,iblk) + write(nu_diag,*) trim(llabel),'uvelN=',uvelN(i,j,iblk) + write(nu_diag,*) trim(llabel),'vvelN=',vvelN(i,j,iblk) + endif write(nu_diag,*) ' ' write(nu_diag,*) 'atm states and fluxes' diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90 index c32f4b78d..94ee4f956 100644 --- a/cicecore/cicedynB/analysis/ice_history.F90 +++ b/cicecore/cicedynB/analysis/ice_history.F90 @@ -68,6 +68,9 @@ subroutine init_hist (dt) use ice_domain_size, only: max_blocks, max_nstrm, nilyr, nslyr, nblyr, ncat, nfsd use ice_dyn_shared, only: kdyn use
ice_flux, only: mlt_onset, frz_onset, albcnt, snwcnt + use ice_grid, only: grid_ice, & + grid_atm_thrm, grid_atm_dynu, grid_atm_dynv, & + grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv use ice_history_shared ! everything use ice_history_mechred, only: init_hist_mechred_2D, init_hist_mechred_3Dc use ice_history_pond, only: init_hist_pond_2D, init_hist_pond_3Dc @@ -93,9 +96,109 @@ subroutine init_hist (dt) integer (kind=int_kind), dimension(max_nstrm) :: & ntmp integer (kind=int_kind) :: nml_error ! namelist i/o error flag + character (len=25) :: & + str2D_gat, str2d_gau, str2d_gav, & ! dimensions for t, u, v atm grid (ga) + str2D_got, str2d_gou, str2d_gov ! dimensions for t, u, v ocn grid (go) + character (len=25) :: & + cstr_gat, cstr_gau, cstr_gav, & ! mask area name for t, u, v atm grid (ga) + cstr_got, cstr_gou, cstr_gov ! mask area name for t, u, v ocn grid (go) + character(len=char_len) :: description character(len=*), parameter :: subname = '(init_hist)' + !----------------------------------------------------------------- + ! set atm/ocn forcing grids + !----------------------------------------------------------------- + + !--- ATM --- + + if (grid_atm_thrm == 'T') then + str2D_gat = tstr2D + cstr_gat = tcstr + elseif (grid_atm_thrm == 'U') then + str2D_gat = ustr2D + cstr_gat = ucstr + elseif (grid_atm_thrm == 'N') then + str2D_gat = nstr2D + cstr_gat = ncstr + elseif (grid_atm_thrm == 'E') then + str2D_gat = estr2D + cstr_gat = ecstr + endif + + if (grid_atm_dynu == 'T') then + str2D_gau = tstr2D + cstr_gau = tcstr + elseif (grid_atm_dynu == 'U') then + str2D_gau = ustr2D + cstr_gau = ucstr + elseif (grid_atm_dynu == 'N') then + str2D_gau = nstr2D + cstr_gau = ncstr + elseif (grid_atm_dynu == 'E') then + str2D_gau = estr2D + cstr_gau = ecstr + endif + + if (grid_atm_dynv == 'T') then + str2D_gav = tstr2D + cstr_gav = tcstr + elseif (grid_atm_dynv == 'U') then + str2D_gav = ustr2D + cstr_gav = ucstr + elseif (grid_atm_dynv == 'N') then + str2D_gav = nstr2D + cstr_gav = ncstr + elseif (grid_atm_dynv == 'E') then + str2D_gav = estr2D + cstr_gav = ecstr + endif + + !--- OCN --- + + if (grid_ocn_thrm == 'T') then + str2D_got = tstr2D + cstr_got = tcstr + elseif (grid_ocn_thrm == 'U') then + str2D_got = ustr2D + cstr_got = ucstr + elseif (grid_ocn_thrm == 'N') then + str2D_got = nstr2D + cstr_got = ncstr + elseif (grid_ocn_thrm == 'E') then + str2D_got = estr2D + cstr_got = ecstr + endif + + if (grid_ocn_dynu == 'T') then + str2D_gou = tstr2D + cstr_gou = tcstr + elseif (grid_ocn_dynu == 'U') then + str2D_gou = ustr2D + cstr_gou = ucstr + elseif (grid_ocn_dynu == 'N') then + str2D_gou = nstr2D + cstr_gou = ncstr + elseif (grid_ocn_dynu == 'E') then + str2D_gou = estr2D + cstr_gou = ecstr + endif + + if (grid_ocn_dynv == 'T') then + str2D_gov = tstr2D + cstr_gov = tcstr + elseif (grid_ocn_dynv == 'U') then + str2D_gov = ustr2D + cstr_gov = ucstr + elseif (grid_ocn_dynv == 'N') then + str2D_gov = nstr2D + cstr_gov = ncstr + elseif (grid_ocn_dynv == 'E') then + str2D_gov = estr2D + cstr_gov = ecstr + endif + + !----------------------------------------------------------------- ! set history dimensions !----------------------------------------------------------------- @@ -280,6 +383,41 @@ subroutine init_hist (dt) f_sispeed = f_CMIP endif + if (grid_ice == 'CD' .or. 
grid_ice == 'C') then + f_uvelE = f_uvel + f_vvelE = f_vvel + f_icespdE = f_icespd + f_icedirE = f_icedir + f_uvelN = f_uvel + f_vvelN = f_vvel + f_icespdN = f_icespd + f_icedirN = f_icedir + f_strairxN = f_strairx + f_strairyN = f_strairy + f_strairxE = f_strairx + f_strairyE = f_strairy + f_strocnxN = f_strocnx + f_strocnyN = f_strocny + f_strocnxE = f_strocnx + f_strocnyE = f_strocny + f_strcorxN = f_strcorx + f_strcoryN = f_strcory + f_strcorxE = f_strcorx + f_strcoryE = f_strcory + f_strintxN = f_strintx + f_strintyN = f_strinty + f_strintxE = f_strintx + f_strintyE = f_strinty + f_strtltxN = f_strtltx + f_strtltyN = f_strtlty + f_strtltxE = f_strtltx + f_strtltyE = f_strtlty + f_taubxN = f_taubx + f_taubyN = f_tauby + f_taubxE = f_taubx + f_taubyE = f_tauby + endif + #ifndef ncdf f_bounds = .false. #endif @@ -293,13 +431,22 @@ subroutine init_hist (dt) if (tr_fsd) f_NFSD = .true. call broadcast_scalar (f_tmask, master_task) + call broadcast_scalar (f_umask, master_task) + call broadcast_scalar (f_nmask, master_task) + call broadcast_scalar (f_emask, master_task) call broadcast_scalar (f_blkmask, master_task) call broadcast_scalar (f_tarea, master_task) call broadcast_scalar (f_uarea, master_task) + call broadcast_scalar (f_narea, master_task) + call broadcast_scalar (f_earea, master_task) call broadcast_scalar (f_dxt, master_task) call broadcast_scalar (f_dyt, master_task) call broadcast_scalar (f_dxu, master_task) call broadcast_scalar (f_dyu, master_task) + call broadcast_scalar (f_dxn, master_task) + call broadcast_scalar (f_dyn, master_task) + call broadcast_scalar (f_dxe, master_task) + call broadcast_scalar (f_dye, master_task) call broadcast_scalar (f_HTN, master_task) call broadcast_scalar (f_HTE, master_task) call broadcast_scalar (f_ANGLE, master_task) @@ -321,6 +468,16 @@ subroutine init_hist (dt) call broadcast_scalar (f_aice, master_task) call broadcast_scalar (f_uvel, master_task) call broadcast_scalar (f_vvel, master_task) + call broadcast_scalar (f_icespd, master_task) + call broadcast_scalar (f_icedir, master_task) + call broadcast_scalar (f_uvelE, master_task) + call broadcast_scalar (f_vvelE, master_task) + call broadcast_scalar (f_icespdE, master_task) + call broadcast_scalar (f_icedirE, master_task) + call broadcast_scalar (f_uvelN, master_task) + call broadcast_scalar (f_vvelN, master_task) + call broadcast_scalar (f_icespdN, master_task) + call broadcast_scalar (f_icedirN, master_task) call broadcast_scalar (f_uatm, master_task) call broadcast_scalar (f_vatm, master_task) call broadcast_scalar (f_atmspd, master_task) @@ -397,6 +554,30 @@ subroutine init_hist (dt) call broadcast_scalar (f_strinty, master_task) call broadcast_scalar (f_taubx, master_task) call broadcast_scalar (f_tauby, master_task) + call broadcast_scalar (f_strairxN, master_task) + call broadcast_scalar (f_strairyN, master_task) + call broadcast_scalar (f_strtltxN, master_task) + call broadcast_scalar (f_strtltyN, master_task) + call broadcast_scalar (f_strcorxN, master_task) + call broadcast_scalar (f_strcoryN, master_task) + call broadcast_scalar (f_strocnxN, master_task) + call broadcast_scalar (f_strocnyN, master_task) + call broadcast_scalar (f_strintxN, master_task) + call broadcast_scalar (f_strintyN, master_task) + call broadcast_scalar (f_taubxN, master_task) + call broadcast_scalar (f_taubyN, master_task) + call broadcast_scalar (f_strairxE, master_task) + call broadcast_scalar (f_strairyE, master_task) + call broadcast_scalar (f_strtltxE, master_task) + call broadcast_scalar 
(f_strtltyE, master_task) + call broadcast_scalar (f_strcorxE, master_task) + call broadcast_scalar (f_strcoryE, master_task) + call broadcast_scalar (f_strocnxE, master_task) + call broadcast_scalar (f_strocnyE, master_task) + call broadcast_scalar (f_strintxE, master_task) + call broadcast_scalar (f_strintyE, master_task) + call broadcast_scalar (f_taubxE, master_task) + call broadcast_scalar (f_taubyE, master_task) call broadcast_scalar (f_strength, master_task) call broadcast_scalar (f_divu, master_task) call broadcast_scalar (f_shear, master_task) @@ -541,38 +722,88 @@ subroutine init_hist (dt) "snow/ice surface temperature", & "averaged with Tf if no ice is present", c1, c0, & ns1, f_Tsfc) - + call define_hist_field(n_aice,"aice","1",tstr2D, tcstr, & "ice area (aggregate)", & "none", c1, c0, & ns1, f_aice) + + call define_hist_field(n_uvelE,"uvelE","m/s",estr2D, ecstr, & + "ice velocity (x)", & + "positive is x direction on E grid", c1, c0, & + ns1, f_uvelE) + + call define_hist_field(n_vvelE,"vvelE","m/s",estr2D, ecstr, & + "ice velocity (y)", & + "positive is y direction on E grid", c1, c0, & + ns1, f_vvelE) + + call define_hist_field(n_icespdE,"icespdE","m/s",estr2D, ecstr, & + "sea ice speed", & + "vector magnitude on E grid", c1, c0, & + ns1, f_icespdE) + + call define_hist_field(n_icedirE,"icedirE","deg",estr2D, ecstr, & + "sea ice direction", & + "vector direction - coming from on E grid", c1, c0, & + ns1, f_icedirE) + + call define_hist_field(n_uvelN,"uvelN","m/s",nstr2D, ncstr, & + "ice velocity (x)", & + "positive is x direction on N grid", c1, c0, & + ns1, f_uvelN) + + call define_hist_field(n_vvelN,"vvelN","m/s",nstr2D, ncstr, & + "ice velocity (y)", & + "positive is y direction on N grid", c1, c0, & + ns1, f_vvelN) + + call define_hist_field(n_icespdN,"icespdN","m/s",nstr2D, ncstr, & + "sea ice speed", & + "vector magnitude on N grid", c1, c0, & + ns1, f_icespdN) + + call define_hist_field(n_icedirN,"icedirN","deg",nstr2D, ncstr, & + "sea ice direction", & + "vector direction - coming from on N grid", c1, c0, & + ns1, f_icedirN) call define_hist_field(n_uvel,"uvel","m/s",ustr2D, ucstr, & "ice velocity (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_uvel) - + call define_hist_field(n_vvel,"vvel","m/s",ustr2D, ucstr, & "ice velocity (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_vvel) - call define_hist_field(n_uatm,"uatm","m/s",ustr2D, ucstr, & + call define_hist_field(n_icespd,"icespd","m/s",ustr2D, ucstr, & + "sea ice speed", & + "vector magnitude", c1, c0, & + ns1, f_icespd) + + call define_hist_field(n_icedir,"icedir","deg",ustr2D, ucstr, & + "sea ice direction", & + "vector direction - coming from", c1, c0, & + ns1, f_icedir) + + call define_hist_field(n_uatm,"uatm","m/s",str2D_gau, cstr_gau, & "atm velocity (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_uatm) - call define_hist_field(n_vatm,"vatm","m/s",ustr2D, ucstr, & + call define_hist_field(n_vatm,"vatm","m/s",str2D_gav, cstr_gav, & "atm velocity (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_vatm) - call define_hist_field(n_atmspd,"atmspd","m/s",ustr2D, ucstr, & + call define_hist_field(n_atmspd,"atmspd","m/s",str2D_gau, cstr_gau, & "atmosphere wind speed", & "vector magnitude", c1, c0, & ns1, f_atmspd) - call define_hist_field(n_atmdir,"atmdir","deg",ustr2D, ucstr, & + call define_hist_field(n_atmdir,"atmdir","deg",str2D_gau, cstr_gau, & "atmosphere wind direction", & "vector direction - coming from", c1, c0, & ns1, f_atmdir) @@ -627,22 +858,22 @@ 
subroutine init_hist (dt) "none", c1, c0, & ns1, f_sss) - call define_hist_field(n_uocn,"uocn","m/s",ustr2D, ucstr, & + call define_hist_field(n_uocn,"uocn","m/s",str2D_gou, cstr_gou, & "ocean current (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_uocn) - call define_hist_field(n_vocn,"vocn","m/s",ustr2D, ucstr, & + call define_hist_field(n_vocn,"vocn","m/s",str2D_gov, cstr_gov, & "ocean current (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_vocn) - call define_hist_field(n_ocnspd,"ocnspd","m/s",ustr2D, ucstr, & + call define_hist_field(n_ocnspd,"ocnspd","m/s",str2D_gou, cstr_gou, & "ocean current speed", & "vector magnitude", c1, c0, & ns1, f_ocnspd) - call define_hist_field(n_ocndir,"ocndir","deg",ustr2D, ucstr, & + call define_hist_field(n_ocndir,"ocndir","deg",str2D_gou, cstr_gou, & "ocean current direction", & "vector direction - going to", c1, c0, & ns1, f_ocndir) @@ -938,6 +1169,126 @@ subroutine init_hist (dt) "positive is y direction on U grid", c1, c0, & ns1, f_tauby) + call define_hist_field(n_strairxN,"strairxN","N/m^2",nstr2D, ncstr, & + "atm/ice stress (x)", & + "positive is x direction on N grid", c1, c0, & + ns1, f_strairxN) + + call define_hist_field(n_strairyN,"strairyN","N/m^2",nstr2D, ncstr, & + "atm/ice stress (y)", & + "positive is y direction on N grid", c1, c0, & + ns1, f_strairyN) + + call define_hist_field(n_strairxE,"strairxE","N/m^2",estr2D, ecstr, & + "atm/ice stress (x)", & + "positive is x direction on E grid", c1, c0, & + ns1, f_strairxE) + + call define_hist_field(n_strairyE,"strairyE","N/m^2",estr2D, ecstr, & + "atm/ice stress (y)", & + "positive is y direction on E grid", c1, c0, & + ns1, f_strairyE) + + call define_hist_field(n_strtltxN,"strtltxN","N/m^2",nstr2D, ncstr, & + "sea sfc tilt stress (x)", & + "positive is x direction on N grid", c1, c0, & + ns1, f_strtltxN) + + call define_hist_field(n_strtltyN,"strtltyN","N/m^2",nstr2D, ncstr, & + "sea sfc tilt stress (y)", & + "positive is y direction on N grid", c1, c0, & + ns1, f_strtltyN) + + call define_hist_field(n_strtltxE,"strtltxE","N/m^2",estr2D, ecstr, & + "sea sfc tilt stress (x)", & + "positive is x direction on E grid", c1, c0, & + ns1, f_strtltxE) + + call define_hist_field(n_strtltyE,"strtltyE","N/m^2",estr2D, ecstr, & + "sea sfc tilt stress (y)", & + "positive is y direction on E grid", c1, c0, & + ns1, f_strtltyE) + + call define_hist_field(n_strcorxN,"strcorxN","N/m^2",nstr2D, ncstr, & + "coriolis stress (x)", & + "positive is x direction on N grid", c1, c0, & + ns1, f_strcorxN) + + call define_hist_field(n_strcoryN,"strcoryN","N/m^2",nstr2D, ncstr, & + "coriolis stress (y)", & + "positive is y direction on N grid", c1, c0, & + ns1, f_strcoryN) + + call define_hist_field(n_strcorxE,"strcorxE","N/m^2",estr2D, ecstr, & + "coriolis stress (x)", & + "positive is x direction on E grid", c1, c0, & + ns1, f_strcorxE) + + call define_hist_field(n_strcoryE,"strcoryE","N/m^2",estr2D, ecstr, & + "coriolis stress (y)", & + "positive is y direction on E grid", c1, c0, & + ns1, f_strcoryE) + + call define_hist_field(n_strocnxN,"strocnxN","N/m^2",nstr2D, ncstr, & + "ocean/ice stress (x)", & + "positive is x direction on N grid", c1, c0, & + ns1, f_strocnxN) + + call define_hist_field(n_strocnyN,"strocnyN","N/m^2",nstr2D, ncstr, & + "ocean/ice stress (y)", & + "positive is y direction on N grid", c1, c0, & + ns1, f_strocnyN) + + call define_hist_field(n_strocnxE,"strocnxE","N/m^2",estr2D, ecstr, & + "ocean/ice stress (x)", & + "positive is x direction on E grid", c1, c0, & + 
ns1, f_strocnxE) + + call define_hist_field(n_strocnyE,"strocnyE","N/m^2",estr2D, ecstr, & + "ocean/ice stress (y)", & + "positive is y direction on E grid", c1, c0, & + ns1, f_strocnyE) + + call define_hist_field(n_strintxN,"strintxN","N/m^2",nstr2D, ncstr, & + "internal ice stress (x)", & + "positive is x direction on N grid", c1, c0, & + ns1, f_strintxN) + + call define_hist_field(n_strintyN,"strintyN","N/m^2",nstr2D, ncstr, & + "internal ice stress (y)", & + "positive is y direction on N grid", c1, c0, & + ns1, f_strintyN) + + call define_hist_field(n_strintxE,"strintxE","N/m^2",estr2D, ecstr, & + "internal ice stress (x)", & + "positive is x direction on E grid", c1, c0, & + ns1, f_strintxE) + + call define_hist_field(n_strintyE,"strintyE","N/m^2",estr2D, ecstr, & + "internal ice stress (y)", & + "positive is y direction on E grid", c1, c0, & + ns1, f_strintyE) + + call define_hist_field(n_taubxN,"taubxN","N/m^2",nstr2D, ncstr, & + "seabed (basal) stress (x)", & + "positive is x direction on N grid", c1, c0, & + ns1, f_taubxN) + + call define_hist_field(n_taubyN,"taubyN","N/m^2",nstr2D, ncstr, & + "seabed (basal) stress (y)", & + "positive is y direction on N grid", c1, c0, & + ns1, f_taubyN) + + call define_hist_field(n_taubxE,"taubxE","N/m^2",estr2D, ecstr, & + "seabed (basal) stress (x)", & + "positive is x direction on E grid", c1, c0, & + ns1, f_taubxE) + + call define_hist_field(n_taubyE,"taubyE","N/m^2",estr2D, ecstr, & + "seabed (basal) stress (y)", & + "positive is y direction on E grid", c1, c0, & + ns1, f_taubyE) + call define_hist_field(n_strength,"strength","N/m",tstr2D, tcstr, & "compressive ice strength", & "none", c1, c0, & @@ -953,19 +1304,26 @@ subroutine init_hist (dt) "none", secday*c100, c0, & ns1, f_shear) + select case (grid_ice) + case('B') + description = ", on U grid (NE corner values)" + case ('CD','C') + description = ", on T grid" + end select + call define_hist_field(n_sig1,"sig1","1",ustr2D, ucstr, & "norm. principal stress 1", & - "sig1 is instantaneous", c1, c0, & + "sig1 is instantaneous" // trim(description), c1, c0, & ns1, f_sig1) call define_hist_field(n_sig2,"sig2","1",ustr2D, ucstr, & "norm. principal stress 2", & - "sig2 is instantaneous", c1, c0, & + "sig2 is instantaneous" // trim(description), c1, c0, & ns1, f_sig2) call define_hist_field(n_sigP,"sigP","1",ustr2D, ucstr, & "ice pressure", & - "sigP is instantaneous", c1, c0, & + "sigP is instantaneous" // trim(description), c1, c0, & ns1, f_sigP) call define_hist_field(n_dvidtt,"dvidtt","cm/day",tstr2D, tcstr, & @@ -1595,13 +1953,22 @@ subroutine init_hist (dt) igrd=.true. 
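! A minimal sketch (not part of this patch) of the registration/accumulation
! pattern the new N- and E-grid diagnostics above follow: each field is
! registered once with define_hist_field on the matching stagger
! (nstr2D/ncstr or estr2D/ecstr) and accumulated only when its namelist
! flag is not 'x'.  "fooE", f_fooE, n_fooE below are illustrative names,
! not real CICE fields:
!
!    call define_hist_field(n_fooE,"fooE","m/s",estr2D, ecstr, &
!         "example E-grid field", &
!         "positive is x direction on E grid", c1, c0, &
!         ns1, f_fooE)
!    ...
!    if (f_fooE(1:1) /= 'x') &
!       call accum_hist_field(n_fooE, iblk, fooE(:,:,iblk), a2D)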
igrd(n_tmask ) = f_tmask + igrd(n_umask ) = f_umask + igrd(n_nmask ) = f_nmask + igrd(n_emask ) = f_emask igrd(n_blkmask ) = f_blkmask igrd(n_tarea ) = f_tarea igrd(n_uarea ) = f_uarea + igrd(n_narea ) = f_narea + igrd(n_earea ) = f_earea igrd(n_dxt ) = f_dxt igrd(n_dyt ) = f_dyt igrd(n_dxu ) = f_dxu igrd(n_dyu ) = f_dyu + igrd(n_dxn ) = f_dxn + igrd(n_dyn ) = f_dyn + igrd(n_dxe ) = f_dxe + igrd(n_dye ) = f_dye igrd(n_HTN ) = f_HTN igrd(n_HTE ) = f_HTE igrd(n_ANGLE ) = f_ANGLE @@ -1724,7 +2091,7 @@ subroutine accum_hist (dt) use ice_blocks, only: block, get_block, nx_block, ny_block use ice_domain, only: blocks_ice, nblocks use ice_domain_size, only: nfsd - use ice_grid, only: tmask, lmask_n, lmask_s, dxu, dyu + use ice_grid, only: tmask, lmask_n, lmask_s, dxu, dyu, grid_ice use ice_calendar, only: new_year, write_history, & write_ic, timesecs, histfreq, nstreams, mmonth, & new_month @@ -1736,11 +2103,17 @@ subroutine accum_hist (dt) albice, albsno, albpnd, coszen, flat, fsens, flwout, evap, evaps, evapi, & Tair, Tref, Qref, congel, frazil, frazil_diag, snoice, dsnow, & melts, meltb, meltt, meltl, fresh, fsalt, fresh_ai, fsalt_ai, & - fhocn, fhocn_ai, uatm, vatm, fbot, Tbot, Tsnice, & - fswthru_ai, strairx, strairy, strtltx, strtlty, strintx, strinty, & - taubx, tauby, strocnx, strocny, fm, daidtt, dvidtt, daidtd, dvidtd, fsurf, & + fhocn, fhocn_ai, uatm, vatm, fbot, Tbot, Tsnice, fswthru_ai, & + strairx, strairy, strtltx, strtlty, strintx, strinty, & + taubx, tauby, strocnx, strocny, & + strairxN, strairyN, strtltxN, strtltyN, strintxN, strintyN, & + taubxN, taubyN, strocnxN, strocnyN, & + strairxE, strairyE, strtltxE, strtltyE, strintxE, strintyE, & + taubxE, taubyE, strocnxE, strocnyE, & + fm, fmN, fmE, daidtt, dvidtt, daidtd, dvidtd, fsurf, & fcondtop, fcondbot, fsurfn, fcondtopn, flatn, fsensn, albcnt, snwcnt, & stressp_1, stressm_1, stress12_1, & + stresspT, stressmT, stress12T, & stressp_2, & stressp_3, & stressp_4, sig1, sig2, sigP, & @@ -1920,6 +2293,59 @@ subroutine accum_hist (dt) call accum_hist_field(n_uvel, iblk, uvel(:,:,iblk), a2D) if (f_vvel (1:1) /= 'x') & call accum_hist_field(n_vvel, iblk, vvel(:,:,iblk), a2D) + if (f_icespd (1:1) /= 'x') & + call accum_hist_field(n_icespd, iblk, sqrt( & + (uvel(:,:,iblk)*uvel(:,:,iblk)) + & + (vvel(:,:,iblk)*vvel(:,:,iblk))), a2D) + if (f_icedir(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (abs(uvel(i,j,iblk)) > puny .or. abs(vvel(i,j,iblk)) > puny) & + worka(i,j) = atan2(uvel(i,j,iblk),vvel(i,j,iblk))*rad_to_deg + worka(i,j) = worka(i,j) + c180 + enddo + enddo + call accum_hist_field(n_icedir, iblk, worka(:,:), a2D) + endif + if (f_uvelN (1:1) /= 'x') & + call accum_hist_field(n_uvelN, iblk, uvelN(:,:,iblk), a2D) + if (f_vvelN (1:1) /= 'x') & + call accum_hist_field(n_vvelN, iblk, vvelN(:,:,iblk), a2D) + if (f_icespdN (1:1) /= 'x') & + call accum_hist_field(n_icespdN, iblk, sqrt( & + (uvelN(:,:,iblk)*uvelN(:,:,iblk)) + & + (vvelN(:,:,iblk)*vvelN(:,:,iblk))), a2D) + if (f_icedirN(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (abs(uvelN(i,j,iblk)) > puny .or. 
abs(vvelN(i,j,iblk)) > puny) & + worka(i,j) = atan2(uvelN(i,j,iblk),vvelN(i,j,iblk))*rad_to_deg + worka(i,j) = worka(i,j) + c180 + enddo + enddo + call accum_hist_field(n_icedirN, iblk, worka(:,:), a2D) + endif + if (f_uvelE (1:1) /= 'x') & + call accum_hist_field(n_uvelE, iblk, uvelE(:,:,iblk), a2D) + if (f_vvelE (1:1) /= 'x') & + call accum_hist_field(n_vvelE, iblk, vvelE(:,:,iblk), a2D) + if (f_icespdE (1:1) /= 'x') & + call accum_hist_field(n_icespdE, iblk, sqrt( & + (uvelE(:,:,iblk)*uvelE(:,:,iblk)) + & + (vvelE(:,:,iblk)*vvelE(:,:,iblk))), a2D) + if (f_icedirE(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (abs(uvelE(i,j,iblk)) > puny .or. abs(vvelE(i,j,iblk)) > puny) & + worka(i,j) = atan2(uvelE(i,j,iblk),vvelE(i,j,iblk))*rad_to_deg + worka(i,j) = worka(i,j) + c180 + enddo + enddo + call accum_hist_field(n_icedirE, iblk, worka(:,:), a2D) + endif if (f_uatm (1:1) /= 'x') & call accum_hist_field(n_uatm, iblk, uatm(:,:,iblk), a2D) if (f_vatm (1:1) /= 'x') & @@ -2125,6 +2551,54 @@ subroutine accum_hist (dt) call accum_hist_field(n_taubx, iblk, taubx(:,:,iblk), a2D) if (f_tauby(1:1) /= 'x') & call accum_hist_field(n_tauby, iblk, tauby(:,:,iblk), a2D) + if (f_strairxN(1:1) /= 'x') & + call accum_hist_field(n_strairxN, iblk, strairxN(:,:,iblk), a2D) + if (f_strairyN(1:1) /= 'x') & + call accum_hist_field(n_strairyN, iblk, strairyN(:,:,iblk), a2D) + if (f_strairxE(1:1) /= 'x') & + call accum_hist_field(n_strairxE, iblk, strairxE(:,:,iblk), a2D) + if (f_strairyE(1:1) /= 'x') & + call accum_hist_field(n_strairyE, iblk, strairyE(:,:,iblk), a2D) + if (f_strtltxN(1:1) /= 'x') & + call accum_hist_field(n_strtltxN, iblk, strtltxN(:,:,iblk), a2D) + if (f_strtltyN(1:1) /= 'x') & + call accum_hist_field(n_strtltyN, iblk, strtltyN(:,:,iblk), a2D) + if (f_strtltxE(1:1) /= 'x') & + call accum_hist_field(n_strtltxE, iblk, strtltxE(:,:,iblk), a2D) + if (f_strtltyE(1:1) /= 'x') & + call accum_hist_field(n_strtltyE, iblk, strtltyE(:,:,iblk), a2D) + if (f_strcorxN(1:1) /= 'x') & + call accum_hist_field(n_strcorxN, iblk, fmN(:,:,iblk)*vvelN(:,:,iblk), a2D) + if (f_strcoryN(1:1) /= 'x') & + call accum_hist_field(n_strcoryN, iblk,-fmN(:,:,iblk)*uvelN(:,:,iblk), a2D) + if (f_strcorxE(1:1) /= 'x') & + call accum_hist_field(n_strcorxE, iblk, fmE(:,:,iblk)*vvelE(:,:,iblk), a2D) + if (f_strcoryE(1:1) /= 'x') & + call accum_hist_field(n_strcoryE, iblk,-fmE(:,:,iblk)*uvelE(:,:,iblk), a2D) + if (f_strocnxN(1:1) /= 'x') & + call accum_hist_field(n_strocnxN, iblk, strocnxN(:,:,iblk), a2D) + if (f_strocnyN(1:1) /= 'x') & + call accum_hist_field(n_strocnyN, iblk, strocnyN(:,:,iblk), a2D) + if (f_strocnxE(1:1) /= 'x') & + call accum_hist_field(n_strocnxE, iblk, strocnxE(:,:,iblk), a2D) + if (f_strocnyE(1:1) /= 'x') & + call accum_hist_field(n_strocnyE, iblk, strocnyE(:,:,iblk), a2D) + if (f_strintxN(1:1) /= 'x') & + call accum_hist_field(n_strintxN, iblk, strintxN(:,:,iblk), a2D) + if (f_strintyN(1:1) /= 'x') & + call accum_hist_field(n_strintyN, iblk, strintyN(:,:,iblk), a2D) + if (f_strintxE(1:1) /= 'x') & + call accum_hist_field(n_strintxE, iblk, strintxE(:,:,iblk), a2D) + if (f_strintyE(1:1) /= 'x') & + call accum_hist_field(n_strintyE, iblk, strintyE(:,:,iblk), a2D) + if (f_taubxN(1:1) /= 'x') & + call accum_hist_field(n_taubxN, iblk, taubxN(:,:,iblk), a2D) + if (f_taubyN(1:1) /= 'x') & + call accum_hist_field(n_taubyN, iblk, taubyN(:,:,iblk), a2D) + if (f_taubxE(1:1) /= 'x') & + call accum_hist_field(n_taubxE, iblk, taubxE(:,:,iblk), a2D) + if (f_taubyE(1:1) /= 'x') & + call 
accum_hist_field(n_taubyE, iblk, taubyE(:,:,iblk), a2D) if (f_strength(1:1)/= 'x') & call accum_hist_field(n_strength,iblk, strength(:,:,iblk), a2D) @@ -3925,17 +4399,28 @@ subroutine accum_hist (dt) ! snapshots !--------------------------------------------------------------- - ! compute sig1 and sig2 - - call principal_stress (nx_block, ny_block, & - stressp_1 (:,:,iblk), & - stressm_1 (:,:,iblk), & - stress12_1(:,:,iblk), & - strength (:,:,iblk), & - sig1 (:,:,iblk), & - sig2 (:,:,iblk), & - sigP (:,:,iblk)) - + ! compute sig1 and sig2 + select case (grid_ice) + case('B') + call principal_stress (nx_block, ny_block, & + stressp_1 (:,:,iblk), & + stressm_1 (:,:,iblk), & + stress12_1(:,:,iblk), & + strength (:,:,iblk), & + sig1 (:,:,iblk), & + sig2 (:,:,iblk), & + sigP (:,:,iblk)) + case('CD','C') + call principal_stress (nx_block, ny_block, & + stresspT (:,:,iblk), & + stressmT (:,:,iblk), & + stress12T (:,:,iblk), & + strength (:,:,iblk), & + sig1 (:,:,iblk), & + sig2 (:,:,iblk), & + sigP (:,:,iblk)) + end select + do j = jlo, jhi do i = ilo, ihi if (.not. tmask(i,j,iblk)) then ! mask out land points diff --git a/cicecore/cicedynB/analysis/ice_history_shared.F90 b/cicecore/cicedynB/analysis/ice_history_shared.F90 index 9b58deeec..aea1d4bcf 100644 --- a/cicecore/cicedynB/analysis/ice_history_shared.F90 +++ b/cicecore/cicedynB/analysis/ice_history_shared.F90 @@ -120,9 +120,9 @@ module ice_history_shared avail_hist_fields(max_avail_hist_fields) integer (kind=int_kind), parameter, public :: & - nvar = 12 , & ! number of grid fields that can be written + nvar_grd = 21 , & ! number of grid fields that can be written ! excluding grid vertices - nvarz = 6 ! number of category/vertical grid fields written + nvar_grdz = 6 ! number of category/vertical grid fields written integer (kind=int_kind), public :: & ncat_hist , & ! number of thickness categories written <= ncat @@ -152,32 +152,52 @@ module ice_history_shared avgct(max_nstrm) ! average sample counter logical (kind=log_kind), public :: & - igrd (nvar), & ! true if grid field is written to output file - igrdz(nvarz) ! true if category/vertical grid field is written + igrd (nvar_grd), & ! true if grid field is written to output file + igrdz(nvar_grdz) ! true if category/vertical grid field is written character (len=25), public, parameter :: & tcstr = 'area: tarea' , & ! vcellmeas for T cell quantities ucstr = 'area: uarea' , & ! vcellmeas for U cell quantities + ncstr = 'area: narea' , & ! vcellmeas for N cell quantities + ecstr = 'area: earea' , & ! vcellmeas for E cell quantities tstr2D = 'TLON TLAT time' , & ! vcoord for T cell quantities, 2D ustr2D = 'ULON ULAT time' , & ! vcoord for U cell quantities, 2D + nstr2D = 'NLON NLAT time' , & ! vcoord for N cell quantities, 2D + estr2D = 'ELON ELAT time' , & ! vcoord for E cell quantities, 2D tstr3Dz = 'TLON TLAT VGRDi time',& ! vcoord for T cell quantities, 3D ustr3Dz = 'ULON ULAT VGRDi time',& ! vcoord for U cell quantities, 3D + nstr3Dz = 'NLON NLAT VGRDi time',& ! vcoord for N cell quantities, 3D + estr3Dz = 'ELON ELAT VGRDi time',& ! vcoord for E cell quantities, 3D tstr3Dc = 'TLON TLAT NCAT time',& ! vcoord for T cell quantities, 3D ustr3Dc = 'ULON ULAT NCAT time',& ! vcoord for U cell quantities, 3D + nstr3Dc = 'NLON NLAT NCAT time',& ! vcoord for N cell quantities, 3D + estr3Dc = 'ELON ELAT NCAT time',& ! vcoord for E cell quantities, 3D tstr3Db = 'TLON TLAT VGRDb time',& ! vcoord for T cell quantities, 3D ustr3Db = 'ULON ULAT VGRDb time',& ! 
vcoord for U cell quantities, 3D + nstr3Db = 'NLON NLAT VGRDb time',& ! vcoord for N cell quantities, 3D + estr3Db = 'ELON ELAT VGRDb time',& ! vcoord for E cell quantities, 3D tstr3Da = 'TLON TLAT VGRDa time',& ! vcoord for T cell quantities, 3D ustr3Da = 'ULON ULAT VGRDa time',& ! vcoord for U cell quantities, 3D + nstr3Da = 'NLON NLAT VGRDa time',& ! vcoord for N cell quantities, 3D + estr3Da = 'ELON ELAT VGRDa time',& ! vcoord for E cell quantities, 3D tstr3Df = 'TLON TLAT NFSD time',& ! vcoord for T cell quantities, 3D ustr3Df = 'ULON ULAT NFSD time',& ! vcoord for U cell quantities, 3D + nstr3Df = 'NLON NLAT NFSD time',& ! vcoord for N cell quantities, 3D + estr3Df = 'ELON ELAT NFSD time',& ! vcoord for E cell quantities, 3D !ferret tstr4Di = 'TLON TLAT VGRDi NCAT', & ! vcoord for T cell, 4D, ice ustr4Di = 'ULON ULAT VGRDi NCAT', & ! vcoord for U cell, 4D, ice + nstr4Di = 'NLON NLAT VGRDi NCAT', & ! vcoord for N cell, 4D, ice + estr4Di = 'ELON ELAT VGRDi NCAT', & ! vcoord for E cell, 4D, ice tstr4Ds = 'TLON TLAT VGRDs NCAT', & ! vcoord for T cell, 4D, snow ustr4Ds = 'ULON ULAT VGRDs NCAT', & ! vcoord for U cell, 4D, snow + nstr4Ds = 'NLON NLAT VGRDs NCAT', & ! vcoord for N cell, 4D, snow + estr4Ds = 'ELON ELAT VGRDs NCAT', & ! vcoord for E cell, 4D, snow tstr4Df = 'TLON TLAT NFSD NCAT', & ! vcoord for T cell, 4D, fsd - ustr4Df = 'ULON ULAT NFSD NCAT' ! vcoord for U cell, 4D, fsd + ustr4Df = 'ULON ULAT NFSD NCAT', & ! vcoord for U cell, 4D, fsd + nstr4Df = 'NLON NLAT NFSD NCAT', & ! vcoord for N cell, 4D, fsd + estr4Df = 'ELON ELAT NFSD NCAT' ! vcoord for E cell, 4D, fsd !ferret ! tstr4Di = 'TLON TLAT VGRDi NCAT time', & ! ferret can not handle time ! ustr4Di = 'ULON ULAT VGRDi NCAT time', & ! index on 4D variables. @@ -193,10 +213,15 @@ module ice_history_shared !--------------------------------------------------------------- logical (kind=log_kind), public :: & - f_tmask = .true., f_blkmask = .true., & + f_tmask = .true., f_umask = .true., & + f_nmask = .true., f_emask = .true., & + f_blkmask = .true., & f_tarea = .true., f_uarea = .true., & + f_narea = .true., f_earea = .true., & f_dxt = .true., f_dyt = .true., & f_dxu = .true., f_dyu = .true., & + f_dxn = .true., f_dyn = .true., & + f_dxe = .true., f_dye = .true., & f_HTN = .true., f_HTE = .true., & f_ANGLE = .true., f_ANGLET = .true., & f_bounds = .true., f_NCAT = .true., & @@ -210,6 +235,11 @@ module ice_history_shared f_snowfrac = 'x', f_snowfracn = 'x', & f_Tsfc = 'm', f_aice = 'm', & f_uvel = 'm', f_vvel = 'm', & + f_icespd = 'm', f_icedir = 'm', & + f_uvelE = 'x', f_vvelE = 'x', & + f_icespdE = 'x', f_icedirE = 'x', & + f_uvelN = 'x', f_vvelN = 'x', & + f_icespdN = 'x', f_icedirN = 'x', & f_uatm = 'm', f_vatm = 'm', & f_atmspd = 'm', f_atmdir = 'm', & f_fswup = 'm', & @@ -250,6 +280,18 @@ module ice_history_shared f_strocnx = 'm', f_strocny = 'm', & f_strintx = 'm', f_strinty = 'm', & f_taubx = 'm', f_tauby = 'm', & + f_strairxN = 'x', f_strairyN = 'x', & + f_strtltxN = 'x', f_strtltyN = 'x', & + f_strcorxN = 'x', f_strcoryN = 'x', & + f_strocnxN = 'x', f_strocnyN = 'x', & + f_strintxN = 'x', f_strintyN = 'x', & + f_taubxN = 'x', f_taubyN = 'x', & + f_strairxE = 'x', f_strairyE = 'x', & + f_strtltxE = 'x', f_strtltyE = 'x', & + f_strcorxE = 'x', f_strcoryE = 'x', & + f_strocnxE = 'x', f_strocnyE = 'x', & + f_strintxE = 'x', f_strintyE = 'x', & + f_taubxE = 'x', f_taubyE = 'x', & f_strength = 'm', & f_divu = 'm', f_shear = 'm', & f_sig1 = 'm', f_sig2 = 'm', & @@ -339,10 +381,15 @@ module ice_history_shared 
!--------------------------------------------------------------- namelist / icefields_nml / & - f_tmask , f_blkmask , & + f_tmask , f_umask , & + f_nmask , f_emask , & + f_blkmask , & f_tarea , f_uarea , & + f_narea , f_earea , & f_dxt , f_dyt , & f_dxu , f_dyu , & + f_dxn , f_dyn , & + f_dxe , f_dye , & f_HTN , f_HTE , & f_ANGLE , f_ANGLET , & f_bounds , f_NCAT , & @@ -354,6 +401,12 @@ module ice_history_shared f_snowfrac, f_snowfracn, & f_Tsfc, f_aice , & f_uvel, f_vvel , & + f_icespd, f_icedir , & +! For now, don't allow the users to modify the CD grid quantities. +! f_uvelE, f_vvelE , & +! f_icespdE, f_icedirE , & +! f_uvelN, f_vvelN , & +! f_icespdN, f_icedirN , & f_uatm, f_vatm , & f_atmspd, f_atmdir , & f_fswup, & @@ -394,6 +447,18 @@ module ice_history_shared f_strocnx, f_strocny , & f_strintx, f_strinty , & f_taubx, f_tauby , & +! f_strairxN, f_strairyN , & +! f_strtltxN, f_strtltyN , & +! f_strcorxN, f_strcoryN , & +! f_strocnxN, f_strocnyN , & +! f_strintxN, f_strintyN , & +! f_taubxN, f_taubyN , & +! f_strairxE, f_strairyE , & +! f_strtltxE, f_strtltyE , & +! f_strcorxE, f_strcoryE , & +! f_strocnxE, f_strocnyE , & +! f_strintxE, f_strintyE , & +! f_taubxE, f_taubyE , & f_strength, & f_divu, f_shear , & f_sig1, f_sig2 , & @@ -484,17 +549,26 @@ module ice_history_shared integer (kind=int_kind), parameter, public :: & n_tmask = 1, & - n_blkmask = 2, & - n_tarea = 3, & - n_uarea = 4, & - n_dxt = 5, & - n_dyt = 6, & - n_dxu = 7, & - n_dyu = 8, & - n_HTN = 9, & - n_HTE = 10, & - n_ANGLE = 11, & - n_ANGLET = 12, & + n_umask = 2, & + n_nmask = 3, & + n_emask = 4, & + n_blkmask = 5, & + n_tarea = 6, & + n_uarea = 7, & + n_narea = 8, & + n_earea = 9, & + n_dxt = 10, & + n_dyt = 11, & + n_dxu = 12, & + n_dyu = 13, & + n_dxn = 14, & + n_dyn = 15, & + n_dxe = 16, & + n_dye = 17, & + n_HTN = 18, & + n_HTE = 19, & + n_ANGLE = 20, & + n_ANGLET = 21, & n_NCAT = 1, & n_VGRDi = 2, & @@ -506,7 +580,11 @@ module ice_history_shared n_lont_bnds = 1, & n_latt_bnds = 2, & n_lonu_bnds = 3, & - n_latu_bnds = 4 + n_latu_bnds = 4, & + n_lonn_bnds = 5, & + n_latn_bnds = 6, & + n_lone_bnds = 7, & + n_late_bnds = 8 integer (kind=int_kind), dimension(max_nstrm), public :: & ! n_example , & @@ -514,6 +592,11 @@ module ice_history_shared n_snowfrac , n_snowfracn , & n_Tsfc , n_aice , & n_uvel , n_vvel , & + n_icespd , n_icedir , & + n_uvelE , n_vvelE , & + n_icespdE , n_icedirE , & + n_uvelN , n_vvelN , & + n_icespdN , n_icedirN , & n_uatm , n_vatm , & n_atmspd , n_atmdir , & n_sice , & @@ -556,6 +639,18 @@ module ice_history_shared n_strocnx , n_strocny , & n_strintx , n_strinty , & n_taubx , n_tauby , & + n_strairxN , n_strairyN , & + n_strtltxN , n_strtltyN , & + n_strcorxN , n_strcoryN , & + n_strocnxN , n_strocnyN , & + n_strintxN , n_strintyN , & + n_taubxN , n_taubyN , & + n_strairxE , n_strairyE , & + n_strtltxE , n_strtltyE , & + n_strcorxE , n_strcoryE , & + n_strocnxE , n_strocnyE , & + n_strintxE , n_strintyE , & + n_taubxE , n_taubyE , & n_strength , & n_divu , n_shear , & n_sig1 , n_sig2 , & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index eaaa54cd1..2b356ace0 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -5,14 +5,14 @@ ! ! See: ! -! Wilchinsky, A.V. and D.L. Feltham (2006). Modelling the rheology of -! sea ice as a collection of diamond-shaped floes. +! Wilchinsky, A.V. and D.L. Feltham (2006). Modelling the rheology of +! sea ice as a collection of diamond-shaped floes. ! 
Journal of Non-Newtonian Fluid Mechanics, 138(1), 22-32. ! ! Tsamados, M., D.L. Feltham, and A.V. Wilchinsky (2013). Impact on new ! anisotropic rheology on simulations of Arctic sea ice. JGR, 118, 91-107. ! -! authors: Michel Tsamados, CPOM +! authors: Michel Tsamados, CPOM ! David Schroeder, CPOM module ice_dyn_eap @@ -40,30 +40,30 @@ module ice_dyn_eap alloc_dyn_eap ! Look-up table needed for calculating structure tensor - integer (int_kind), parameter :: & - nx_yield = 41, & - ny_yield = 41, & - na_yield = 21 + integer (int_kind), parameter :: & + nx_yield = 41, & + ny_yield = 41, & + na_yield = 21 - real (kind=dbl_kind), dimension (nx_yield,ny_yield,na_yield) :: & - s11r, s12r, s22r, s11s, s12s, s22s + real (kind=dbl_kind), dimension (nx_yield,ny_yield,na_yield) :: & + s11r, s12r, s22r, s11s, s12s, s22s real (kind=dbl_kind), dimension (:,:,:), allocatable :: & - a11_1, a11_2, a11_3, a11_4, & ! components of - a12_1, a12_2, a12_3, a12_4 ! structure tensor + a11_1, a11_2, a11_3, a11_4, & ! components of + a12_1, a12_2, a12_3, a12_4 ! structure tensor ! history real (kind=dbl_kind), dimension(:,:,:), allocatable, public :: & - e11 , & ! components of strain rate tensor (1/s) - e12 , & - e22 , & + e11 , & ! components of strain rate tensor (1/s) + e12 , & + e22 , & yieldstress11, & ! components of yield stress tensor (kg/s^2) yieldstress12, & yieldstress22, & - s11 , & ! components of stress tensor (kg/s^2) - s12 , & - s22 , & - a11 , & ! components of structure tensor () + s11 , & ! components of stress tensor (kg/s^2) + s12 , & + s22 , & + a11 , & ! components of structure tensor () a12 ! private for reuse, set in init_eap @@ -76,13 +76,14 @@ module ice_dyn_eap contains !======================================================================= -! -! Allocate space for all variables +! Allocate space for all variables ! subroutine alloc_dyn_eap integer (int_kind) :: ierr + character(len=*), parameter :: subname = '(alloc_dyn_eap)' + allocate( a11_1 (nx_block,ny_block,max_blocks), & a11_2 (nx_block,ny_block,max_blocks), & a11_3 (nx_block,ny_block,max_blocks), & @@ -103,12 +104,11 @@ subroutine alloc_dyn_eap a11 (nx_block,ny_block,max_blocks), & a12 (nx_block,ny_block,max_blocks), & stat=ierr) - if (ierr/=0) call abort_ice('(alloc_dyn_eap): Out of memory') + if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory') end subroutine alloc_dyn_eap !======================================================================= -! ! Elastic-anisotropic-plastic dynamics driver ! based on subroutine evp @@ -116,9 +116,9 @@ subroutine eap (dt) #ifdef CICE_IN_NEMO ! Wind stress is set during this routine from the values supplied -! via NEMO (unless calc_strair is true). These values are supplied -! rotated on u grid and multiplied by aice. strairxT = 0 in this -! case so operations in dyn_prep1 are pointless but carried out to +! via NEMO (unless calc_strair is true). These values are supplied +! rotated on u grid and multiplied by aice. strairxT = 0 in this +! case so operations in dyn_prep1 are pointless but carried out to ! minimise code changes. 
#endif @@ -134,7 +134,7 @@ subroutine eap (dt) dyn_prep1, dyn_prep2, stepu, dyn_finish, & seabed_stress_factor_LKD, seabed_stress_factor_prob, & seabed_stress_method, seabed_stress, & - stack_velocity_field, unstack_velocity_field + stack_fields, unstack_fields use ice_flux, only: rdg_conv, strairxT, strairyT, & strairx, strairy, uocn, vocn, ss_tltx, ss_tlty, iceumask, fm, & strtltx, strtlty, strocnx, strocny, strintx, strinty, taubx, tauby, & @@ -144,7 +144,8 @@ subroutine eap (dt) stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_grid, only: tmask, umask, dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, & - tarear, uarear, to_ugrid, t2ugrid_vector, u2tgrid_vector + tarear, uarear, grid_average_X2Y, & + grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & aice_init, aice0, aicen, vicen, strength use ice_timers, only: timer_dynamics, timer_bound, & @@ -155,49 +156,57 @@ subroutine eap (dt) ! local variables - integer (kind=int_kind) :: & - ksub , & ! subcycle step - iblk , & ! block index + integer (kind=int_kind) :: & + ksub , & ! subcycle step + iblk , & ! block index ilo,ihi,jlo,jhi, & ! beginning and end of physical domain i, j, ij - integer (kind=int_kind), dimension(max_blocks) :: & - icellt , & ! no. of cells where icetmask = 1 - icellu ! no. of cells where iceumask = 1 + integer (kind=int_kind), dimension(max_blocks) :: & + icellt , & ! no. of cells where icetmask = 1 + icellu ! no. of cells where iceumask = 1 integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks) :: & - indxti , & ! compressed index in i-direction - indxtj , & ! compressed index in j-direction - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxti , & ! compressed index in i-direction + indxtj , & ! compressed index in j-direction + indxui , & ! compressed index in i-direction + indxuj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - tmass , & ! total mass of ice and snow (kg/m^2) - waterx , & ! for ocean stress calculation, x (m/s) - watery , & ! for ocean stress calculation, y (m/s) - forcex , & ! work array: combined atm stress and ocn tilt, x - forcey , & ! work array: combined atm stress and ocn tilt, y - aiu , & ! ice fraction on u-grid - umass , & ! total mass of ice and snow (u grid) - umassdti ! mass of U-cell/dte (kg/m^2 s) - - real (kind=dbl_kind), allocatable :: fld2(:,:,:,:) + uocnU , & ! i ocean current (m/s) + vocnU , & ! j ocean current (m/s) + tmass , & ! total mass of ice and snow (kg/m^2) + waterx , & ! for ocean stress calculation, x (m/s) + watery , & ! for ocean stress calculation, y (m/s) + forcex , & ! work array: combined atm stress and ocn tilt, x + forcey , & ! work array: combined atm stress and ocn tilt, y + aiu , & ! ice fraction on u-grid + umass , & ! total mass of ice and snow (u grid) + umassdti ! mass of U-cell/dte (kg/m^2 s) + + real (kind=dbl_kind), allocatable :: & + fld2(:,:,:,:) ! temporary for stacking fields for halo update real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & - strtmp ! stress combinations for momentum equation + strtmp ! stress combinations for momentum equation - logical (kind=log_kind) :: calc_strair + logical (kind=log_kind) :: & + calc_strair integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & - icetmask, & ! ice extent mask (T-cell) - halomask ! ice mask for halo update + icetmask , & ! 
ice extent mask (T-cell) + halomask ! ice mask for halo update type (ice_halo) :: & halo_info_mask ! ghost cell update info for masked halo type (block) :: & - this_block ! block information for current block - + this_block ! block information for current block + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 , & ! temporary + work2 ! temporary + character(len=*), parameter :: subname = '(eap)' call ice_timer_start(timer_dynamics) ! dynamics @@ -213,12 +222,12 @@ subroutine eap (dt) !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) SCHEDULE(runtime) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - rdg_conv (i,j,iblk) = c0 -! rdg_shear(i,j,iblk) = c0 - divu (i,j,iblk) = c0 - shear(i,j,iblk) = c0 + do j = 1, ny_block + do i = 1, nx_block + rdg_conv (i,j,iblk) = c0 +! rdg_shear(i,j,iblk) = c0 + divu (i,j,iblk) = c0 + shear(i,j,iblk) = c0 e11(i,j,iblk) = c0 e12(i,j,iblk) = c0 e22(i,j,iblk) = c0 @@ -231,22 +240,20 @@ subroutine eap (dt) enddo enddo - !----------------------------------------------------------------- - ! preparation for dynamics - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! preparation for dynamics + !----------------------------------------------------------------- - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - call dyn_prep1 (nx_block, ny_block, & + call dyn_prep1 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - aice (:,:,iblk), vice (:,:,iblk), & - vsno (:,:,iblk), tmask (:,:,iblk), & - strairxT(:,:,iblk), strairyT(:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & + aice (:,:,iblk), vice (:,:,iblk), & + vsno (:,:,iblk), tmask (:,:,iblk), & tmass (:,:,iblk), icetmask(:,:,iblk)) enddo ! iblk @@ -261,77 +268,84 @@ subroutine eap (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call to_ugrid(tmass,umass) - call to_ugrid(aice_init, aiu) + call grid_average_X2Y('F', tmass , 'T' , umass, 'U') + call grid_average_X2Y('F', aice_init, 'T' , aiu , 'U') + call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnU, 'U') + call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnU, 'U') !---------------------------------------------------------------- ! Set wind stress to values supplied via NEMO or other forcing ! This wind stress is rotated on u grid and multiplied by aice !---------------------------------------------------------------- + call icepack_query_parameters(calc_strair_out=calc_strair) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (.not. calc_strair) then - strairx(:,:,:) = strax(:,:,:) - strairy(:,:,:) = stray(:,:,:) + if (.not. 
calc_strair) then + call grid_average_X2Y('F', strax, grid_atm_dynu, strairx, 'U') + call grid_average_X2Y('F', stray, grid_atm_dynv, strairy, 'U') else - call t2ugrid_vector(strairx) - call t2ugrid_vector(strairy) + call ice_HaloUpdate (strairxT, halo_info, & + field_loc_center, field_type_vector) + call ice_HaloUpdate (strairyT, halo_info, & + field_loc_center, field_type_vector) + call grid_average_X2Y('F', strairxT, 'T', strairx, 'U') + call grid_average_X2Y('F', strairyT, 'T', strairy, 'U') endif !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) SCHEDULE(runtime) do iblk = 1, nblocks - !----------------------------------------------------------------- - ! more preparation for dynamics - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! more preparation for dynamics + !----------------------------------------------------------------- - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - call dyn_prep2 (nx_block, ny_block, & + call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt(iblk), icellu(iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & - aiu (:,:,iblk), umass (:,:,iblk), & - umassdti (:,:,iblk), fcor_blk (:,:,iblk), & - umask (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & - ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), & - icetmask (:,:,iblk), iceumask (:,:,iblk), & - fm (:,:,iblk), dt, & - strtltx (:,:,iblk), strtlty (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk), & - strintx (:,:,iblk), strinty (:,:,iblk), & - taubx (:,:,iblk), tauby (:,:,iblk), & - waterx (:,:,iblk), watery (:,:,iblk), & - forcex (:,:,iblk), forcey (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + icellt (iblk), icellu (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + aiu (:,:,iblk), umass (:,:,iblk), & + umassdti (:,:,iblk), fcor_blk (:,:,iblk), & + umask (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & + strairx (:,:,iblk), strairy (:,:,iblk), & + ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), & + icetmask (:,:,iblk), iceumask (:,:,iblk), & + fm (:,:,iblk), dt, & + strtltx (:,:,iblk), strtlty (:,:,iblk), & + strocnx (:,:,iblk), strocny (:,:,iblk), & + strintx (:,:,iblk), strinty (:,:,iblk), & + taubx (:,:,iblk), tauby (:,:,iblk), & + waterx (:,:,iblk), watery (:,:,iblk), & + forcex (:,:,iblk), forcey (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & uvel_init (:,:,iblk), vvel_init (:,:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & Tbu (:,:,iblk)) - !----------------------------------------------------------------- - ! Initialize structure tensor - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! 
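On the grid_average_X2Y('F', ...) calls above, which replace to_ugrid and t2ugrid_vector: a minimal sketch of a flux-conserving, area-weighted average from T cells to the U point at each cell's northeast corner, assuming tarea and uarea hold the T- and U-cell areas and ignoring land masking, halos, and the new N/E target grids (t2u_sketch and its argument list are hypothetical, not the CICE routine):

    subroutine t2u_sketch(nx, ny, tfld, tarea, uarea, ufld)
       ! illustrative area-weighted T->U average (B-grid NE corner)
       integer, parameter :: dbl_kind = selected_real_kind(13)
       integer, intent(in) :: nx, ny
       real (kind=dbl_kind), intent(in)  :: tfld(nx,ny), tarea(nx,ny), uarea(nx,ny)
       real (kind=dbl_kind), intent(out) :: ufld(nx,ny)
       integer :: i, j
       ufld = 0.0_dbl_kind
       do j = 1, ny-1
       do i = 1, nx-1
          ! U(i,j) sits at the NE corner of T cell (i,j): average the four
          ! surrounding T values weighted by T-cell area, normalized by uarea
          ufld(i,j) = 0.25_dbl_kind*(tfld(i  ,j  )*tarea(i  ,j  ) &
                                   + tfld(i+1,j  )*tarea(i+1,j  ) &
                                   + tfld(i  ,j+1)*tarea(i  ,j+1) &
                                   + tfld(i+1,j+1)*tarea(i+1,j+1))/uarea(i,j)
       enddo
       enddo
    end subroutine t2u_sketch

Summed over the domain, each T value contributes tfld*tarea exactly once (a quarter to each of its four corners), so the area integral of the field is preserved; the 'S' calls used for the ocean currents apply a different normalization.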
Initialize structure tensor + !----------------------------------------------------------------- do j = 1, ny_block do i = 1, nx_block if (icetmask(i,j,iblk)==0) then if (tmask(i,j,iblk)) then - ! structure tensor + ! structure tensor a11_1(i,j,iblk) = p5 a11_2(i,j,iblk) = p5 a11_3(i,j,iblk) = p5 @@ -350,21 +364,21 @@ subroutine eap (dt) enddo ! i enddo ! j - !----------------------------------------------------------------- - ! ice strength - ! New strength used in Ukita Moritz rheology - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! ice strength + ! New strength used in Ukita Moritz rheology + !----------------------------------------------------------------- strength(:,:,iblk) = c0 ! initialize do ij = 1, icellt(iblk) i = indxti(ij, iblk) j = indxtj(ij, iblk) call icepack_ice_strength(ncat=ncat, & - aice = aice (i,j, iblk), & - vice = vice (i,j, iblk), & - aice0 = aice0 (i,j, iblk), & - aicen = aicen (i,j,:,iblk), & - vicen = vicen (i,j,:,iblk), & + aice = aice (i,j, iblk), & + vice = vice (i,j, iblk), & + aice0 = aice0 (i,j, iblk), & + aicen = aicen (i,j,:,iblk), & + vicen = vicen (i,j,:,iblk), & strength = strength(i,j, iblk) ) enddo ! ij enddo ! iblk @@ -378,10 +392,10 @@ subroutine eap (dt) call ice_HaloUpdate (strength, halo_info, & field_loc_center, field_type_scalar) ! velocities may have changed in dyn_prep2 - call stack_velocity_field(uvel, vvel, fld2) + call stack_fields(uvel, vvel, fld2) call ice_HaloUpdate (fld2, halo_info, & field_loc_NEcorner, field_type_vector) - call unstack_velocity_field(fld2, uvel, vvel) + call unstack_fields(fld2, uvel, vvel) call ice_timer_stop(timer_bound) if (maskhalo_dyn) then @@ -395,40 +409,39 @@ subroutine eap (dt) endif !----------------------------------------------------------------- - ! seabed stress factor Tbu (Tbu is part of Cb coefficient) + ! seabed stress factor Tbu (Tbu is part of Cb coefficient) !----------------------------------------------------------------- - + if (seabed_stress) then if ( seabed_stress_method == 'LKD' ) then !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - call seabed_stress_factor_LKD (nx_block, ny_block, & - icellu (iblk), & - indxui(:,iblk), indxuj(:,iblk), & - vice(:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), Tbu(:,:,iblk)) + call seabed_stress_factor_LKD (nx_block , ny_block , & + icellu (iblk), & + indxui (:,iblk), indxuj(:,iblk), & + vice (:,:,iblk), aice(:,:,iblk), & + hwater(:,:,iblk), Tbu (:,:,iblk)) enddo !$OMP END PARALLEL DO elseif ( seabed_stress_method == 'probabilistic' ) then !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - - call seabed_stress_factor_prob (nx_block, ny_block, & - icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & - icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & - aicen(:,:,:,iblk), vicen(:,:,:,iblk), & - hwater(:,:,iblk), Tbu(:,:,iblk)) + call seabed_stress_factor_prob (nx_block , ny_block , & + icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & + icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + aicen(:,:,:,iblk), vicen(:,:,:,iblk), & + hwater (:,:,iblk), Tbu (:,:,iblk)) enddo !$OMP END PARALLEL DO endif endif - + do ksub = 1,ndte ! subcycling - !----------------------------------------------------------------- - ! stress tensor equation, total surface stress - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! 
stress tensor equation, total surface stress + !----------------------------------------------------------------- !$OMP PARALLEL DO PRIVATE(iblk,strtmp) SCHEDULE(runtime) do iblk = 1, nblocks @@ -436,19 +449,19 @@ subroutine eap (dt) ! call ice_timer_start(timer_tmp1,iblk) call stress_eap (nx_block, ny_block, & ksub, ndte, & - icellt(iblk), & + icellt (iblk), & indxti (:,iblk), indxtj (:,iblk), & - arlx1i, denom1, & + arlx1i, denom1, & uvel (:,:,iblk), vvel (:,:,iblk), & dxt (:,:,iblk), dyt (:,:,iblk), & dxhy (:,:,iblk), dyhx (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & cxm (:,:,iblk), cym (:,:,iblk), & tarear (:,:,iblk), strength (:,:,iblk), & - a11_1 (:,:,iblk), a11_2 (:,:,iblk), & - a11_3 (:,:,iblk), a11_4 (:,:,iblk), & - a12_1 (:,:,iblk), a12_2 (:,:,iblk), & - a12_3 (:,:,iblk), a12_4 (:,:,iblk), & + a11_1 (:,:,iblk), a11_2 (:,:,iblk), & + a11_3 (:,:,iblk), a11_4 (:,:,iblk), & + a12_1 (:,:,iblk), a12_2 (:,:,iblk), & + a12_3 (:,:,iblk), a12_4 (:,:,iblk), & stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & @@ -460,53 +473,53 @@ subroutine eap (dt) e22 (:,:,iblk), & s11 (:,:,iblk), s12 (:,:,iblk), & s22 (:,:,iblk), & - yieldstress11 (:,:,iblk), & - yieldstress12 (:,:,iblk), & - yieldstress22 (:,:,iblk), & -! rdg_conv (:,:,iblk), rdg_shear (:,:,iblk), & + yieldstress11 (:,:,iblk), & + yieldstress12 (:,:,iblk), & + yieldstress22 (:,:,iblk), & +! rdg_conv (:,:,iblk), rdg_shear (:,:,iblk), & rdg_conv (:,:,iblk), & strtmp (:,:,:)) ! call ice_timer_stop(timer_tmp1,iblk) - !----------------------------------------------------------------- - ! momentum equation - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! momentum equation + !----------------------------------------------------------------- ! call ice_timer_start(timer_tmp2,iblk) - call stepu (nx_block, ny_block, & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & - aiu (:,:,iblk), strtmp (:,:,:), & - uocn (:,:,iblk), vocn (:,:,iblk), & - waterx (:,:,iblk), watery (:,:,iblk), & - forcex (:,:,iblk), forcey (:,:,iblk), & - umassdti (:,:,iblk), fm (:,:,iblk), & - uarear (:,:,iblk), & - strintx (:,:,iblk), strinty (:,:,iblk), & - taubx (:,:,iblk), tauby (:,:,iblk), & - uvel_init(:,:,iblk), vvel_init(:,:,iblk),& - uvel (:,:,iblk), vvel (:,:,iblk), & + call stepu (nx_block, ny_block, & + icellu (iblk), Cdn_ocn (:,:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + aiu (:,:,iblk), strtmp (:,:,:), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & + waterx (:,:,iblk), watery (:,:,iblk), & + forcex (:,:,iblk), forcey (:,:,iblk), & + umassdti (:,:,iblk), fm (:,:,iblk), & + uarear (:,:,iblk), & + strintx (:,:,iblk), strinty (:,:,iblk), & + taubx (:,:,iblk), tauby (:,:,iblk), & + uvel_init(:,:,iblk), vvel_init(:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & Tbu (:,:,iblk)) ! call ice_timer_stop(timer_tmp2,iblk) - !----------------------------------------------------------------- - ! evolution of structure tensor A - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! evolution of structure tensor A + !----------------------------------------------------------------- ! call ice_timer_start(timer_tmp3,iblk) if (mod(ksub,10) == 1) then ! 
only called every 10th timestep - call stepa (nx_block, ny_block, & - dtei, icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - a11 (:,:,iblk), a12 (:,:,iblk), & - a11_1 (:,:,iblk), a11_2 (:,:,iblk), & - a11_3 (:,:,iblk), a11_4 (:,:,iblk), & - a12_1 (:,:,iblk), a12_2 (:,:,iblk), & - a12_3 (:,:,iblk), a12_4 (:,:,iblk), & - stressp_1(:,:,iblk), stressp_2(:,:,iblk), & - stressp_3(:,:,iblk), stressp_4(:,:,iblk), & - stressm_1(:,:,iblk), stressm_2(:,:,iblk), & - stressm_3(:,:,iblk), stressm_4(:,:,iblk), & + call stepa (nx_block , ny_block , & + dtei , icellt (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + a11 (:,:,iblk), a12 (:,:,iblk), & + a11_1 (:,:,iblk), a11_2 (:,:,iblk), & + a11_3 (:,:,iblk), a11_4 (:,:,iblk), & + a12_1 (:,:,iblk), a12_2 (:,:,iblk), & + a12_3 (:,:,iblk), a12_4 (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & stress12_1(:,:,iblk), stress12_2(:,:,iblk), & stress12_3(:,:,iblk), stress12_4(:,:,iblk)) endif @@ -514,7 +527,7 @@ subroutine eap (dt) enddo !$OMP END PARALLEL DO - call stack_velocity_field(uvel, vvel, fld2) + call stack_fields(uvel, vvel, fld2) call ice_timer_start(timer_bound) if (maskhalo_dyn) then call ice_HaloUpdate (fld2, halo_info_mask, & @@ -524,7 +537,7 @@ subroutine eap (dt) field_loc_NEcorner, field_type_vector) endif call ice_timer_stop(timer_bound) - call unstack_velocity_field(fld2, uvel, vvel) + call unstack_fields(fld2, uvel, vvel) enddo ! subcycling @@ -538,28 +551,44 @@ subroutine eap (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - call dyn_finish & - (nx_block, ny_block, & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & + call dyn_finish & + (nx_block, ny_block, & + icellu (iblk), Cdn_ocn (:,:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & aiu (:,:,iblk), fm (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk), & - strocnxT(:,:,iblk), strocnyT(:,:,iblk)) + strocnx (:,:,iblk), strocny (:,:,iblk)) enddo !$OMP END PARALLEL DO - call u2tgrid_vector(strocnxT) ! shift - call u2tgrid_vector(strocnyT) + ! strocn computed on U, N, E as needed. Map strocn U divided by aiu to T + ! TODO: This should be done elsewhere as part of generalization? + ! conservation requires aiu be divided before averaging + work1 = c0 + work2 = c0 + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) + do iblk = 1, nblocks + do ij = 1, icellu(iblk) + i = indxui(ij,iblk) + j = indxuj(ij,iblk) + work1(i,j,iblk) = strocnx(i,j,iblk)/aiu(i,j,iblk) + work2(i,j,iblk) = strocny(i,j,iblk)/aiu(i,j,iblk) + enddo + enddo + call ice_HaloUpdate (work1, halo_info, & + field_loc_NEcorner, field_type_vector) + call ice_HaloUpdate (work2, halo_info, & + field_loc_NEcorner, field_type_vector) + call grid_average_X2Y('F', work1, 'U', strocnxT, 'T') ! shift + call grid_average_X2Y('F', work2, 'U', strocnyT, 'T') call ice_timer_stop(timer_dynamics) ! dynamics end subroutine eap !======================================================================= - ! Initialize parameters and variables needed for the eap dynamics ! (based on init_dyn) @@ -574,16 +603,16 @@ subroutine init_eap i, j, & iblk ! 
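A note on the ocean-stress remapping at the end of eap above: strocnx/strocny are accumulated at U points already weighted by the ice fraction aiu, so it is the per-unit-ice stress strocn/aiu that gets halo-updated and averaged back to T points. The order matters; with hypothetical values at two equal-area U points, aiu = (0.2, 1.0) and strocnx = (0.2, 2.0), dividing first and then averaging gives (1.0 + 2.0)/2 = 1.5, whereas averaging both fields first and dividing afterwards gives 1.1/0.6 = 1.83, a different answer whenever the per-ice stresses differ. The in-line comment's point is that only the divide-first ordering stays consistent with the aiu weighting.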
block index - real (kind=dbl_kind), parameter :: & + real (kind=dbl_kind), parameter :: & eps6 = 1.0e-6_dbl_kind - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ix, iy, iz, ia - integer (kind=int_kind), parameter :: & + integer (kind=int_kind), parameter :: & nz = 100 - real (kind=dbl_kind) :: & + real (kind=dbl_kind) :: & ainit, xinit, yinit, zinit, & da, dx, dy, dz, & phi @@ -602,23 +631,23 @@ subroutine init_eap do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - e11(i,j,iblk) = c0 - e12(i,j,iblk) = c0 - e22(i,j,iblk) = c0 - s11(i,j,iblk) = c0 - s12(i,j,iblk) = c0 - s22(i,j,iblk) = c0 + e11 (i,j,iblk) = c0 + e12 (i,j,iblk) = c0 + e22 (i,j,iblk) = c0 + s11 (i,j,iblk) = c0 + s12 (i,j,iblk) = c0 + s22 (i,j,iblk) = c0 yieldstress11(i,j,iblk) = c0 yieldstress12(i,j,iblk) = c0 yieldstress22(i,j,iblk) = c0 - a11_1 (i,j,iblk) = p5 - a11_2 (i,j,iblk) = p5 - a11_3 (i,j,iblk) = p5 - a11_4 (i,j,iblk) = p5 - a12_1 (i,j,iblk) = c0 - a12_2 (i,j,iblk) = c0 - a12_3 (i,j,iblk) = c0 - a12_4 (i,j,iblk) = c0 + a11_1 (i,j,iblk) = p5 + a11_2 (i,j,iblk) = p5 + a11_3 (i,j,iblk) = p5 + a11_4 (i,j,iblk) = p5 + a12_1 (i,j,iblk) = c0 + a12_2 (i,j,iblk) = c0 + a12_3 (i,j,iblk) = c0 + a12_4 (i,j,iblk) = c0 enddo ! i enddo ! j enddo ! iblk @@ -638,8 +667,8 @@ subroutine init_eap yinit = -dy do ia=1,na_yield - do ix=1,nx_yield - do iy=1,ny_yield + do ix=1,nx_yield + do iy=1,ny_yield s11r(ix,iy,ia) = c0 s12r(ix,iy,ia) = c0 s22r(ix,iy,ia) = c0 @@ -647,48 +676,48 @@ subroutine init_eap s12s(ix,iy,ia) = c0 s22s(ix,iy,ia) = c0 if (ia <= na_yield-1) then - do iz=1,nz - s11r(ix,iy,ia) = s11r(ix,iy,ia) + 1*w1(ainit+ia*da)* & - exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & - s11kr(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) - s12r(ix,iy,ia) = s12r(ix,iy,ia) + 1*w1(ainit+ia*da)* & - exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & - s12kr(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) - s22r(ix,iy,ia) = s22r(ix,iy,ia) + 1*w1(ainit+ia*da)* & - exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & - s22kr(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) - s11s(ix,iy,ia) = s11s(ix,iy,ia) + 1*w1(ainit+ia*da)* & - exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & - s11ks(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) - s12s(ix,iy,ia) = s12s(ix,iy,ia) + 1*w1(ainit+ia*da)* & - exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & - s12ks(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) - s22s(ix,iy,ia) = s22s(ix,iy,ia) + 1*w1(ainit+ia*da)* & - exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & - s22ks(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) - enddo - if (abs(s11r(ix,iy,ia)) < eps6) s11r(ix,iy,ia) = c0 - if (abs(s12r(ix,iy,ia)) < eps6) s12r(ix,iy,ia) = c0 - if (abs(s22r(ix,iy,ia)) < eps6) s22r(ix,iy,ia) = c0 - if (abs(s11s(ix,iy,ia)) < eps6) s11s(ix,iy,ia) = c0 - if (abs(s12s(ix,iy,ia)) < eps6) s12s(ix,iy,ia) = c0 - if (abs(s22s(ix,iy,ia)) < eps6) s22s(ix,iy,ia) = c0 + do iz=1,nz + s11r(ix,iy,ia) = s11r(ix,iy,ia) + 1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s11kr(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + s12r(ix,iy,ia) = s12r(ix,iy,ia) + 1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s12kr(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + s22r(ix,iy,ia) = s22r(ix,iy,ia) + 1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s22kr(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + s11s(ix,iy,ia) = s11s(ix,iy,ia) + 
1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s11ks(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + s12s(ix,iy,ia) = s12s(ix,iy,ia) + 1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s12ks(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + s22s(ix,iy,ia) = s22s(ix,iy,ia) + 1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s22ks(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + enddo + if (abs(s11r(ix,iy,ia)) < eps6) s11r(ix,iy,ia) = c0 + if (abs(s12r(ix,iy,ia)) < eps6) s12r(ix,iy,ia) = c0 + if (abs(s22r(ix,iy,ia)) < eps6) s22r(ix,iy,ia) = c0 + if (abs(s11s(ix,iy,ia)) < eps6) s11s(ix,iy,ia) = c0 + if (abs(s12s(ix,iy,ia)) < eps6) s12s(ix,iy,ia) = c0 + if (abs(s22s(ix,iy,ia)) < eps6) s22s(ix,iy,ia) = c0 else - s11r(ix,iy,ia) = p5*s11kr(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) - s12r(ix,iy,ia) = p5*s12kr(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) - s22r(ix,iy,ia) = p5*s22kr(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) - s11s(ix,iy,ia) = p5*s11ks(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) - s12s(ix,iy,ia) = p5*s12ks(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) - s22s(ix,iy,ia) = p5*s22ks(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) - if (abs(s11r(ix,iy,ia)) < eps6) s11r(ix,iy,ia) = c0 - if (abs(s12r(ix,iy,ia)) < eps6) s12r(ix,iy,ia) = c0 - if (abs(s22r(ix,iy,ia)) < eps6) s22r(ix,iy,ia) = c0 - if (abs(s11s(ix,iy,ia)) < eps6) s11s(ix,iy,ia) = c0 - if (abs(s12s(ix,iy,ia)) < eps6) s12s(ix,iy,ia) = c0 - if (abs(s22s(ix,iy,ia)) < eps6) s22s(ix,iy,ia) = c0 + s11r(ix,iy,ia) = p5*s11kr(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + s12r(ix,iy,ia) = p5*s12kr(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + s22r(ix,iy,ia) = p5*s22kr(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + s11s(ix,iy,ia) = p5*s11ks(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + s12s(ix,iy,ia) = p5*s12ks(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + s22s(ix,iy,ia) = p5*s22ks(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + if (abs(s11r(ix,iy,ia)) < eps6) s11r(ix,iy,ia) = c0 + if (abs(s12r(ix,iy,ia)) < eps6) s12r(ix,iy,ia) = c0 + if (abs(s22r(ix,iy,ia)) < eps6) s22r(ix,iy,ia) = c0 + if (abs(s11s(ix,iy,ia)) < eps6) s11s(ix,iy,ia) = c0 + if (abs(s12s(ix,iy,ia)) < eps6) s12s(ix,iy,ia) = c0 + if (abs(s22s(ix,iy,ia)) < eps6) s22s(ix,iy,ia) = c0 endif - enddo - enddo + enddo + enddo enddo end subroutine init_eap @@ -736,23 +765,23 @@ end FUNCTION w2 !======================================================================= ! Function : s11kr - FUNCTION s11kr(x,y,z,phi) + FUNCTION s11kr(x,y,z,phi) real (kind=dbl_kind), intent(in) :: & x,y,z,phi real (kind=dbl_kind) :: & - s11kr, p + s11kr, p real (kind=dbl_kind) :: & - n1t2i11, n1t2i12, n1t2i21, n1t2i22, & - n2t1i11, n2t1i12, n2t1i21, n2t1i22, & -! t1t2i11, t1t2i12, t1t2i21, t1t2i22, & -! t2t1i11, t2t1i12, t2t1i21, t2t1i22, & - d11, d12, d22, & - IIn1t2, IIn2t1, & -! IIt1t2, & - Hen1t2, Hen2t1 + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & +! t1t2i11, t1t2i12, t1t2i21, t1t2i22, & +! t2t1i11, t2t1i12, t2t1i21, t2t1i22, & + d11, d12, d22, & + IIn1t2, IIn2t1, & +! IIt1t2, & + Hen1t2, Hen2t1 character(len=*), parameter :: subname = '(s11kr)' @@ -784,15 +813,15 @@ FUNCTION s11kr(x,y,z,phi) ! 
IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 if (-IIn1t2>=puny) then - Hen1t2 = c1 + Hen1t2 = c1 else - Hen1t2 = c0 + Hen1t2 = c0 endif if (-IIn2t1>=puny) then - Hen2t1 = c1 + Hen2t1 = c1 else - Hen2t1 = c0 + Hen2t1 = c0 endif s11kr = (- Hen1t2 * n1t2i11 - Hen2t1 * n2t1i11) @@ -805,20 +834,20 @@ end FUNCTION s11kr FUNCTION s12kr(x,y,z,phi) real (kind=dbl_kind), intent(in) :: & - x,y,z,phi + x,y,z,phi real (kind=dbl_kind) :: & - s12kr, s12r0, s21r0, p + s12kr, s12r0, s21r0, p real (kind=dbl_kind) :: & - n1t2i11, n1t2i12, n1t2i21, n1t2i22, & - n2t1i11, n2t1i12, n2t1i21, n2t1i22, & -! t1t2i11, t1t2i12, t1t2i21, t1t2i22, & -! t2t1i11, t2t1i12, t2t1i21, t2t1i22, & - d11, d12, d22, & - IIn1t2, IIn2t1, & -! IIt1t2, & - Hen1t2, Hen2t1 + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & +! t1t2i11, t1t2i12, t1t2i21, t1t2i22, & +! t2t1i11, t2t1i12, t2t1i21, t2t1i22, & + d11, d12, d22, & + IIn1t2, IIn2t1, & +! IIt1t2, & + Hen1t2, Hen2t1 character(len=*), parameter :: subname = '(s12kr)' @@ -848,15 +877,15 @@ FUNCTION s12kr(x,y,z,phi) ! IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 if (-IIn1t2>=puny) then - Hen1t2 = c1 + Hen1t2 = c1 else - Hen1t2 = c0 + Hen1t2 = c0 endif if (-IIn2t1>=puny) then - Hen2t1 = c1 + Hen2t1 = c1 else - Hen2t1 = c0 + Hen2t1 = c0 endif s12r0 = (- Hen1t2 * n1t2i12 - Hen2t1 * n2t1i12) @@ -871,20 +900,20 @@ end FUNCTION s12kr FUNCTION s22kr(x,y,z,phi) real (kind=dbl_kind), intent(in) :: & - x,y,z,phi + x,y,z,phi real (kind=dbl_kind) :: & - s22kr, p + s22kr, p real (kind=dbl_kind) :: & - n1t2i11, n1t2i12, n1t2i21, n1t2i22, & - n2t1i11, n2t1i12, n2t1i21, n2t1i22, & -! t1t2i11, t1t2i12, t1t2i21, t1t2i22, & -! t2t1i11, t2t1i12, t2t1i21, t2t1i22, & - d11, d12, d22, & - IIn1t2, IIn2t1, & -! IIt1t2, & - Hen1t2, Hen2t1 + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & +! t1t2i11, t1t2i12, t1t2i21, t1t2i22, & +! t2t1i11, t2t1i12, t2t1i21, t2t1i22, & + d11, d12, d22, & + IIn1t2, IIn2t1, & +! IIt1t2, & + Hen1t2, Hen2t1 character(len=*), parameter :: subname = '(s22kr)' @@ -914,15 +943,15 @@ FUNCTION s22kr(x,y,z,phi) ! IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 if (-IIn1t2>=puny) then - Hen1t2 = c1 + Hen1t2 = c1 else - Hen1t2 = c0 + Hen1t2 = c0 endif if (-IIn2t1>=puny) then - Hen2t1 = c1 + Hen2t1 = c1 else - Hen2t1 = c0 + Hen2t1 = c0 endif s22kr = (- Hen1t2 * n1t2i22 - Hen2t1 * n2t1i22) @@ -935,21 +964,21 @@ end FUNCTION s22kr FUNCTION s11ks(x,y,z,phi) real (kind=dbl_kind), intent(in):: & - x,y,z,phi + x,y,z,phi real (kind=dbl_kind) :: & - s11ks, p + s11ks, p real (kind=dbl_kind) :: & - n1t2i11, n1t2i12, n1t2i21, n1t2i22, & - n2t1i11, n2t1i12, n2t1i21, n2t1i22, & - t1t2i11, & - t1t2i12, t1t2i21, t1t2i22, & - t2t1i11, & -! t2t1i12, t2t1i21, t2t1i22, & - d11, d12, d22, & - IIn1t2, IIn2t1, IIt1t2, & - Hen1t2, Hen2t1 + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & + t1t2i11, & + t1t2i12, t1t2i21, t1t2i22, & + t2t1i11, & +! 
t2t1i12, t2t1i21, t2t1i22, & + d11, d12, d22, & + IIn1t2, IIn2t1, IIt1t2, & + Hen1t2, Hen2t1 character(len=*), parameter :: subname = '(s11ks)' @@ -979,15 +1008,15 @@ FUNCTION s11ks(x,y,z,phi) IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 if (-IIn1t2>=puny) then - Hen1t2 = c1 + Hen1t2 = c1 else - Hen1t2 = c0 + Hen1t2 = c0 endif if (-IIn2t1>=puny) then - Hen2t1 = c1 + Hen2t1 = c1 else - Hen2t1 = c0 + Hen2t1 = c0 endif s11ks = sign(c1,IIt1t2+puny)*(Hen1t2 * t1t2i11 + Hen2t1 * t2t1i11) @@ -1000,20 +1029,20 @@ end FUNCTION s11ks FUNCTION s12ks(x,y,z,phi) real (kind=dbl_kind), intent(in) :: & - x,y,z,phi + x,y,z,phi real (kind=dbl_kind) :: & - s12ks,s12s0,s21s0,p + s12ks,s12s0,s21s0,p real (kind=dbl_kind) :: & - n1t2i11, n1t2i12, n1t2i21, n1t2i22, & - n2t1i11, n2t1i12, n2t1i21, n2t1i22, & - t1t2i11, t1t2i12, t1t2i21, t1t2i22, & -! t2t1i11, t2t1i22, & - t2t1i12, t2t1i21, & - d11, d12, d22, & - IIn1t2, IIn2t1, IIt1t2, & - Hen1t2, Hen2t1 + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & + t1t2i11, t1t2i12, t1t2i21, t1t2i22, & +! t2t1i11, t2t1i22, & + t2t1i12, t2t1i21, & + d11, d12, d22, & + IIn1t2, IIn2t1, IIt1t2, & + Hen1t2, Hen2t1 character(len=*), parameter :: subname = '(s12ks)' @@ -1043,15 +1072,15 @@ FUNCTION s12ks(x,y,z,phi) IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 if (-IIn1t2>=puny) then - Hen1t2 = c1 + Hen1t2 = c1 else - Hen1t2 = c0 + Hen1t2 = c0 endif if (-IIn2t1>=puny) then - Hen2t1 = c1 + Hen2t1 = c1 else - Hen2t1 = c0 + Hen2t1 = c0 endif s12s0 = sign(c1,IIt1t2+puny)*(Hen1t2 * t1t2i12 + Hen2t1 * t2t1i12) @@ -1063,23 +1092,23 @@ end FUNCTION s12ks !======================================================================= ! Function : s22ks - FUNCTION s22ks(x,y,z,phi) + FUNCTION s22ks(x,y,z,phi) real (kind=dbl_kind), intent(in) :: & - x,y,z,phi + x,y,z,phi real (kind=dbl_kind) :: & - s22ks,p + s22ks,p real (kind=dbl_kind) :: & - n1t2i11, n1t2i12, n1t2i21, n1t2i22, & - n2t1i11, n2t1i12, n2t1i21, n2t1i22, & - t1t2i11, t1t2i12, t1t2i21, t1t2i22, & -! t2t1i11, t2t1i12, t2t1i21, & - t2t1i22, & - d11, d12, d22, & - IIn1t2, IIn2t1, IIt1t2, & - Hen1t2, Hen2t1 + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & + t1t2i11, t1t2i12, t1t2i21, t1t2i22, & +! t2t1i11, t2t1i12, t2t1i21, & + t2t1i22, & + d11, d12, d22, & + IIn1t2, IIn2t1, IIt1t2, & + Hen1t2, Hen2t1 character(len=*), parameter :: subname = '(s22ks)' @@ -1109,24 +1138,22 @@ FUNCTION s22ks(x,y,z,phi) IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 if (-IIn1t2>=puny) then - Hen1t2 = c1 + Hen1t2 = c1 else - Hen1t2 = c0 + Hen1t2 = c0 endif if (-IIn2t1>=puny) then - Hen2t1 = c1 + Hen2t1 = c1 else - Hen2t1 = c0 + Hen2t1 = c0 endif s22ks = sign(c1,IIt1t2+puny)*(Hen1t2 * t1t2i22 + Hen2t1 * t2t1i22) end FUNCTION s22ks - !======================================================================= - ! Computes the rates of strain and internal stress components for ! each of the four corners on each T-grid cell. ! Computes stress terms for the momentum equation @@ -1163,7 +1190,7 @@ subroutine stress_eap (nx_block, ny_block, & rdg_conv, & strtmp) - integer (kind=int_kind), intent(in) :: & + integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions ksub , & ! subcycling step ndte , & ! number of subcycles @@ -1204,11 +1231,11 @@ subroutine stress_eap (nx_block, ny_block, & shear , & ! strain rate II component (1/s) divu , & ! strain rate I component, velocity divergence (1/s) e11 , & ! 
components of strain rate tensor (1/s) - e12 , & ! - e22 , & ! + e12 , & ! + e22 , & ! s11 , & ! components of stress tensor (kg/s^2) - s12 , & ! - s22 , & ! + s12 , & ! + s22 , & ! yieldstress11, & ! components of yield stress tensor (kg/s^2) yieldstress12, & yieldstress22, & @@ -1229,22 +1256,22 @@ subroutine stress_eap (nx_block, ny_block, & stress12tmp_1,stress12tmp_2,stress12tmp_3,stress12tmp_4 ! sigma12 real (kind=dbl_kind) :: & - divune, divunw, divuse, divusw , & ! divergence - tensionne, tensionnw, tensionse, tensionsw, & ! tension - shearne, shearnw, shearse, shearsw , & ! shearing - ssigpn, ssigps, ssigpe, ssigpw , & - ssigmn, ssigms, ssigme, ssigmw , & - ssig12n, ssig12s, ssig12e, ssig12w , & - ssigp1, ssigp2, ssigm1, ssigm2, ssig121, ssig122, & - csigpne, csigpnw, csigpse, csigpsw , & - csigmne, csigmnw, csigmse, csigmsw , & - csig12ne, csig12nw, csig12se, csig12sw , & - str12ew, str12we, str12ns, str12sn , & - strp_tmp, strm_tmp + divune, divunw, divuse, divusw , & ! divergence + tensionne, tensionnw, tensionse, tensionsw, & ! tension + shearne, shearnw, shearse, shearsw , & ! shearing + ssigpn, ssigps, ssigpe, ssigpw , & + ssigmn, ssigms, ssigme, ssigmw , & + ssig12n, ssig12s, ssig12e, ssig12w , & + ssigp1, ssigp2, ssigm1, ssigm2, ssig121, ssig122, & + csigpne, csigpnw, csigpse, csigpsw , & + csigmne, csigmnw, csigmse, csigmsw , & + csig12ne, csig12nw, csig12se, csig12sw , & + str12ew, str12we, str12ns, str12sn , & + strp_tmp, strm_tmp real (kind=dbl_kind) :: & - alpharne, alpharnw, alpharsw, alpharse, & - alphasne, alphasnw, alphassw, alphasse + alpharne, alpharnw, alpharsw, alpharse, & + alphasne, alphasnw, alphassw, alphasse character(len=*), parameter :: subname = '(stress_eap)' @@ -1258,10 +1285,11 @@ subroutine stress_eap (nx_block, ny_block, & i = indxti(ij) j = indxtj(ij) - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! strain rates + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + ! divergence = e_11 + e_22 divune = cyp(i,j)*uvel(i ,j ) - dyt(i,j)*uvel(i-1,j ) & + cxp(i,j)*vvel(i ,j ) - dxt(i,j)*vvel(i ,j-1) @@ -1292,9 +1320,9 @@ subroutine stress_eap (nx_block, ny_block, & shearse = -cym(i,j)*vvel(i ,j-1) - dyt(i,j)*vvel(i-1,j-1) & - cxp(i,j)*uvel(i ,j-1) + dxt(i,j)*uvel(i ,j ) - !----------------------------------------------------------------- - ! Stress updated depending on strain rate and structure tensor - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! Stress updated depending on strain rate and structure tensor + !----------------------------------------------------------------- ! ne call update_stress_rdg (ksub, ndte, divune, tensionne, & @@ -1321,9 +1349,10 @@ subroutine stress_eap (nx_block, ny_block, & stress12tmp_4, strength(i,j), & alpharse, alphasse) - !----------------------------------------------------------------- - ! on last subcycle, save quantities for mechanical redistribution - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! 
on last subcycle, save quantities for mechanical redistribution + !----------------------------------------------------------------- + if (ksub == ndte) then ! diagnostic only @@ -1348,9 +1377,9 @@ subroutine stress_eap (nx_block, ny_block, & e22(i,j) = p5*p25*(divune + divunw + divuse + divusw - & tensionne - tensionnw - tensionse - tensionsw) * tarear(i,j) - !----------------------------------------------------------------- - ! elastic relaxation, see Eq. A12-A14 - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! elastic relaxation, see Eq. A12-A14 + !----------------------------------------------------------------- stressp_1(i,j) = (stressp_1(i,j) + stressptmp_1*arlx1i) & * denom1 @@ -1379,14 +1408,14 @@ subroutine stress_eap (nx_block, ny_block, & stress12_4(i,j) = (stress12_4(i,j) + stress12tmp_4*arlx1i) & * denom1 - s11(i,j) = p5 * p25 * (stressp_1(i,j) + stressp_2(i,j) & - + stressp_3(i,j) + stressp_4(i,j) & - + stressm_1(i,j) + stressm_2(i,j) & - + stressm_3(i,j) + stressm_4(i,j)) - s22(i,j) = p5 * p25 * (stressp_1(i,j) + stressp_2(i,j) & - + stressp_3(i,j) + stressp_4(i,j) & - - stressm_1(i,j) - stressm_2(i,j) & - - stressm_3(i,j) - stressm_4(i,j)) + s11(i,j) = p5 * p25 * (stressp_1 (i,j) + stressp_2 (i,j) & + + stressp_3 (i,j) + stressp_4 (i,j) & + + stressm_1 (i,j) + stressm_2 (i,j) & + + stressm_3 (i,j) + stressm_4 (i,j)) + s22(i,j) = p5 * p25 * (stressp_1 (i,j) + stressp_2 (i,j) & + + stressp_3 (i,j) + stressp_4 (i,j) & + - stressm_1 (i,j) - stressm_2 (i,j) & + - stressm_3 (i,j) - stressm_4 (i,j)) s12(i,j) = p25 * (stress12_1(i,j) + stress12_2(i,j) & + stress12_3(i,j) + stress12_4(i,j)) @@ -1401,34 +1430,34 @@ subroutine stress_eap (nx_block, ny_block, & yieldstress12(i,j) = p25 * (stress12tmp_1 + stress12tmp_2 & + stress12tmp_3 + stress12tmp_4) - !----------------------------------------------------------------- - ! Eliminate underflows. - ! The following code is commented out because it is relatively - ! expensive and most compilers include a flag that accomplishes - ! the same thing more efficiently. This code is cheaper than - ! handling underflows if the compiler lacks a flag; uncomment - ! it in that case. The compiler flag is often described with the - ! phrase "flush to zero". - !----------------------------------------------------------------- - -! stressp_1(i,j) = sign(max(abs(stressp_1(i,j)),puny),stressp_1(i,j)) -! stressp_2(i,j) = sign(max(abs(stressp_2(i,j)),puny),stressp_2(i,j)) -! stressp_3(i,j) = sign(max(abs(stressp_3(i,j)),puny),stressp_3(i,j)) -! stressp_4(i,j) = sign(max(abs(stressp_4(i,j)),puny),stressp_4(i,j)) - -! stressm_1(i,j) = sign(max(abs(stressm_1(i,j)),puny),stressm_1(i,j)) -! stressm_2(i,j) = sign(max(abs(stressm_2(i,j)),puny),stressm_2(i,j)) -! stressm_3(i,j) = sign(max(abs(stressm_3(i,j)),puny),stressm_3(i,j)) -! stressm_4(i,j) = sign(max(abs(stressm_4(i,j)),puny),stressm_4(i,j)) - -! stress12_1(i,j) = sign(max(abs(stress12_1(i,j)),puny),stress12_1(i,j)) -! stress12_2(i,j) = sign(max(abs(stress12_2(i,j)),puny),stress12_2(i,j)) -! stress12_3(i,j) = sign(max(abs(stress12_3(i,j)),puny),stress12_3(i,j)) -! stress12_4(i,j) = sign(max(abs(stress12_4(i,j)),puny),stress12_4(i,j)) - - !----------------------------------------------------------------- - ! combinations of the stresses for the momentum equation ! kg/s^2 - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! 
Eliminate underflows. + ! The following code is commented out because it is relatively + ! expensive and most compilers include a flag that accomplishes + ! the same thing more efficiently. This code is cheaper than + ! handling underflows if the compiler lacks a flag; uncomment + ! it in that case. The compiler flag is often described with the + ! phrase "flush to zero". + !----------------------------------------------------------------- + +! stressp_1(i,j) = sign(max(abs(stressp_1(i,j)),puny),stressp_1(i,j)) +! stressp_2(i,j) = sign(max(abs(stressp_2(i,j)),puny),stressp_2(i,j)) +! stressp_3(i,j) = sign(max(abs(stressp_3(i,j)),puny),stressp_3(i,j)) +! stressp_4(i,j) = sign(max(abs(stressp_4(i,j)),puny),stressp_4(i,j)) + +! stressm_1(i,j) = sign(max(abs(stressm_1(i,j)),puny),stressm_1(i,j)) +! stressm_2(i,j) = sign(max(abs(stressm_2(i,j)),puny),stressm_2(i,j)) +! stressm_3(i,j) = sign(max(abs(stressm_3(i,j)),puny),stressm_3(i,j)) +! stressm_4(i,j) = sign(max(abs(stressm_4(i,j)),puny),stressm_4(i,j)) + +! stress12_1(i,j) = sign(max(abs(stress12_1(i,j)),puny),stress12_1(i,j)) +! stress12_2(i,j) = sign(max(abs(stress12_2(i,j)),puny),stress12_2(i,j)) +! stress12_3(i,j) = sign(max(abs(stress12_3(i,j)),puny),stress12_3(i,j)) +! stress12_4(i,j) = sign(max(abs(stress12_4(i,j)),puny),stress12_4(i,j)) + + !----------------------------------------------------------------- + ! combinations of the stresses for the momentum equation ! kg/s^2 + !----------------------------------------------------------------- ssigpn = stressp_1(i,j) + stressp_2(i,j) ssigps = stressp_3(i,j) + stressp_4(i,j) @@ -1455,12 +1484,12 @@ subroutine stress_eap (nx_block, ny_block, & csigpnw = p111*stressp_2(i,j) + ssigp1 + p027*stressp_4(i,j) csigpsw = p111*stressp_3(i,j) + ssigp2 + p027*stressp_1(i,j) csigpse = p111*stressp_4(i,j) + ssigp1 + p027*stressp_2(i,j) - + csigmne = p111*stressm_1(i,j) + ssigm2 + p027*stressm_3(i,j) csigmnw = p111*stressm_2(i,j) + ssigm1 + p027*stressm_4(i,j) csigmsw = p111*stressm_3(i,j) + ssigm2 + p027*stressm_1(i,j) csigmse = p111*stressm_4(i,j) + ssigm1 + p027*stressm_2(i,j) - + csig12ne = p222*stress12_1(i,j) + ssig122 & + p055*stress12_3(i,j) csig12nw = p222*stress12_2(i,j) + ssig121 & @@ -1475,9 +1504,10 @@ subroutine stress_eap (nx_block, ny_block, & str12ns = p5*dyt(i,j)*(p333*ssig12n + p166*ssig12s) str12sn = p5*dyt(i,j)*(p333*ssig12s + p166*ssig12n) - !----------------------------------------------------------------- - ! for dF/dx (u momentum) - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! for dF/dx (u momentum) + !----------------------------------------------------------------- + strp_tmp = p25*dyt(i,j)*(p333*ssigpn + p166*ssigps) strm_tmp = p25*dyt(i,j)*(p333*ssigmn + p166*ssigms) @@ -1500,9 +1530,10 @@ subroutine stress_eap (nx_block, ny_block, & strtmp(i,j,4) = strp_tmp + strm_tmp + str12we & + dxhy(i,j)*(-csigpsw + csigmsw) + dyhx(i,j)*csig12sw - !----------------------------------------------------------------- - ! for dF/dy (v momentum) - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! 
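On the underflow note above: a minimal, self-contained illustration of the commented-out clamp, which pushes tiny stresses to +/-puny (sign preserved) rather than letting later products decay into denormals; the puny value here is hypothetical, and a compiler flush-to-zero flag makes the clamp unnecessary:

    program underflow_clamp
       implicit none
       integer, parameter :: dbl_kind = selected_real_kind(13)
       real (kind=dbl_kind), parameter :: puny = 1.0e-11_dbl_kind
       real (kind=dbl_kind) :: s
       s = -3.0e-30_dbl_kind            ! tiny stress; products would underflow
       s = sign(max(abs(s), puny), s)   ! clamped to -puny, sign preserved
       print *, s                       ! prints approximately -1.0e-11
    end program underflow_clamp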
for dF/dy (v momentum) + !----------------------------------------------------------------- + strp_tmp = p25*dxt(i,j)*(p333*ssigpe + p166*ssigpw) strm_tmp = p25*dxt(i,j)*(p333*ssigme + p166*ssigmw) @@ -1530,7 +1561,6 @@ subroutine stress_eap (nx_block, ny_block, & end subroutine stress_eap !======================================================================= - ! Updates the stress depending on values of strain rate and structure ! tensor and for ksub=ndte it computes closing and sliding rate @@ -1551,7 +1581,7 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, & real (kind=dbl_kind), intent(out) :: & stressp, stressm, stress12, & - alphar, alphas + alphar, alphas ! local variables @@ -1591,263 +1621,268 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, & character(len=*), parameter :: subname = '(update_stress_rdg)' -! Factor to maintain the same stress as in EVP (see Section 3) -! Can be set to 1 otherwise + ! Factor to maintain the same stress as in EVP (see Section 3) + ! Can be set to 1 otherwise - if (first_call) then - invstressconviso = c1/(c1+kfriction*kfriction) - invsin = c1/sin(pi2/c12) * invstressconviso - endif + if (first_call) then + invstressconviso = c1/(c1+kfriction*kfriction) + invsin = c1/sin(pi2/c12) * invstressconviso + endif -! compute eigenvalues, eigenvectors and angles for structure tensor, strain rates + ! compute eigenvalues, eigenvectors and angles for structure tensor, strain rates -! 1) structure tensor + ! 1) structure tensor - a22 = c1-a11 + a22 = c1-a11 -! gamma: angle between general coordinates and principal axis of A -! here Tan2gamma = 2 a12 / (a11 - a22) + ! gamma: angle between general coordinates and principal axis of A + ! here Tan2gamma = 2 a12 / (a11 - a22) - Q11Q11 = c1 - Q12Q12 = puny - Q11Q12 = puny + Q11Q11 = c1 + Q12Q12 = puny + Q11Q12 = puny - if((ABS(a11 - a22) > puny).or.(ABS(a12) > puny)) then - Angle_denom_gamma = sqrt( ( a11 - a22 )*( a11 - a22) + & + if ((ABS(a11 - a22) > puny).or.(ABS(a12) > puny)) then + Angle_denom_gamma = sqrt( ( a11 - a22 )*( a11 - a22) + & c4*a12*a12 ) - Q11Q11 = p5 + ( a11 - a22 )*p5/Angle_denom_gamma !Cos^2 - Q12Q12 = p5 - ( a11 - a22 )*p5/Angle_denom_gamma !Sin^2 - Q11Q12 = a12/Angle_denom_gamma !CosSin - endif + Q11Q11 = p5 + ( a11 - a22 )*p5/Angle_denom_gamma !Cos^2 + Q12Q12 = p5 - ( a11 - a22 )*p5/Angle_denom_gamma !Sin^2 + Q11Q12 = a12/Angle_denom_gamma !CosSin + endif -! rotation Q*atemp*Q^T - atempprime = Q11Q11*a11 + c2*Q11Q12*a12 + Q12Q12*a22 + ! rotation Q*atemp*Q^T + atempprime = Q11Q11*a11 + c2*Q11Q12*a12 + Q12Q12*a22 -! make first principal value the largest - atempprime = max(atempprime, c1 - atempprime) + ! make first principal value the largest + atempprime = max(atempprime, c1 - atempprime) -! 2) strain rate + ! 2) strain rate - dtemp11 = p5*(divu + tension) - dtemp12 = shear*p5 - dtemp22 = p5*(divu - tension) + dtemp11 = p5*(divu + tension) + dtemp12 = shear*p5 + dtemp22 = p5*(divu - tension) -! here Tan2alpha = 2 dtemp12 / (dtemp11 - dtemp22) + ! 
here Tan2alpha = 2 dtemp12 / (dtemp11 - dtemp22)

-      Qd11Qd11 = c1
-      Qd12Qd12 = puny
-      Qd11Qd12 = puny
+         Qd11Qd11 = c1
+         Qd12Qd12 = puny
+         Qd11Qd12 = puny

-      if((ABS( dtemp11 - dtemp22) > puny).or.(ABS(dtemp12) > puny)) then
-        Angle_denom_alpha = sqrt( ( dtemp11 - dtemp22 )* &
-                  ( dtemp11 - dtemp22 ) + c4*dtemp12*dtemp12)
+         if ((ABS( dtemp11 - dtemp22) > puny).or.(ABS(dtemp12) > puny)) then
+            Angle_denom_alpha = sqrt( ( dtemp11 - dtemp22 )* &
+                                      ( dtemp11 - dtemp22 ) + c4*dtemp12*dtemp12)

-        Qd11Qd11 = p5 + ( dtemp11 - dtemp22 )*p5/Angle_denom_alpha !Cos^2
-        Qd12Qd12 = p5 - ( dtemp11 - dtemp22 )*p5/Angle_denom_alpha !Sin^2
-        Qd11Qd12 = dtemp12/Angle_denom_alpha !CosSin
-      endif
+            Qd11Qd11 = p5 + ( dtemp11 - dtemp22 )*p5/Angle_denom_alpha !Cos^2
+            Qd12Qd12 = p5 - ( dtemp11 - dtemp22 )*p5/Angle_denom_alpha !Sin^2
+            Qd11Qd12 = dtemp12/Angle_denom_alpha !CosSin
+         endif

-      dtemp1 = Qd11Qd11*dtemp11 + c2*Qd11Qd12*dtemp12 + Qd12Qd12*dtemp22
-      dtemp2 = Qd12Qd12*dtemp11 - c2*Qd11Qd12*dtemp12 + Qd11Qd11*dtemp22
+         dtemp1 = Qd11Qd11*dtemp11 + c2*Qd11Qd12*dtemp12 + Qd12Qd12*dtemp22
+         dtemp2 = Qd12Qd12*dtemp11 - c2*Qd11Qd12*dtemp12 + Qd11Qd11*dtemp22

-! In cos and sin values
-      x = c0
+         ! In cos and sin values
+         x = c0

-      if ((ABS(dtemp1) > puny).or.(ABS(dtemp2) > puny)) then
-!        invleng = c1/sqrt(dtemp1*dtemp1 + dtemp2*dtemp2) ! not sure if this is neccessary
-!        dtemp1 = dtemp1*invleng
-!        dtemp2 = dtemp2*invleng
-        if (dtemp1 == c0) then
-          x = pih
-        else
-          x = atan2(dtemp2,dtemp1)
-        endif
+         if ((ABS(dtemp1) > puny).or.(ABS(dtemp2) > puny)) then
+!           invleng = c1/sqrt(dtemp1*dtemp1 + dtemp2*dtemp2) ! not sure if this is necessary
+!           dtemp1 = dtemp1*invleng
+!           dtemp2 = dtemp2*invleng
+            if (dtemp1 == c0) then
+               x = pih
+            else
+               x = atan2(dtemp2,dtemp1)
             endif
+         endif

-!echmod to ensure the angle lies between pi/4 and 9 pi/4
-      if (x < piq) x = x + pi2
-!echmod require 0 <= x < (nx_yield-1)*dx = 2 pi
-!      x = mod(x+pi2, pi2)
-
-! y: angle between major principal axis of strain rate and structure tensor
-! y = gamma - alpha
-! Expressesed componently with
-! Tany = (Singamma*Cosgamma - Sinalpha*Cosgamma)/(Cos^2gamma - Sin^alpha)
-
-      Tany_1 = Q11Q12 - Qd11Qd12
-      Tany_2 = Q11Q11 - Qd12Qd12
-
-      y = c0
-
-      if ((ABS(Tany_1) > puny).or.(ABS(Tany_2) > puny)) then
-!        invleng = c1/sqrt(Tany_1*Tany_1 + Tany_2*Tany_2) ! not sure if this is neccessary
-!        Tany_1 = Tany_1*invleng
-!        Tany_2 = Tany_2*invleng
-        if (Tany_2 == c0) then
-          y = pih
-        else
-          y = atan2(Tany_1,Tany_2)
-        endif
+         !echmod to ensure the angle lies between pi/4 and 9 pi/4
+         if (x < piq) x = x + pi2
+         !echmod require 0 <= x < (nx_yield-1)*dx = 2 pi
+!        x = mod(x+pi2, pi2)
+         ! y: angle between major principal axis of strain rate and structure tensor
+         ! y = gamma - alpha
+         ! Expressed componentwise with
+         ! Tany = (Singamma*Cosgamma - Sinalpha*Cosgamma)/(Cos^2gamma - Sin^alpha)
+
+         Tany_1 = Q11Q12 - Qd11Qd12
+         Tany_2 = Q11Q11 - Qd12Qd12
+
+         y = c0
+
+         if ((ABS(Tany_1) > puny).or.(ABS(Tany_2) > puny)) then
+!           invleng = c1/sqrt(Tany_1*Tany_1 + Tany_2*Tany_2) ! not sure if this is necessary
+!           Tany_1 = Tany_1*invleng
+!           Tany_2 = Tany_2*invleng
+            if (Tany_2 == c0) then
+               y = pih
+            else
+               y = atan2(Tany_1,Tany_2)
             endif
+         endif

-!
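A worked check of the closed forms above for the structure-tensor angle gamma: take a hypothetical a11 = 0.7 and a12 = 0.1, so a22 = 1 - a11 = 0.3. Then

    Angle_denom_gamma = sqrt((a11-a22)**2 + 4*a12**2) = sqrt(0.16 + 0.04) = 0.4472
    Q11Q11 = 0.5 + (a11-a22)*0.5/0.4472 = 0.9472   ! cos^2(gamma)
    Q12Q12 = 0.5 - (a11-a22)*0.5/0.4472 = 0.0528   ! sin^2(gamma)
    Q11Q12 = a12/0.4472                 = 0.2236   ! cos(gamma)*sin(gamma)

and tan(2*gamma) = 2*Q11Q12/(Q11Q11 - Q12Q12) = 0.4472/0.8944 = 0.5 = 2*a12/(a11-a22), as the definition of gamma requires. The identical construction with dtemp11/dtemp12/dtemp22 yields the strain-rate angle alpha, and y is then built from the difference gamma - alpha.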
to make sure y is between 0 and pi - if (interpolate_stress_rdg) then - -! Interpolated lookup - - ! if (x>=9*pi/4) x=9*pi/4-puny; end - ! if (y>=pi/2) y=pi/2-puny; end - ! if (atempprime>=1.0), atempprime=1.0-puny; end - - ! % need 8 coords and 8 weights - ! % range in kx - - kx = int((x-piq-pi)*invdx) + 1 - kxw = c1 - ((x-piq-pi)*invdx - (kx-1)) - - ky = int(y*invdy) + 1 - kyw = c1 - (y*invdy - (ky-1)) - - ka = int((atempprime-p5)*invda) + 1 - kaw = c1 - ((atempprime-p5)*invda - (ka-1)) - -! % Determine sigma_r(A1,Zeta,y) and sigma_s (see Section A1) - - stemp11r = kxw* kyw * kaw * s11r(kx ,ky ,ka ) & - + (c1-kxw) * kyw * kaw * s11r(kx+1,ky ,ka ) & - + kxw * (c1-kyw) * kaw * s11r(kx ,ky+1,ka ) & - + kxw * kyw * (c1-kaw) * s11r(kx ,ky ,ka+1) & - + (c1-kxw) * (c1-kyw) * kaw * s11r(kx+1,ky+1,ka ) & - + (c1-kxw) * kyw * (c1-kaw) * s11r(kx+1,ky ,ka+1) & - + kxw * (c1-kyw) * (c1-kaw) * s11r(kx ,ky+1,ka+1) & - + (c1-kxw) * (c1-kyw) * (c1-kaw) * s11r(kx+1,ky+1,ka+1) - - stemp12r = kxw* kyw * kaw * s12r(kx ,ky ,ka ) & - + (c1-kxw) * kyw * kaw * s12r(kx+1,ky ,ka ) & - + kxw * (c1-kyw) * kaw * s12r(kx ,ky+1,ka ) & - + kxw * kyw * (c1-kaw) * s12r(kx ,ky ,ka+1) & - + (c1-kxw) * (c1-kyw) * kaw * s12r(kx+1,ky+1,ka ) & - + (c1-kxw) * kyw * (c1-kaw) * s12r(kx+1,ky ,ka+1) & - + kxw * (c1-kyw) * (c1-kaw) * s12r(kx ,ky+1,ka+1) & - + (c1-kxw) * (c1-kyw) * (c1-kaw) * s12r(kx+1,ky+1,ka+1) - - stemp22r = kxw * kyw * kaw * s22r(kx ,ky ,ka ) & - + (c1-kxw) * kyw * kaw * s22r(kx+1,ky ,ka ) & - + kxw * (c1-kyw) * kaw * s22r(kx ,ky+1,ka ) & - + kxw * kyw * (c1-kaw) * s22r(kx ,ky ,ka+1) & - + (c1-kxw) * (c1-kyw) * kaw * s22r(kx+1,ky+1,ka ) & - + (c1-kxw) * kyw * (c1-kaw) * s22r(kx+1,ky ,ka+1) & - + kxw * (c1-kyw) * (c1-kaw) * s22r(kx ,ky+1,ka+1) & - + (c1-kxw) * (c1-kyw) * (c1-kaw) * s22r(kx+1,ky+1,ka+1) - - stemp11s = kxw* kyw * kaw * s11s(kx ,ky ,ka ) & - + (c1-kxw) * kyw * kaw * s11s(kx+1,ky ,ka ) & - + kxw * (c1-kyw) * kaw * s11s(kx ,ky+1,ka ) & - + kxw * kyw * (c1-kaw) * s11s(kx ,ky ,ka+1) & - + (c1-kxw) * (c1-kyw) * kaw * s11s(kx+1,ky+1,ka ) & - + (c1-kxw) * kyw * (c1-kaw) * s11s(kx+1,ky ,ka+1) & - + kxw * (c1-kyw) * (c1-kaw) * s11s(kx ,ky+1,ka+1) & - + (c1-kxw) * (c1-kyw) * (c1-kaw) * s11s(kx+1,ky+1,ka+1) - - stemp12s = kxw* kyw * kaw * s12s(kx ,ky ,ka ) & - + (c1-kxw) * kyw * kaw * s12s(kx+1,ky ,ka ) & - + kxw * (c1-kyw) * kaw * s12s(kx ,ky+1,ka ) & - + kxw * kyw * (c1-kaw) * s12s(kx ,ky ,ka+1) & - + (c1-kxw) * (c1-kyw) * kaw * s12s(kx+1,ky+1,ka ) & - + (c1-kxw) * kyw * (c1-kaw) * s12s(kx+1,ky ,ka+1) & - + kxw * (c1-kyw) * (c1-kaw) * s12s(kx ,ky+1,ka+1) & - + (c1-kxw) * (c1-kyw) * (c1-kaw) * s12s(kx+1,ky+1,ka+1) - - stemp22s = kxw* kyw * kaw * s22s(kx ,ky ,ka ) & - + (c1-kxw) * kyw * kaw * s22s(kx+1,ky ,ka ) & - + kxw * (c1-kyw) * kaw * s22s(kx ,ky+1,ka ) & - + kxw * kyw * (c1-kaw) * s22s(kx ,ky ,ka+1) & - + (c1-kxw) * (c1-kyw) * kaw * s22s(kx+1,ky+1,ka ) & - + (c1-kxw) * kyw * (c1-kaw) * s22s(kx+1,ky ,ka+1) & - + kxw * (c1-kyw) * (c1-kaw) * s22s(kx ,ky+1,ka+1) & - + (c1-kxw) * (c1-kyw) * (c1-kaw) * s22s(kx+1,ky+1,ka+1) + if (y > pi) y = y - pi + if (y < 0) y = y + pi - else - kx = int((x-piq-pi)*invdx) + 1 - ky = int(y*invdy) + 1 - ka = int((atempprime-p5)*invda) + 1 - -! Determine sigma_r(A1,Zeta,y) and sigma_s (see Section A1) - stemp11r = s11r(kx,ky,ka) - stemp12r = s12r(kx,ky,ka) - stemp22r = s22r(kx,ky,ka) - - stemp11s = s11s(kx,ky,ka) - stemp12s = s12s(kx,ky,ka) - stemp22s = s22s(kx,ky,ka) - endif + ! 
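Ahead of the interpolated lookup just below: the eight kxw/kyw/kaw products implement standard trilinear interpolation in the (x, y, atempprime) table. A minimal sketch under the assumption of unit-spaced axes starting at index 1 (trilin is hypothetical; the real code first maps the physical coordinates into index space with invdx, invdy and invda):

    pure function trilin(tab, x, y, a) result(val)
       ! 8-corner trilinear weighting; each w* is the weight of the lower corner
       implicit none
       integer, parameter :: dbl_kind = selected_real_kind(13)
       real (kind=dbl_kind), intent(in) :: tab(:,:,:), x, y, a
       real (kind=dbl_kind) :: val, wx, wy, wa
       integer :: kx, ky, ka
       kx = int(x); wx = 1.0_dbl_kind - (x - kx)
       ky = int(y); wy = 1.0_dbl_kind - (y - ky)
       ka = int(a); wa = 1.0_dbl_kind - (a - ka)
       val =     wx *    wy *    wa * tab(kx  ,ky  ,ka  ) &
           + (1-wx) *    wy *    wa * tab(kx+1,ky  ,ka  ) &
           +     wx *(1-wy) *    wa * tab(kx  ,ky+1,ka  ) &
           +     wx *    wy *(1-wa) * tab(kx  ,ky  ,ka+1) &
           + (1-wx) *(1-wy) *    wa * tab(kx+1,ky+1,ka  ) &
           + (1-wx) *    wy *(1-wa) * tab(kx+1,ky  ,ka+1) &
           +     wx *(1-wy) *(1-wa) * tab(kx  ,ky+1,ka+1) &
           + (1-wx) *(1-wy) *(1-wa) * tab(kx+1,ky+1,ka+1)
    end function trilin

The eight weights sum to one, so the interpolant reproduces the table exactly at the corners and varies linearly along each axis in between.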
Now calculate updated stress tensor + + if (first_call) then + dx = pi/real(nx_yield-1,kind=dbl_kind) + dy = pi/real(ny_yield-1,kind=dbl_kind) + da = p5/real(na_yield-1,kind=dbl_kind) + invdx = c1/dx + invdy = c1/dy + invda = c1/da + endif -! Calculate mean ice stress over a collection of floes (Equation 3) + if (interpolate_stress_rdg) then + + ! Interpolated lookup + + ! if (x>=9*pi/4) x=9*pi/4-puny; end + ! if (y>=pi/2) y=pi/2-puny; end + ! if (atempprime>=1.0), atempprime=1.0-puny; end + + ! % need 8 coords and 8 weights + ! % range in kx + + kx = int((x-piq-pi)*invdx) + 1 + kxw = c1 - ((x-piq-pi)*invdx - (kx-1)) + + ky = int(y*invdy) + 1 + kyw = c1 - (y*invdy - (ky-1)) + + ka = int((atempprime-p5)*invda) + 1 + kaw = c1 - ((atempprime-p5)*invda - (ka-1)) + + ! % Determine sigma_r(A1,Zeta,y) and sigma_s (see Section A1) + + stemp11r = kxw* kyw * kaw * s11r(kx ,ky ,ka ) & + + (c1-kxw) * kyw * kaw * s11r(kx+1,ky ,ka ) & + + kxw * (c1-kyw) * kaw * s11r(kx ,ky+1,ka ) & + + kxw * kyw * (c1-kaw) * s11r(kx ,ky ,ka+1) & + + (c1-kxw) * (c1-kyw) * kaw * s11r(kx+1,ky+1,ka ) & + + (c1-kxw) * kyw * (c1-kaw) * s11r(kx+1,ky ,ka+1) & + + kxw * (c1-kyw) * (c1-kaw) * s11r(kx ,ky+1,ka+1) & + + (c1-kxw) * (c1-kyw) * (c1-kaw) * s11r(kx+1,ky+1,ka+1) + + stemp12r = kxw* kyw * kaw * s12r(kx ,ky ,ka ) & + + (c1-kxw) * kyw * kaw * s12r(kx+1,ky ,ka ) & + + kxw * (c1-kyw) * kaw * s12r(kx ,ky+1,ka ) & + + kxw * kyw * (c1-kaw) * s12r(kx ,ky ,ka+1) & + + (c1-kxw) * (c1-kyw) * kaw * s12r(kx+1,ky+1,ka ) & + + (c1-kxw) * kyw * (c1-kaw) * s12r(kx+1,ky ,ka+1) & + + kxw * (c1-kyw) * (c1-kaw) * s12r(kx ,ky+1,ka+1) & + + (c1-kxw) * (c1-kyw) * (c1-kaw) * s12r(kx+1,ky+1,ka+1) + + stemp22r = kxw * kyw * kaw * s22r(kx ,ky ,ka ) & + + (c1-kxw) * kyw * kaw * s22r(kx+1,ky ,ka ) & + + kxw * (c1-kyw) * kaw * s22r(kx ,ky+1,ka ) & + + kxw * kyw * (c1-kaw) * s22r(kx ,ky ,ka+1) & + + (c1-kxw) * (c1-kyw) * kaw * s22r(kx+1,ky+1,ka ) & + + (c1-kxw) * kyw * (c1-kaw) * s22r(kx+1,ky ,ka+1) & + + kxw * (c1-kyw) * (c1-kaw) * s22r(kx ,ky+1,ka+1) & + + (c1-kxw) * (c1-kyw) * (c1-kaw) * s22r(kx+1,ky+1,ka+1) + + stemp11s = kxw* kyw * kaw * s11s(kx ,ky ,ka ) & + + (c1-kxw) * kyw * kaw * s11s(kx+1,ky ,ka ) & + + kxw * (c1-kyw) * kaw * s11s(kx ,ky+1,ka ) & + + kxw * kyw * (c1-kaw) * s11s(kx ,ky ,ka+1) & + + (c1-kxw) * (c1-kyw) * kaw * s11s(kx+1,ky+1,ka ) & + + (c1-kxw) * kyw * (c1-kaw) * s11s(kx+1,ky ,ka+1) & + + kxw * (c1-kyw) * (c1-kaw) * s11s(kx ,ky+1,ka+1) & + + (c1-kxw) * (c1-kyw) * (c1-kaw) * s11s(kx+1,ky+1,ka+1) + + stemp12s = kxw* kyw * kaw * s12s(kx ,ky ,ka ) & + + (c1-kxw) * kyw * kaw * s12s(kx+1,ky ,ka ) & + + kxw * (c1-kyw) * kaw * s12s(kx ,ky+1,ka ) & + + kxw * kyw * (c1-kaw) * s12s(kx ,ky ,ka+1) & + + (c1-kxw) * (c1-kyw) * kaw * s12s(kx+1,ky+1,ka ) & + + (c1-kxw) * kyw * (c1-kaw) * s12s(kx+1,ky ,ka+1) & + + kxw * (c1-kyw) * (c1-kaw) * s12s(kx ,ky+1,ka+1) & + + (c1-kxw) * (c1-kyw) * (c1-kaw) * s12s(kx+1,ky+1,ka+1) + + stemp22s = kxw* kyw * kaw * s22s(kx ,ky ,ka ) & + + (c1-kxw) * kyw * kaw * s22s(kx+1,ky ,ka ) & + + kxw * (c1-kyw) * kaw * s22s(kx ,ky+1,ka ) & + + kxw * kyw * (c1-kaw) * s22s(kx ,ky ,ka+1) & + + (c1-kxw) * (c1-kyw) * kaw * s22s(kx+1,ky+1,ka ) & + + (c1-kxw) * kyw * (c1-kaw) * s22s(kx+1,ky ,ka+1) & + + kxw * (c1-kyw) * (c1-kaw) * s22s(kx ,ky+1,ka+1) & + + (c1-kxw) * (c1-kyw) * (c1-kaw) * s22s(kx+1,ky+1,ka+1) - stressp = strength*(stemp11r + kfriction*stemp11s & - + stemp22r + kfriction*stemp22s) * invsin - stress12 = strength*(stemp12r + kfriction*stemp12s) * invsin - stressm = strength*(stemp11r + kfriction*stemp11s & - - 
stemp22r - kfriction*stemp22s) * invsin + else -! Back - rotation of the stress from principal axes into general coordinates + kx = int((x-piq-pi)*invdx) + 1 + ky = int(y*invdy) + 1 + ka = int((atempprime-p5)*invda) + 1 -! Update stress - sig11 = p5*(stressp + stressm) - sig12 = stress12 - sig22 = p5*(stressp - stressm) + ! Determine sigma_r(A1,Zeta,y) and sigma_s (see Section A1) - sgprm11 = Q11Q11*sig11 + Q12Q12*sig22 - c2*Q11Q12 *sig12 - sgprm12 = Q11Q12*sig11 - Q11Q12*sig22 + (Q11Q11 - Q12Q12)*sig12 - sgprm22 = Q12Q12*sig11 + Q11Q11*sig22 + c2*Q11Q12 *sig12 + stemp11r = s11r(kx,ky,ka) + stemp12r = s12r(kx,ky,ka) + stemp22r = s22r(kx,ky,ka) - stressp = sgprm11 + sgprm22 - stress12 = sgprm12 - stressm = sgprm11 - sgprm22 + stemp11s = s11s(kx,ky,ka) + stemp12s = s12s(kx,ky,ka) + stemp22s = s22s(kx,ky,ka) -! Compute ridging and sliding functions in general coordinates (Equation 11) - if (ksub == ndte) then - rotstemp11r = Q11Q11*stemp11r - c2*Q11Q12* stemp12r & - + Q12Q12*stemp22r - rotstemp12r = Q11Q11*stemp12r + Q11Q12*(stemp11r-stemp22r) & - - Q12Q12*stemp12r - rotstemp22r = Q12Q12*stemp11r + c2*Q11Q12* stemp12r & - + Q11Q11*stemp22r - - rotstemp11s = Q11Q11*stemp11s - c2*Q11Q12* stemp12s & - + Q12Q12*stemp22s - rotstemp12s = Q11Q11*stemp12s + Q11Q12*(stemp11s-stemp22s) & - - Q12Q12*stemp12s - rotstemp22s = Q12Q12*stemp11s + c2*Q11Q12* stemp12s & - + Q11Q11*stemp22s - - alphar = rotstemp11r*dtemp11 + c2*rotstemp12r*dtemp12 & - + rotstemp22r*dtemp22 - alphas = rotstemp11s*dtemp11 + c2*rotstemp12s*dtemp12 & - + rotstemp22s*dtemp22 - endif + endif + + ! Calculate mean ice stress over a collection of floes (Equation 3) + + stressp = strength*(stemp11r + kfriction*stemp11s & + + stemp22r + kfriction*stemp22s) * invsin + stress12 = strength*(stemp12r + kfriction*stemp12s) * invsin + stressm = strength*(stemp11r + kfriction*stemp11s & + - stemp22r - kfriction*stemp22s) * invsin + + ! Back - rotation of the stress from principal axes into general coordinates + + ! Update stress + + sig11 = p5*(stressp + stressm) + sig12 = stress12 + sig22 = p5*(stressp - stressm) - first_call = .false. + sgprm11 = Q11Q11*sig11 + Q12Q12*sig22 - c2*Q11Q12 *sig12 + sgprm12 = Q11Q12*sig11 - Q11Q12*sig22 + (Q11Q11 - Q12Q12)*sig12 + sgprm22 = Q12Q12*sig11 + Q11Q11*sig22 + c2*Q11Q12 *sig12 + + stressp = sgprm11 + sgprm22 + stress12 = sgprm12 + stressm = sgprm11 - sgprm22 + + ! Compute ridging and sliding functions in general coordinates (Equation 11) + + if (ksub == ndte) then + rotstemp11r = Q11Q11*stemp11r - c2*Q11Q12* stemp12r & + + Q12Q12*stemp22r + rotstemp12r = Q11Q11*stemp12r + Q11Q12*(stemp11r-stemp22r) & + - Q12Q12*stemp12r + rotstemp22r = Q12Q12*stemp11r + c2*Q11Q12* stemp12r & + + Q11Q11*stemp22r + + rotstemp11s = Q11Q11*stemp11s - c2*Q11Q12* stemp12s & + + Q12Q12*stemp22s + rotstemp12s = Q11Q11*stemp12s + Q11Q12*(stemp11s-stemp22s) & + - Q12Q12*stemp12s + rotstemp22s = Q12Q12*stemp11s + c2*Q11Q12* stemp12s & + + Q11Q11*stemp22s + + alphar = rotstemp11r*dtemp11 + c2*rotstemp12r*dtemp12 & + + rotstemp22r*dtemp22 + alphas = rotstemp11s*dtemp11 + c2*rotstemp12s*dtemp12 & + + rotstemp22s*dtemp22 + endif + + first_call = .false. end subroutine update_stress_rdg !======================================================================= - -! Solves evolution equation for structure tensor (A19, A20) +! Solves evolution equation for structure tensor (A19, A20) subroutine stepa (nx_block, ny_block, & dtei, icellt, & @@ -1870,19 +1905,19 @@ subroutine stepa (nx_block, ny_block, & dtei ! 
1/dte, where dte is subcycling timestep (1/s) integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxti , & ! compressed index in i-direction + indxtj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & ! ice stress tensor (kg/s^2) in each corner of T cell - stressp_1, stressp_2, stressp_3, stressp_4, & ! sigma11+sigma22 - stressm_1, stressm_2, stressm_3, stressm_4, & ! sigma11-sigma22 + stressp_1, stressp_2, stressp_3, stressp_4, & ! sigma11+sigma22 + stressm_1, stressm_2, stressm_3, stressm_4, & ! sigma11-sigma22 stress12_1, stress12_2, stress12_3, stress12_4 ! sigma12 real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & ! structure tensor () in each corner of T cell - a11, a12, a11_1, a11_2, a11_3, a11_4, & ! components of - a12_1, a12_2, a12_3, a12_4 ! structure tensor () + a11, a12, a11_1, a11_2, a11_3, a11_4, & ! components of + a12_1, a12_2, a12_3, a12_4 ! structure tensor () ! local variables @@ -1890,11 +1925,11 @@ subroutine stepa (nx_block, ny_block, & i, j, ij real (kind=dbl_kind) :: & - mresult11, mresult12, & - dteikth, p5kth + mresult11, mresult12, & + dteikth, p5kth real (kind=dbl_kind), parameter :: & - kth = p2*p001 + kth = p2*p001 character(len=*), parameter :: subname = '(stepa)' @@ -1905,62 +1940,61 @@ subroutine stepa (nx_block, ny_block, & i = indxti(ij) j = indxtj(ij) -! ne - call calc_ffrac(stressp_1(i,j), stressm_1(i,j), & - stress12_1(i,j), & - a11_1(i,j), a12_1(i,j), & - mresult11, mresult12) + ! ne + call calc_ffrac(stressp_1(i,j), stressm_1(i,j), & + stress12_1(i,j), & + a11_1(i,j), a12_1(i,j), & + mresult11, mresult12) a11_1(i,j) = (a11_1(i,j)*dtei + p5kth - mresult11) * dteikth ! implicit a12_1(i,j) = (a12_1(i,j)*dtei - mresult12) * dteikth ! implicit - -! nw - call calc_ffrac(stressp_2(i,j), stressm_2(i,j), & - stress12_2(i,j), & - a11_2(i,j), a12_2(i,j), & - mresult11, mresult12) + + ! nw + call calc_ffrac(stressp_2(i,j), stressm_2(i,j), & + stress12_2(i,j), & + a11_2(i,j), a12_2(i,j), & + mresult11, mresult12) a11_2(i,j) = (a11_2(i,j)*dtei + p5kth - mresult11) * dteikth ! implicit a12_2(i,j) = (a12_2(i,j)*dtei - mresult12) * dteikth ! implicit -! sw - call calc_ffrac(stressp_3(i,j), stressm_3(i,j), & - stress12_3(i,j), & - a11_3(i,j), a12_3(i,j), & - mresult11, mresult12) + ! sw + call calc_ffrac(stressp_3(i,j), stressm_3(i,j), & + stress12_3(i,j), & + a11_3(i,j), a12_3(i,j), & + mresult11, mresult12) a11_3(i,j) = (a11_3(i,j)*dtei + p5kth - mresult11) * dteikth ! implicit a12_3(i,j) = (a12_3(i,j)*dtei - mresult12) * dteikth ! implicit - -! se - call calc_ffrac(stressp_4(i,j), stressm_4(i,j), & - stress12_4(i,j), & - a11_4(i,j), a12_4(i,j), & - mresult11, mresult12) + + ! se + call calc_ffrac(stressp_4(i,j), stressm_4(i,j), & + stress12_4(i,j), & + a11_4(i,j), a12_4(i,j), & + mresult11, mresult12) a11_4(i,j) = (a11_4(i,j)*dtei + p5kth - mresult11) * dteikth ! implicit a12_4(i,j) = (a12_4(i,j)*dtei - mresult12) * dteikth ! implicit -! average structure tensor + ! average structure tensor a11(i,j) = p25*(a11_1(i,j) + a11_2(i,j) + a11_3(i,j) + a11_4(i,j)) a12(i,j) = p25*(a12_1(i,j) + a12_2(i,j) + a12_3(i,j) + a12_4(i,j)) - + enddo ! ij - + end subroutine stepa !======================================================================= - ! computes term in evolution equation for structure tensor which determines ! the ice floe re-orientation due to fracture ! Eq. 
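
The four corner updates in stepa above are marked "! implicit" because each is a backward-Euler step of the structure-tensor relaxation. Assuming dteikth = 1/(dtei + kth) and p5kth = kth/2 (both constants are assigned outside the quoted hunks, so this is an inference), each update solves da/dt = -kth*(a - 1/2) - M over one subcycle:

   ! Illustrative sketch only -- not part of this patch.
   ! backward Euler:  (a_new - a_old)*dtei = -kth*a_new + kth/2 - M
   !             =>    a_new = (a_old*dtei + kth/2 - M) / (dtei + kth)
   elemental function step_a (a_old, m, dtei, kth) result(a_new)
      implicit none
      integer, parameter :: dbl = kind(1.0d0)
      real(dbl), intent(in) :: a_old, m, dtei, kth
      real(dbl) :: a_new
      a_new = (a_old*dtei + 0.5d0*kth - m) / (dtei + kth)
   end function step_a

Treating the decay term implicitly keeps the update stable regardless of the subcycle length, which matters because dte is much shorter than the dynamics time step.
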
7: Ffrac = -kf(A-S) or = 0 depending on sigma_1 and sigma_2 subroutine calc_ffrac (stressp, stressm, & - stress12, & - a1x, a2x, & + stress12, & + a1x, a2x, & mresult1, mresult2) real (kind=dbl_kind), intent(in) :: & @@ -1982,62 +2016,61 @@ subroutine calc_ffrac (stressp, stressm, & character(len=*), parameter :: subname = '(calc_ffrac)' - sigma11 = p5*(stressp+stressm) - sigma12 = stress12 - sigma22 = p5*(stressp-stressm) + sigma11 = p5*(stressp+stressm) + sigma12 = stress12 + sigma22 = p5*(stressp-stressm) -! if ((sigma11-sigma22) == c0) then sigma11-sigma22 == 0 => stressn ==0 - if (stressm == c0) then +! if ((sigma11-sigma22) == c0) then sigma11-sigma22 == 0 => stressn ==0 + if (stressm == c0) then gamma = p5*(pih) - else + else gamma = p5*atan2((c2*sigma12),(sigma11-sigma22)) - endif + endif -! rotate tensor to get into sigma principal axis + ! rotate tensor to get into sigma principal axis - Q11 = cos(gamma) - Q12 = sin(gamma) + Q11 = cos(gamma) + Q12 = sin(gamma) - Q11Q11 = Q11*Q11 - Q11Q12 = Q11*Q12 - Q12Q12 = Q12*Q12 + Q11Q11 = Q11*Q11 + Q11Q12 = Q11*Q12 + Q12Q12 = Q12*Q12 - sigma_1 = Q11Q11*sigma11 + c2*Q11Q12*sigma12 & - + Q12Q12*sigma22 ! S(1,1) - sigma_2 = Q12Q12*sigma11 - c2*Q11Q12*sigma12 & - + Q11Q11*sigma22 ! S(2,2) + sigma_1 = Q11Q11*sigma11 + c2*Q11Q12*sigma12 & + + Q12Q12*sigma22 ! S(1,1) + sigma_2 = Q12Q12*sigma11 - c2*Q11Q12*sigma12 & + + Q11Q11*sigma22 ! S(2,2) -! Pure divergence - if ((sigma_1 >= c0).and.(sigma_2 >= c0)) then + ! Pure divergence + if ((sigma_1 >= c0).and.(sigma_2 >= c0)) then mresult1 = c0 mresult2 = c0 -! Unconfined compression: cracking of blocks not along the axial splitting direction -! which leads to the loss of their shape, so we again model it through diffusion - elseif ((sigma_1 >= c0).and.(sigma_2 < c0)) then + ! Unconfined compression: cracking of blocks not along the axial splitting direction + ! which leads to the loss of their shape, so we again model it through diffusion + elseif ((sigma_1 >= c0).and.(sigma_2 < c0)) then mresult1 = kfrac * (a1x - Q12Q12) mresult2 = kfrac * (a2x + Q11Q12) -! Shear faulting - elseif (sigma_2 == c0) then - mresult1 = c0 - mresult2 = c0 - elseif ((sigma_1 <= c0).and.(sigma_1/sigma_2 <= threshold)) then + ! Shear faulting + elseif (sigma_2 == c0) then + mresult1 = c0 + mresult2 = c0 + elseif ((sigma_1 <= c0).and.(sigma_1/sigma_2 <= threshold)) then mresult1 = kfrac * (a1x - Q12Q12) mresult2 = kfrac * (a2x + Q11Q12) -! Horizontal spalling - else + ! Horizontal spalling + else mresult1 = c0 mresult2 = c0 - endif + endif end subroutine calc_ffrac !======================================================================= !---! these subroutines write/read Fortran unformatted data files .. !======================================================================= - ! Dumps all values needed for a restart subroutine write_restart_eap () @@ -2053,7 +2086,7 @@ subroutine write_restart_eap () diag = .true. !----------------------------------------------------------------- - ! structure tensor + ! structure tensor !----------------------------------------------------------------- call write_restart_field(nu_dump_eap,0,a11_1,'ruf8','a11_1',1,diag) @@ -2069,7 +2102,6 @@ subroutine write_restart_eap () end subroutine write_restart_eap !======================================================================= - ! Reads all values needed for elastic anisotropic plastic dynamics restart subroutine read_restart_eap() @@ -2098,9 +2130,9 @@ subroutine read_restart_eap() ! 
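
calc_ffrac above rotates the stress tensor through gamma = atan2(2*sigma12, sigma11 - sigma22)/2, which diagonalizes it; sigma_1 and sigma_2 are then the principal stresses used to select the fracture regime. The rotated diagonal entries should equal the closed-form eigenvalues mean +/- radius, which permits a quick standalone sanity check (arbitrary test values, no CICE names):

   ! Illustrative check only -- not part of this patch.
   program check_principal
      implicit none
      integer, parameter :: dbl = kind(1.0d0)
      real(dbl) :: s11, s22, s12, gamma, q11, q12
      real(dbl) :: sig1, sig2, m, r
      s11 = 1.2d0 ; s22 = -0.4d0 ; s12 = 0.3d0
      gamma = 0.5d0*atan2(2.0d0*s12, s11 - s22)
      q11 = cos(gamma) ; q12 = sin(gamma)
      sig1 = q11*q11*s11 + 2.0d0*q11*q12*s12 + q12*q12*s22   ! S(1,1)
      sig2 = q12*q12*s11 - 2.0d0*q11*q12*s12 + q11*q11*s22   ! S(2,2)
      m = 0.5d0*(s11 + s22) ; r = sqrt((0.5d0*(s11 - s22))**2 + s12**2)
      print *, sig1 - (m + r), sig2 - (m - r)   ! both ~ 0 to roundoff
   end program check_principal

Note the guard at the top of the routine: when stressm == 0 the atan2 argument sigma11 - sigma22 vanishes, and gamma is pinned to pi/4 (p5*pih) rather than left to atan2's branch behavior.
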
Structure tensor must be read and scattered in pairs in order ! to properly match corner values across a tripole grid cut. !----------------------------------------------------------------- - if (my_task == master_task) write(nu_diag,*) & - 'structure tensor restart data' - + if (my_task == master_task) write(nu_diag,*) & + 'structure tensor restart data' + call read_restart_field(nu_restart_eap,0,a11_1,'ruf8', & 'a11_1',1,diag,field_loc_center,field_type_scalar) ! a11_1 call read_restart_field(nu_restart_eap,0,a11_3,'ruf8', & @@ -2121,22 +2153,22 @@ subroutine read_restart_eap() if (trim(grid_type) == 'tripole') then - call ice_HaloUpdate_stress(a11_1, a11_3, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(a11_3, a11_1, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(a11_2, a11_4, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(a11_4, a11_2, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(a12_1, a12_3, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(a12_3, a12_1, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(a12_2, a12_4, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(a12_4, a12_2, halo_info, & - field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a11_1, a11_3, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a11_3, a11_1, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a11_2, a11_4, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a11_4, a11_2, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a12_1, a12_3, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a12_3, a12_1, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a12_2, a12_4, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a12_4, a12_2, halo_info, & + field_loc_center, field_type_scalar) endif @@ -2144,34 +2176,34 @@ subroutine read_restart_eap() ! 
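
The loop nests just below zero the south (j = 1..nghost) and west (i = 1..nghost) ghost rows of every block, which the scatters above leave unused. For reference, each pair of nests is equivalent to two array-section assignments per field (sketch; a11_1, c0, nghost, and iblk are the surrounding subroutine's own variables):

   ! Illustrative sketch only -- not part of this patch.
   a11_1(:, 1:nghost, iblk) = c0   ! south ghost rows
   a11_1(1:nghost, :, iblk) = c0   ! west ghost columns

The explicit loops in the patch keep the OpenMP-over-blocks structure used elsewhere in the file.
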
Ensure unused values in west and south ghost cells are 0 !----------------------------------------------------------------- - !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) - do iblk = 1, nblocks - do j = 1, nghost - do i = 1, nx_block - a11_1 (i,j,iblk) = c0 - a11_2 (i,j,iblk) = c0 - a11_3 (i,j,iblk) = c0 - a11_4 (i,j,iblk) = c0 - a12_1 (i,j,iblk) = c0 - a12_2 (i,j,iblk) = c0 - a12_3 (i,j,iblk) = c0 - a12_4 (i,j,iblk) = c0 - enddo - enddo - do j = 1, ny_block - do i = 1, nghost - a11_1 (i,j,iblk) = c0 - a11_2 (i,j,iblk) = c0 - a11_3 (i,j,iblk) = c0 - a11_4 (i,j,iblk) = c0 - a12_1 (i,j,iblk) = c0 - a12_2 (i,j,iblk) = c0 - a12_3 (i,j,iblk) = c0 - a12_4 (i,j,iblk) = c0 - enddo - enddo + !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) + do iblk = 1, nblocks + do j = 1, nghost + do i = 1, nx_block + a11_1 (i,j,iblk) = c0 + a11_2 (i,j,iblk) = c0 + a11_3 (i,j,iblk) = c0 + a11_4 (i,j,iblk) = c0 + a12_1 (i,j,iblk) = c0 + a12_2 (i,j,iblk) = c0 + a12_3 (i,j,iblk) = c0 + a12_4 (i,j,iblk) = c0 enddo - !$OMP END PARALLEL DO + enddo + do j = 1, ny_block + do i = 1, nghost + a11_1 (i,j,iblk) = c0 + a11_2 (i,j,iblk) = c0 + a11_3 (i,j,iblk) = c0 + a11_4 (i,j,iblk) = c0 + a12_1 (i,j,iblk) = c0 + a12_2 (i,j,iblk) = c0 + a12_3 (i,j,iblk) = c0 + a12_4 (i,j,iblk) = c0 + enddo + enddo + enddo + !$OMP END PARALLEL DO end subroutine read_restart_eap diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index d54a73dd4..f18e60802 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -22,9 +22,9 @@ ! Hibler, W. D. (1979). A dynamic thermodynamic sea ice model. J. Phys. ! Oceanogr., 9, 817-846. ! -! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The +! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The ! elastic-viscous-plastic method revisited. Ocean Model., 71, 2-12. -! +! ! author: Elizabeth C. Hunke, LANL ! ! 2003: Vectorized by Clifford Chen (Fujitsu) and William Lipscomb (LANL) @@ -38,11 +38,14 @@ module ice_dyn_evp use ice_kinds_mod use ice_communicate, only: my_task, master_task use ice_constants, only: field_loc_center, field_loc_NEcorner, & + field_loc_Nface, field_loc_Eface, & field_type_scalar, field_type_vector use ice_constants, only: c0, p027, p055, p111, p166, & p222, p25, p333, p5, c1 - use ice_dyn_shared, only: stepu, dyn_prep1, dyn_prep2, dyn_finish, & - ndte, yield_curve, ecci, denom1, arlx1i, fcor_blk, uvel_init, vvel_init, & + use ice_dyn_shared, only: stepu, stepuv_CD, stepu_C, stepv_C, & + dyn_prep1, dyn_prep2, dyn_finish, & + ndte, yield_curve, ecci, denom1, arlx1i, fcor_blk, fcorE_blk, fcorN_blk, & + uvel_init, vvel_init, uvelE_init, vvelE_init, uvelN_init, vvelN_init, & seabed_stress_factor_LKD, seabed_stress_factor_prob, seabed_stress_method, & seabed_stress, Ktens, revp use ice_fileunits, only: nu_diag @@ -59,14 +62,13 @@ module ice_dyn_evp contains !======================================================================= - ! Elastic-viscous-plastic dynamics driver ! #ifdef CICE_IN_NEMO ! Wind stress is set during this routine from the values supplied -! via NEMO (unless calc_strair is true). These values are supplied -! rotated on u grid and multiplied by aice. strairxT = 0 in this -! case so operations in dyn_prep1 are pointless but carried out to +! via NEMO (unless calc_strair is true). These values are supplied +! rotated on u grid and multiplied by aice. strairxT = 0 in this +! case so operations in dyn_prep1 are pointless but carried out to ! 
minimise code changes. #endif ! @@ -85,42 +87,68 @@ subroutine evp (dt) strtltx, strtlty, strocnx, strocny, strintx, strinty, taubx, tauby, & strocnxT, strocnyT, strax, stray, & Tbu, hwater, & + strairxN, strairyN, icenmask, fmN, & + strtltxN, strtltyN, strocnxN, strocnyN, strintxN, strintyN, taubxN, taubyN, & + TbN, & + strairxE, strairyE, iceemask, fmE, & + strtltxE, strtltyE, strocnxE, strocnyE, strintxE, strintyE, taubxE, taubyE, & + TbE, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & - stress12_1, stress12_2, stress12_3, stress12_4 - use ice_grid, only: tmask, umask, dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, & - tarear, uarear, tinyarea, to_ugrid, t2ugrid_vector, u2tgrid_vector, & - grid_type - use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & + stress12_1, stress12_2, stress12_3, stress12_4, & + stresspT, stressmT, stress12T, & + stresspU, stressmU, stress12U + use ice_grid, only: hm, tmask, umask, umaskCD, nmask, emask, uvm, epm, npm, & + dxe, dxn, dxt, dxu, dye, dyn, dyt, dyu, & + ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & + dxhy, dyhx, cxp, cyp, cxm, cym, & + tarear, uarear, earear, narear, grid_average_X2Y, tarea, uarea, & + grid_type, grid_ice, & + grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv + use ice_state, only: aice, vice, vsno, uvel, vvel, uvelN, vvelN, & + uvelE, vvelE, divu, shear, & aice_init, aice0, aicen, vicen, strength use ice_timers, only: timer_dynamics, timer_bound, & ice_timer_start, ice_timer_stop, timer_evp_1d, timer_evp_2d use ice_dyn_evp_1d, only: ice_dyn_evp_1d_copyin, ice_dyn_evp_1d_kernel, & ice_dyn_evp_1d_copyout - use ice_dyn_shared, only: evp_algorithm, stack_velocity_field, unstack_velocity_field - use ice_dyn_shared, only: deformations + use ice_dyn_shared, only: evp_algorithm, stack_fields, unstack_fields, & + DminTarea, visc_method, deformations, deformationsC_T, deformationsCD_T, & + strain_rates_U, & + dyn_haloUpdate + real (kind=dbl_kind), intent(in) :: & dt ! time step ! local variables - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ksub , & ! subcycle step iblk , & ! block index ilo,ihi,jlo,jhi, & ! beginning and end of physical domain - i, j, ij + i, j, ij ! local indices - integer (kind=int_kind), dimension(max_blocks) :: & + integer (kind=int_kind), dimension(max_blocks) :: & icellt , & ! no. of cells where icetmask = 1 - icellu ! no. of cells where iceumask = 1 + icelln , & ! no. of cells where icenmask = .true. + icelle , & ! no. of cells where iceemask = .true. + icellu ! no. of cells where iceumask = .true. integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks) :: & indxti , & ! compressed index in i-direction indxtj , & ! compressed index in j-direction + indxei , & ! compressed index in i-direction + indxej , & ! compressed index in j-direction + indxni , & ! compressed index in i-direction + indxnj , & ! compressed index in j-direction indxui , & ! compressed index in i-direction indxuj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + uocnU , & ! i ocean current (m/s) + vocnU , & ! j ocean current (m/s) + ss_tltxU , & ! sea surface slope, x-direction (m/m) + ss_tltyU , & ! sea surface slope, y-direction (m/m) tmass , & ! total mass of ice and snow (kg/m^2) waterx , & ! for ocean stress calculation, x (m/s) watery , & ! for ocean stress calculation, y (m/s) @@ -130,12 +158,53 @@ subroutine evp (dt) umass , & ! total mass of ice and snow (u grid) umassdti ! 
mass of U-cell/dte (kg/m^2 s) - real (kind=dbl_kind), allocatable :: fld2(:,:,:,:) + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + uocnN , & ! i ocean current (m/s) + vocnN , & ! j ocean current (m/s) + ss_tltxN , & ! sea surface slope, x-direction (m/m) + ss_tltyN , & ! sea surface slope, y-direction (m/m) + waterxN , & ! for ocean stress calculation, x (m/s) + wateryN , & ! for ocean stress calculation, y (m/s) + forcexN , & ! work array: combined atm stress and ocn tilt, x + forceyN , & ! work array: combined atm stress and ocn tilt, y + aiN , & ! ice fraction on N-grid + nmass , & ! total mass of ice and snow (N grid) + nmassdti ! mass of N-cell/dte (kg/m^2 s) + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + uocnE , & ! i ocean current (m/s) + vocnE , & ! j ocean current (m/s) + ss_tltxE , & ! sea surface slope, x-direction (m/m) + ss_tltyE , & ! sea surface slope, y-direction (m/m) + waterxE , & ! for ocean stress calculation, x (m/s) + wateryE , & ! for ocean stress calculation, y (m/s) + forcexE , & ! work array: combined atm stress and ocn tilt, x + forceyE , & ! work array: combined atm stress and ocn tilt, y + aiE , & ! ice fraction on E-grid + emass , & ! total mass of ice and snow (E grid) + emassdti ! mass of E-cell/dte (kg/m^2 s) + + real (kind=dbl_kind), allocatable :: & + fld2(:,:,:,:) , & ! bundled fields size 2 + fld3(:,:,:,:) , & ! bundled fields size 3 + fld4(:,:,:,:) ! bundled fields size 4 + + real (kind=dbl_kind), allocatable :: & + strengthU(:,:,:), & ! strength averaged to U points + divergU (:,:,:), & ! div array on U points, differentiate from divu + tensionU (:,:,:), & ! tension array on U points + shearU (:,:,:), & ! shear array on U points + deltaU (:,:,:), & ! delta array on U points + zetax2T (:,:,:), & ! zetax2 = 2*zeta (bulk viscosity) + zetax2U (:,:,:), & ! zetax2T averaged to U points + etax2T (:,:,:), & ! etax2 = 2*eta (shear viscosity) + etax2U (:,:,:) ! etax2T averaged to U points real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & strtmp ! stress combinations for momentum equation - logical (kind=log_kind) :: calc_strair + logical (kind=log_kind) :: & + calc_strair ! calculate air/ice stress integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & icetmask, & ! ice extent mask (T-cell) @@ -145,10 +214,15 @@ subroutine evp (dt) halo_info_mask ! ghost cell update info for masked halo type (block) :: & - this_block ! block information for current block + this_block ! block information for current block + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1, & ! temporary + work2 ! temporary + + logical (kind=log_kind), save :: & + first_time = .true. ! first time logical - logical (kind=log_kind), save :: first_time = .true. - character(len=*), parameter :: subname = '(evp)' call ice_timer_start(timer_dynamics) ! dynamics @@ -158,13 +232,38 @@ subroutine evp (dt) !----------------------------------------------------------------- allocate(fld2(nx_block,ny_block,2,max_blocks)) + allocate(fld3(nx_block,ny_block,3,max_blocks)) + allocate(fld4(nx_block,ny_block,4,max_blocks)) + + if (grid_ice == 'CD' .or. 
grid_ice == 'C') then + + allocate(strengthU(nx_block,ny_block,max_blocks)) + allocate(divergU (nx_block,ny_block,max_blocks)) + allocate(tensionU (nx_block,ny_block,max_blocks)) + allocate(shearU (nx_block,ny_block,max_blocks)) + allocate(deltaU (nx_block,ny_block,max_blocks)) + allocate(zetax2T (nx_block,ny_block,max_blocks)) + allocate(zetax2U (nx_block,ny_block,max_blocks)) + allocate(etax2T (nx_block,ny_block,max_blocks)) + allocate(etax2U (nx_block,ny_block,max_blocks)) + strengthU(:,:,:) = c0 + divergU (:,:,:) = c0 + tensionU (:,:,:) = c0 + shearU (:,:,:) = c0 + deltaU (:,:,:) = c0 + zetax2T (:,:,:) = c0 + zetax2U (:,:,:) = c0 + etax2T (:,:,:) = c0 + etax2U (:,:,:) = c0 + + endif ! This call is needed only if dt changes during runtime. ! call set_evp_parameters (dt) !----------------------------------------------------------------- ! boundary updates - ! commented out because the ghost cells are freshly + ! commented out because the ghost cells are freshly ! updated after cleanup_itd !----------------------------------------------------------------- @@ -180,31 +279,29 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) SCHEDULE(runtime) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - rdg_conv (i,j,iblk) = c0 - rdg_shear(i,j,iblk) = c0 - divu (i,j,iblk) = c0 - shear(i,j,iblk) = c0 + do j = 1, ny_block + do i = 1, nx_block + rdg_conv (i,j,iblk) = c0 + rdg_shear(i,j,iblk) = c0 + divu (i,j,iblk) = c0 + shear(i,j,iblk) = c0 enddo enddo - !----------------------------------------------------------------- - ! preparation for dynamics - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! preparation for dynamics + !----------------------------------------------------------------- - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - call dyn_prep1 (nx_block, ny_block, & + call dyn_prep1 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - aice (:,:,iblk), vice (:,:,iblk), & - vsno (:,:,iblk), tmask (:,:,iblk), & - strairxT(:,:,iblk), strairyT(:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & + aice (:,:,iblk), vice (:,:,iblk), & + vsno (:,:,iblk), tmask (:,:,iblk), & tmass (:,:,iblk), icetmask(:,:,iblk)) enddo ! iblk @@ -219,108 +316,323 @@ subroutine evp (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call to_ugrid(tmass,umass) - call to_ugrid(aice_init, aiu) - + call grid_average_X2Y('F', tmass , 'T' , umass , 'U') + call grid_average_X2Y('F', aice_init, 'T' , aiu , 'U') + call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnU , 'U') + call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnU , 'U') + call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxU, 'U') + call grid_average_X2Y('S', ss_tlty , grid_ocn_dynv, ss_tltyU, 'U') + + if (grid_ice == 'CD' .or. 
grid_ice == 'C') then + call grid_average_X2Y('F', tmass , 'T' , emass, 'E') + call grid_average_X2Y('F', aice_init, 'T' , aie , 'E') + call grid_average_X2Y('F', tmass , 'T' , nmass, 'N') + call grid_average_X2Y('F', aice_init, 'T' , ain , 'N') + call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnN, 'N') + call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnN, 'N') + call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnE, 'E') + call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnE, 'E') + endif !---------------------------------------------------------------- ! Set wind stress to values supplied via NEMO or other forcing - ! This wind stress is rotated on u grid and multiplied by aice + ! Map T to U, N, E as needed + ! This wind stress is rotated on u grid and multiplied by aice in coupler !---------------------------------------------------------------- call icepack_query_parameters(calc_strair_out=calc_strair) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (.not. calc_strair) then - strairx(:,:,:) = strax(:,:,:) - strairy(:,:,:) = stray(:,:,:) + if (.not. calc_strair) then + call grid_average_X2Y('F', strax, grid_atm_dynu, strairx, 'U') + call grid_average_X2Y('F', stray, grid_atm_dynv, strairy, 'U') else - call t2ugrid_vector(strairx) - call t2ugrid_vector(strairy) - endif + call ice_HaloUpdate (strairxT, halo_info, & + field_loc_center, field_type_vector) + call ice_HaloUpdate (strairyT, halo_info, & + field_loc_center, field_type_vector) + call grid_average_X2Y('F', strairxT, 'T', strairx, 'U') + call grid_average_X2Y('F', strairyT, 'T', strairy, 'U') + endif + + if (grid_ice == 'CD' .or. grid_ice == 'C') then + if (.not. calc_strair) then + call grid_average_X2Y('F', strax , grid_atm_dynu, strairxN, 'N') + call grid_average_X2Y('F', stray , grid_atm_dynv, strairyN, 'N') + call grid_average_X2Y('F', strax , grid_atm_dynu, strairxE, 'E') + call grid_average_X2Y('F', stray , grid_atm_dynv, strairyE, 'E') + else + call grid_average_X2Y('F', strairxT, 'T' , strairxN, 'N') + call grid_average_X2Y('F', strairyT, 'T' , strairyN, 'N') + call grid_average_X2Y('F', strairxT, 'T' , strairxE, 'E') + call grid_average_X2Y('F', strairyT, 'T' , strairyE, 'E') + endif + endif !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) SCHEDULE(runtime) do iblk = 1, nblocks - !----------------------------------------------------------------- - ! more preparation for dynamics - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! 
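
The grid_average_X2Y calls above supersede the old to_ugrid/t2ugrid_vector interface and, when grid_ice is 'C' or 'CD', also map mass, ice fraction, currents, and wind stress onto the N and E points. The per-flavor weighting ('F', 'S', 'A') is implemented in ice_grid.F90 and not shown in this diff; purely as an illustration of the kind of stencil involved, an unweighted T-to-U average (U at the NE corner of T cell (i,j)) looks like:

   ! Illustrative sketch only -- not the weighting grid_average_X2Y applies.
   subroutine t2u_average (nx, ny, workT, workU)
      implicit none
      integer, parameter :: dbl = kind(1.0d0)
      integer,   intent(in)  :: nx, ny
      real(dbl), intent(in)  :: workT(nx,ny)
      real(dbl), intent(out) :: workU(nx,ny)
      real(dbl), parameter   :: p25 = 0.25d0
      integer :: i, j
      workU = 0.0d0
      do j = 1, ny-1
         do i = 1, nx-1
            workU(i,j) = p25*(workT(i,j  ) + workT(i+1,j  ) + &
                              workT(i,j+1) + workT(i+1,j+1))
         enddo
      enddo
   end subroutine t2u_average

The real routine presumably also accounts for masks, cell areas, and the direction-specific source grids (grid_atm_dynu, grid_ocn_dynu, etc.) named in the calls above.
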
more preparation for dynamics + !----------------------------------------------------------------- - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - call dyn_prep2 (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - icellt(iblk), icellu(iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & - aiu (:,:,iblk), umass (:,:,iblk), & - umassdti (:,:,iblk), fcor_blk (:,:,iblk), & - umask (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & - ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), & - icetmask (:,:,iblk), iceumask (:,:,iblk), & - fm (:,:,iblk), dt, & - strtltx (:,:,iblk), strtlty (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk), & - strintx (:,:,iblk), strinty (:,:,iblk), & - taubx (:,:,iblk), tauby (:,:,iblk), & - waterx (:,:,iblk), watery (:,:,iblk), & - forcex (:,:,iblk), forcey (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk), & - uvel_init (:,:,iblk), vvel_init (:,:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - Tbu (:,:,iblk)) + if (trim(grid_ice) == 'B') then + call dyn_prep2 (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + icellt (iblk), icellu (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + aiu (:,:,iblk), umass (:,:,iblk), & + umassdti (:,:,iblk), fcor_blk (:,:,iblk), & + umask (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & + strairx (:,:,iblk), strairy (:,:,iblk), & + ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & + icetmask (:,:,iblk), iceumask (:,:,iblk), & + fm (:,:,iblk), dt, & + strtltx (:,:,iblk), strtlty (:,:,iblk), & + strocnx (:,:,iblk), strocny (:,:,iblk), & + strintx (:,:,iblk), strinty (:,:,iblk), & + taubx (:,:,iblk), tauby (:,:,iblk), & + waterx (:,:,iblk), watery (:,:,iblk), & + forcex (:,:,iblk), forcey (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + uvel_init (:,:,iblk), vvel_init (:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + Tbu (:,:,iblk)) - !----------------------------------------------------------------- - ! ice strength - !----------------------------------------------------------------- + elseif (trim(grid_ice) == 'CD' .or. 
grid_ice == 'C') then + call dyn_prep2 (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + icellt (iblk), icellu (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + aiu (:,:,iblk), umass (:,:,iblk), & + umassdti (:,:,iblk), fcor_blk (:,:,iblk), & + umaskCD (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & + strairx (:,:,iblk), strairy (:,:,iblk), & + ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & + icetmask (:,:,iblk), iceumask (:,:,iblk), & + fm (:,:,iblk), dt, & + strtltx (:,:,iblk), strtlty (:,:,iblk), & + strocnx (:,:,iblk), strocny (:,:,iblk), & + strintx (:,:,iblk), strinty (:,:,iblk), & + taubx (:,:,iblk), tauby (:,:,iblk), & + waterx (:,:,iblk), watery (:,:,iblk), & + forcex (:,:,iblk), forcey (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + uvel_init (:,:,iblk), vvel_init (:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + Tbu (:,:,iblk)) + endif + + !----------------------------------------------------------------- + ! ice strength + !----------------------------------------------------------------- strength(:,:,iblk) = c0 ! initialize do ij = 1, icellt(iblk) i = indxti(ij, iblk) j = indxtj(ij, iblk) call icepack_ice_strength(ncat = ncat, & - aice = aice (i,j, iblk), & - vice = vice (i,j, iblk), & - aice0 = aice0 (i,j, iblk), & - aicen = aicen (i,j,:,iblk), & - vicen = vicen (i,j,:,iblk), & + aice = aice (i,j, iblk), & + vice = vice (i,j, iblk), & + aice0 = aice0 (i,j, iblk), & + aicen = aicen (i,j,:,iblk), & + vicen = vicen (i,j,:,iblk), & strength = strength(i,j, iblk) ) enddo ! ij enddo ! iblk !$OMP END PARALLEL DO + if (grid_ice == 'CD' .or. grid_ice == 'C') then + + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,i,j) SCHEDULE(runtime) + do iblk = 1, nblocks + + !----------------------------------------------------------------- + ! more preparation for dynamics on N grid + !----------------------------------------------------------------- + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + call dyn_prep2 (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + icellt (iblk), icelln (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + indxni (:,iblk), indxnj (:,iblk), & + aiN (:,:,iblk), nmass (:,:,iblk), & + nmassdti (:,:,iblk), fcorN_blk (:,:,iblk), & + nmask (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + strairxN (:,:,iblk), strairyN (:,:,iblk), & + ss_tltxN (:,:,iblk), ss_tltyN (:,:,iblk), & + icetmask (:,:,iblk), icenmask (:,:,iblk), & + fmN (:,:,iblk), dt, & + strtltxN (:,:,iblk), strtltyN (:,:,iblk), & + strocnxN (:,:,iblk), strocnyN (:,:,iblk), & + strintxN (:,:,iblk), strintyN (:,:,iblk), & + taubxN (:,:,iblk), taubyN (:,:,iblk), & + waterxN (:,:,iblk), wateryN (:,:,iblk), & + forcexN (:,:,iblk), forceyN (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + TbN (:,:,iblk)) + + !----------------------------------------------------------------- + ! 
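
For orientation in the C/CD code paths around this call pair: dyn_prep2 is invoked once per staggering (U in the first loop above, N just above, E just below), each time with the matching mask, Coriolis, mass, and forcing arrays. The staggering follows the usual Arakawa convention:

   !        ---------N(i,j)---------U(i,j)
   !       |                          |
   !       |         T(i,j)         E(i,j)
   !       |                          |
   !        --------------------------
   ! T at the cell centre, E at the east-face midpoint,
   ! N at the north-face midpoint, U at the NE corner.
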
more preparation for dynamics on E grid + !----------------------------------------------------------------- + + call dyn_prep2 (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + icellt (iblk), icelle (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + indxei (:,iblk), indxej (:,iblk), & + aiE (:,:,iblk), emass (:,:,iblk), & + emassdti (:,:,iblk), fcorE_blk (:,:,iblk), & + emask (:,:,iblk), & + uocnE (:,:,iblk), vocnE (:,:,iblk), & + strairxE (:,:,iblk), strairyE (:,:,iblk), & + ss_tltxE (:,:,iblk), ss_tltyE (:,:,iblk), & + icetmask (:,:,iblk), iceemask (:,:,iblk), & + fmE (:,:,iblk), dt, & + strtltxE (:,:,iblk), strtltyE (:,:,iblk), & + strocnxE (:,:,iblk), strocnyE (:,:,iblk), & + strintxE (:,:,iblk), strintyE (:,:,iblk), & + taubxE (:,:,iblk), taubyE (:,:,iblk), & + waterxE (:,:,iblk), wateryE (:,:,iblk), & + forcexE (:,:,iblk), forceyE (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + TbE (:,:,iblk)) + + + do i=1,nx_block + do j=1,ny_block + if (.not.iceumask(i,j,iblk)) then + stresspU (i,j,iblk) = c0 + stressmU (i,j,iblk) = c0 + stress12U(i,j,iblk) = c0 + endif + if (icetmask(i,j,iblk) == 0) then + stresspT (i,j,iblk) = c0 + stressmT (i,j,iblk) = c0 + stress12T(i,j,iblk) = c0 + endif + enddo + enddo + enddo ! iblk + !$OMP END PARALLEL DO + + endif ! grid_ice + call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) + if (grid_ice == 'CD' .or. grid_ice == 'C') then + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (uvelE, halo_info, & + field_loc_Eface, field_type_vector) + call ice_HaloUpdate (vvelN, halo_info, & + field_loc_Nface, field_type_vector) + call ice_timer_stop(timer_bound) + + if (grid_ice == 'C') then + call grid_average_X2Y('A', uvelE, 'E', uvelN, 'N') + call grid_average_X2Y('A', vvelN, 'N', vvelE, 'E') + uvelN(:,:,:) = uvelN(:,:,:)*npm(:,:,:) + vvelE(:,:,:) = vvelE(:,:,:)*epm(:,:,:) + endif + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (uvelN, halo_info, & + field_loc_Nface, field_type_vector) + call ice_HaloUpdate (vvelE, halo_info, & + field_loc_Eface, field_type_vector) + call ice_timer_stop(timer_bound) + + call grid_average_X2Y('S', uvelE, 'E', uvel, 'U') + call grid_average_X2Y('S', vvelN, 'N', vvel, 'U') + uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) + vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) + endif + call ice_timer_start(timer_bound) - call ice_HaloUpdate (strength, halo_info, & - field_loc_center, field_type_scalar) + call ice_HaloUpdate (strength, halo_info, & + field_loc_center, field_type_scalar) + ! velocities may have changed in dyn_prep2 - call stack_velocity_field(uvel, vvel, fld2) + call stack_fields(uvel, vvel, fld2) call ice_HaloUpdate (fld2, halo_info, & field_loc_NEcorner, field_type_vector) - call unstack_velocity_field(fld2, uvel, vvel) + call unstack_fields(fld2, uvel, vvel) call ice_timer_stop(timer_bound) if (maskhalo_dyn) then - call ice_timer_start(timer_bound) halomask = 0 - where (iceumask) halomask = 1 - call ice_HaloUpdate (halomask, halo_info, & - field_loc_center, field_type_scalar) + if (grid_ice == 'B') then + where (iceumask) halomask = 1 + elseif (grid_ice == 'C' .or. 
grid_ice == 'CD') then + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,i,j) SCHEDULE(runtime) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo,jhi + do i = ilo,ihi + if (icetmask(i ,j ,iblk) /= 0 .or. & + icetmask(i-1,j ,iblk) /= 0 .or. & + icetmask(i+1,j ,iblk) /= 0 .or. & + icetmask(i ,j-1,iblk) /= 0 .or. & + icetmask(i ,j+1,iblk) /= 0) then + halomask(i,j,iblk) = 1 + endif + enddo + enddo + enddo + !$OMP END PARALLEL DO + endif + call ice_timer_start(timer_bound) + call ice_HaloUpdate (halomask, halo_info, & + field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) call ice_HaloMask(halo_info_mask, halo_info, halomask) endif @@ -328,34 +640,70 @@ subroutine evp (dt) !----------------------------------------------------------------- ! seabed stress factor Tbu (Tbu is part of Cb coefficient) !----------------------------------------------------------------- - + if (seabed_stress) then - if ( seabed_stress_method == 'LKD' ) then - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - call seabed_stress_factor_LKD (nx_block, ny_block, & - icellu (iblk), & - indxui(:,iblk), indxuj(:,iblk), & - vice(:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), Tbu(:,:,iblk)) - enddo - !$OMP END PARALLEL DO - elseif ( seabed_stress_method == 'probabilistic' ) then - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - call seabed_stress_factor_prob (nx_block, ny_block, & - icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & - icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & - aicen(:,:,:,iblk), vicen(:,:,:,iblk), & - hwater(:,:,iblk), Tbu(:,:,iblk)) - enddo - !$OMP END PARALLEL DO + if (grid_ice == "B") then + + if ( seabed_stress_method == 'LKD' ) then + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + call seabed_stress_factor_LKD (nx_block , ny_block , & + icellu (iblk), & + indxui (:,iblk), indxuj(:,iblk), & + vice (:,:,iblk), aice(:,:,iblk), & + hwater(:,:,iblk), Tbu (:,:,iblk)) + enddo + !$OMP END PARALLEL DO + + elseif ( seabed_stress_method == 'probabilistic' ) then + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + call seabed_stress_factor_prob (nx_block , ny_block , & + icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & + icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + aicen(:,:,:,iblk), vicen(:,:,:,iblk) , & + hwater (:,:,iblk), Tbu (:,:,iblk)) + enddo + !$OMP END PARALLEL DO + endif + + elseif (grid_ice == "C" .or. 
grid_ice == "CD") then + + if ( seabed_stress_method == 'LKD' ) then + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + call seabed_stress_factor_LKD (nx_block , ny_block, & + icelle (iblk), & + indxei (:,iblk), indxej(:,iblk), & + vice (:,:,iblk), aice(:,:,iblk), & + hwater(:,:,iblk), TbE (:,:,iblk)) + call seabed_stress_factor_LKD (nx_block , ny_block, & + icelln (iblk), & + indxni (:,iblk), indxnj(:,iblk), & + vice (:,:,iblk), aice(:,:,iblk), & + hwater(:,:,iblk), TbN (:,:,iblk)) + enddo + !$OMP END PARALLEL DO + + elseif ( seabed_stress_method == 'probabilistic' ) then + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + call seabed_stress_factor_prob (nx_block , ny_block , & + icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & + icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + aicen(:,:,:,iblk), vicen(:,:,:,iblk) , & + hwater (:,:,iblk), Tbu (:,:,iblk) , & + TbE (:,:,iblk), TbN (:,:,iblk) , & + icelle(iblk), indxei(:,iblk), indxej(:,iblk), & + icelln(iblk), indxni(:,iblk), indxnj(:,iblk) ) + enddo + !$OMP END PARALLEL DO + endif endif - endif - call ice_timer_start(timer_evp_2d) + endif if (evp_algorithm == "shared_mem_1d" ) then @@ -368,118 +716,471 @@ subroutine evp (dt) & Kernel not tested on tripole grid. Set evp_algorithm=standard_2d') endif - call ice_dyn_evp_1d_copyin( & + call ice_timer_start(timer_evp_1d) + call ice_dyn_evp_1d_copyin( & nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost, & icetmask, iceumask, & - cdn_ocn,aiu,uocn,vocn,forcex,forcey,Tbu, & + cdn_ocn,aiu,uocnU,vocnU,forcex,forcey,Tbu, & umassdti,fm,uarear,tarear,strintx,strinty,uvel_init,vvel_init,& strength,uvel,vvel,dxt,dyt, & stressp_1 ,stressp_2, stressp_3, stressp_4, & stressm_1 ,stressm_2, stressm_3, stressm_4, & stress12_1,stress12_2,stress12_3,stress12_4 ) - call ice_timer_start(timer_evp_1d) call ice_dyn_evp_1d_kernel() - call ice_timer_stop(timer_evp_1d) - call ice_dyn_evp_1d_copyout( & - nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost,& -!strocn uvel,vvel, strocnx,strocny, strintx,strinty, & + call ice_dyn_evp_1d_copyout( & + nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost, & +!strocn uvel,vvel, strocnx,strocny, strintx,strinty, & uvel,vvel, strintx,strinty, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1,stress12_2,stress12_3,stress12_4, & divu,rdg_conv,rdg_shear,shear,taubx,tauby ) + call ice_timer_stop(timer_evp_1d) else ! evp_algorithm == standard_2d (Standard CICE) + call ice_timer_start(timer_evp_2d) do ksub = 1,ndte ! subcycling - !----------------------------------------------------------------- - ! stress tensor equation, total surface stress - !----------------------------------------------------------------- - - !$OMP PARALLEL DO PRIVATE(iblk,strtmp) SCHEDULE(runtime) - do iblk = 1, nblocks + if (grid_ice == "B") then + + !$OMP PARALLEL DO PRIVATE(iblk,strtmp) SCHEDULE(runtime) + do iblk = 1, nblocks + + !----------------------------------------------------------------- + ! 
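
The ksub loop entered just above advances the stress and momentum equations through ndte pseudo-time subcycles per dynamics step. Conceptually, each subcycle relaxes the stress toward its viscous-plastic target over the elastic damping timescale; a 0-D caricature of that behavior (purely illustrative, no CICE variables):

   ! Illustrative sketch only -- not part of this patch.
   program evp_subcycle_demo
      implicit none
      integer, parameter :: dbl = kind(1.0d0)
      integer,   parameter :: ndte   = 120     ! number of subcycles
      real(dbl), parameter :: sig_vp = 1.0d0   ! viscous-plastic target
      real(dbl), parameter :: r      = 0.1d0   ! dte / damping timescale
      real(dbl) :: sig
      integer   :: k
      sig = 0.0d0
      do k = 1, ndte
         sig = (sig + r*sig_vp) / (1.0d0 + r)  ! one relaxation subcycle
      enddo
      print *, 'sig after subcycling:', sig    ! ~ sig_vp
   end program evp_subcycle_demo

In the real code the target itself depends on the evolving strain rates, which is why stress and momentum are re-evaluated every subcycle, with halo updates between them on the C/CD grids.
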
stress tensor equation, total surface stress + !----------------------------------------------------------------- + call stress (nx_block , ny_block , & + icellt (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxt (:,:,iblk), dyt (:,:,iblk), & + dxhy (:,:,iblk), dyhx (:,:,iblk), & + cxp (:,:,iblk), cyp (:,:,iblk), & + cxm (:,:,iblk), cym (:,:,iblk), & + DminTarea (:,:,iblk), & + strength (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + strtmp (:,:,:) ) + + !----------------------------------------------------------------- + ! on last subcycle, save quantities for mechanical redistribution + !----------------------------------------------------------------- + if (ksub == ndte) then + call deformations (nx_block , ny_block , & + icellt (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxt (:,:,iblk), dyt (:,:,iblk), & + cxp (:,:,iblk), cyp (:,:,iblk), & + cxm (:,:,iblk), cym (:,:,iblk), & + tarear (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk) ) + endif + + !----------------------------------------------------------------- + ! momentum equation + !----------------------------------------------------------------- + call stepu (nx_block , ny_block , & + icellu (iblk), Cdn_ocn (:,:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + aiu (:,:,iblk), strtmp (:,:,:), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & + waterx (:,:,iblk), watery (:,:,iblk), & + forcex (:,:,iblk), forcey (:,:,iblk), & + umassdti (:,:,iblk), fm (:,:,iblk), & + uarear (:,:,iblk), & + strintx (:,:,iblk), strinty (:,:,iblk), & + taubx (:,:,iblk), tauby (:,:,iblk), & + uvel_init(:,:,iblk), vvel_init(:,:,iblk),& + uvel (:,:,iblk), vvel (:,:,iblk), & + Tbu (:,:,iblk)) + + enddo ! iblk + !$OMP END PARALLEL DO + + elseif (grid_ice == "C") then + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + !----------------------------------------------------------------- + ! strain rates at U point + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + call strain_rates_U (nx_block , ny_block , & + icellu (iblk), & + indxui (:,iblk), indxuj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxE (:,:,iblk), dyN (:,:,iblk), & + dxU (:,:,iblk), dyU (:,:,iblk), & + ratiodxN(:,:,iblk), ratiodxNr(:,:,iblk), & + ratiodyE(:,:,iblk), ratiodyEr(:,:,iblk), & + epm (:,:,iblk), npm (:,:,iblk), & + divergU (:,:,iblk), tensionU (:,:,iblk), & + shearU (:,:,iblk), deltaU (:,:,iblk) ) + + enddo ! iblk + !$OMP END PARALLEL DO + + ! 
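
The NOTE in the strain_rates_U block above ("strain rates * area") deserves a units check. On the curvilinear grid the discrete operators are assembled from metric-weighted velocity differences, schematically

   !   divergU*A  ~  delta_i(dy*u) + delta_j(dx*v)     ! (m)(m/s) = m^2/s

so the quantities passed between strain_rates_U, the stress routines, and the deformation diagnostics are area-weighted rates (m^2/s); they become true rates (1/s) only after multiplication by the inverse areas (tarear, uarear, earear, narear) that appear throughout the argument lists, presumably to avoid repeated divisions inside the subcycling loop.
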
calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner, field_type_scalar, & + shearU) + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + call stressC_T (nx_block , ny_block , & + icellt (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + uarea (:,:,iblk), DminTarea (:,:,iblk), & + strength (:,:,iblk), shearU (:,:,iblk), & + zetax2T (:,:,iblk), etax2T (:,:,iblk), & + stresspT (:,:,iblk), stressmT (:,:,iblk)) + + !----------------------------------------------------------------- + ! on last subcycle, save quantities for mechanical redistribution + !----------------------------------------------------------------- + if (ksub == ndte) then + + call deformationsC_T (nx_block , ny_block , & + icellt (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + tarear (:,:,iblk), uarea (:,:,iblk), & + shearU (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) + + endif + enddo + !$OMP END PARALLEL DO + + ! calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_center, field_type_scalar, & + zetax2T, etax2T, stresspT, stressmT) + + if (visc_method == 'avg_strength') then + call grid_average_X2Y('S', strength, 'T', strengthU, 'U') + elseif (visc_method == 'avg_zeta') then + call grid_average_X2Y('S', etax2T , 'T', etax2U , 'U') + endif -! if (trim(yield_curve) == 'ellipse') then - call stress (nx_block, ny_block, & - icellt(iblk), & - indxti (:,iblk), indxtj (:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxt (:,:,iblk), dyt (:,:,iblk), & - dxhy (:,:,iblk), dyhx (:,:,iblk), & - cxp (:,:,iblk), cyp (:,:,iblk), & - cxm (:,:,iblk), cym (:,:,iblk), & - tinyarea (:,:,iblk), & - strength (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk), & - strtmp (:,:,:) ) -! endif - - !----------------------------------------------------------------- - ! on last subcycle, save quantities for mechanical redistribution - !----------------------------------------------------------------- - if (ksub == ndte) then - call deformations (nx_block, ny_block , & - icellt(iblk) , & - indxti(:,iblk) , indxtj(:,iblk) , & - uvel(:,:,iblk) , vvel(:,:,iblk) , & - dxt(:,:,iblk) , dyt(:,:,iblk) , & - cxp(:,:,iblk) , cyp(:,:,iblk) , & - cxm(:,:,iblk) , cym(:,:,iblk) , & - tarear(:,:,iblk) , & - shear(:,:,iblk) , divu(:,:,iblk) , & - rdg_conv(:,:,iblk), rdg_shear(:,:,iblk) ) + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + call stressC_U (nx_block , ny_block , & + icellu (iblk), & + indxui (:,iblk), indxuj (:,iblk), & + uarea (:,:,iblk), & + etax2U (:,:,iblk), deltaU (:,:,iblk), & + strengthU (:,:,iblk), shearU (:,:,iblk), & + stress12U (:,:,iblk)) + enddo + !$OMP END PARALLEL DO + + ! 
calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info , halo_info_mask, & + field_loc_NEcorner, field_type_scalar, & + stress12U) + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + call div_stress_Ex (nx_block , ny_block , & + icelle (iblk), & + indxei (:,iblk), indxej (:,iblk), & + dxE (:,:,iblk), dyE (:,:,iblk), & + dxU (:,:,iblk), dyT (:,:,iblk), & + earear (:,:,iblk) , & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12U (:,:,iblk), strintxE (:,:,iblk) ) + + call div_stress_Ny (nx_block , ny_block , & + icelln (iblk), & + indxni (:,iblk), indxnj (:,iblk), & + dxN (:,:,iblk), dyN (:,:,iblk), & + dxT (:,:,iblk), dyU (:,:,iblk), & + narear (:,:,iblk) , & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12U (:,:,iblk), strintyN (:,:,iblk) ) + + enddo + !$OMP END PARALLEL DO + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + call stepu_C (nx_block , ny_block , & ! u, E point + icelle (iblk), Cdn_ocn (:,:,iblk), & + indxei (:,iblk), indxej (:,iblk), & + aiE (:,:,iblk), & + uocnE (:,:,iblk), vocnE (:,:,iblk), & + waterxE (:,:,iblk), forcexE (:,:,iblk), & + emassdti (:,:,iblk), fmE (:,:,iblk), & + strintxE (:,:,iblk), taubxE (:,:,iblk), & + uvelE_init(:,:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + TbE (:,:,iblk)) + + call stepv_C (nx_block, ny_block, & ! v, N point + icelln (iblk), Cdn_ocn (:,:,iblk), & + indxni (:,iblk), indxnj (:,iblk), & + aiN (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + wateryN (:,:,iblk), forceyN (:,:,iblk), & + nmassdti (:,:,iblk), fmN (:,:,iblk), & + strintyN (:,:,iblk), taubyN (:,:,iblk), & + vvelN_init(:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + TbN (:,:,iblk)) + enddo + !$OMP END PARALLEL DO + + ! calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_Eface, field_type_vector, & + uvelE) + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_Nface, field_type_vector, & + vvelN) + + call grid_average_X2Y('A', uvelE, 'E', uvelN, 'N') + call grid_average_X2Y('A', vvelN, 'N', vvelE, 'E') + uvelN(:,:,:) = uvelN(:,:,:)*npm(:,:,:) + vvelE(:,:,:) = vvelE(:,:,:)*epm(:,:,:) + + ! calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_Nface, field_type_vector, & + uvelN) + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_Eface, field_type_vector, & + vvelE) + + call grid_average_X2Y('S', uvelE, 'E', uvel, 'U') + call grid_average_X2Y('S', vvelN, 'N', vvel, 'U') + + uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) + vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) + + elseif (grid_ice == "CD") then + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + call stressCD_T (nx_block , ny_block , & + icellt (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + DminTarea (:,:,iblk), & + strength (:,:,iblk), & + zetax2T (:,:,iblk), etax2T (:,:,iblk), & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12T (:,:,iblk) ) + + !----------------------------------------------------------------- + ! 
on last subcycle, save quantities for mechanical redistribution + !----------------------------------------------------------------- + if (ksub == ndte) then + call deformationsCD_T (nx_block , ny_block , & + icellt (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + tarear (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) + endif + enddo + !$OMP END PARALLEL DO + + ! calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_center, field_type_scalar, & + zetax2T, etax2T) + + if (visc_method == 'avg_strength') then + call grid_average_X2Y('S', strength, 'T', strengthU, 'U') + elseif (visc_method == 'avg_zeta') then + call grid_average_X2Y('S', zetax2T , 'T', zetax2U , 'U') + call grid_average_X2Y('S', etax2T , 'T', etax2U , 'U') endif - - !----------------------------------------------------------------- - ! momentum equation - !----------------------------------------------------------------- - - call stepu (nx_block, ny_block, & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & - aiu (:,:,iblk), strtmp (:,:,:), & - uocn (:,:,iblk), vocn (:,:,iblk), & - waterx (:,:,iblk), watery (:,:,iblk), & - forcex (:,:,iblk), forcey (:,:,iblk), & - umassdti (:,:,iblk), fm (:,:,iblk), & - uarear (:,:,iblk), & - strintx (:,:,iblk), strinty (:,:,iblk), & - taubx (:,:,iblk), tauby (:,:,iblk), & - uvel_init(:,:,iblk), vvel_init(:,:,iblk),& - uvel (:,:,iblk), vvel (:,:,iblk), & - Tbu (:,:,iblk)) - enddo - !$OMP END PARALLEL DO + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + !----------------------------------------------------------------- + ! strain rates at U point + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + call strain_rates_U (nx_block , ny_block , & + icellu (iblk), & + indxui (:,iblk), indxuj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxE (:,:,iblk), dyN (:,:,iblk), & + dxU (:,:,iblk), dyU (:,:,iblk), & + ratiodxN (:,:,iblk), ratiodxNr(:,:,iblk), & + ratiodyE (:,:,iblk), ratiodyEr(:,:,iblk), & + epm (:,:,iblk), npm (:,:,iblk), & + divergU (:,:,iblk), tensionU (:,:,iblk), & + shearU (:,:,iblk), DeltaU (:,:,iblk) ) + + call stressCD_U (nx_block , ny_block , & + icellu (iblk), & + indxui (:,iblk), indxuj (:,iblk), & + uarea (:,:,iblk), & + zetax2U (:,:,iblk), etax2U (:,:,iblk), & + strengthU(:,:,iblk), & + divergU (:,:,iblk), tensionU (:,:,iblk), & + shearU (:,:,iblk), DeltaU (:,:,iblk), & + stresspU (:,:,iblk), stressmU (:,:,iblk), & + stress12U(:,:,iblk)) + enddo + !$OMP END PARALLEL DO + + ! 
calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_center, field_type_scalar, & + stresspT, stressmT, stress12T) + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner,field_type_scalar, & + stresspU, stressmU, stress12U) + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + call div_stress_Ex (nx_block , ny_block , & + icelle (iblk), & + indxei (:,iblk), indxej (:,iblk), & + dxE (:,:,iblk), dyE (:,:,iblk), & + dxU (:,:,iblk), dyT (:,:,iblk), & + earear (:,:,iblk) , & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12U (:,:,iblk), strintxE (:,:,iblk) ) + + call div_stress_Ey (nx_block , ny_block , & + icelle (iblk), & + indxei (:,iblk), indxej (:,iblk), & + dxE (:,:,iblk), dyE (:,:,iblk), & + dxU (:,:,iblk), dyT (:,:,iblk), & + earear (:,:,iblk) , & + stresspU (:,:,iblk), stressmU (:,:,iblk), & + stress12T (:,:,iblk), strintyE (:,:,iblk) ) + + call div_stress_Nx (nx_block , ny_block , & + icelln (iblk), & + indxni (:,iblk), indxnj (:,iblk), & + dxN (:,:,iblk), dyN (:,:,iblk), & + dxT (:,:,iblk), dyU (:,:,iblk), & + narear (:,:,iblk) , & + stresspU (:,:,iblk), stressmU (:,:,iblk), & + stress12T (:,:,iblk), strintxN (:,:,iblk) ) + + call div_stress_Ny (nx_block , ny_block , & + icelln (iblk), & + indxni (:,iblk), indxnj (:,iblk), & + dxN (:,:,iblk), dyN (:,:,iblk), & + dxT (:,:,iblk), dyU (:,:,iblk), & + narear (:,:,iblk) , & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12U (:,:,iblk), strintyN (:,:,iblk) ) + + enddo + !$OMP END PARALLEL DO + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + call stepuv_CD (nx_block , ny_block , & ! E point + icelle (iblk), Cdn_ocn (:,:,iblk), & + indxei (:,iblk), indxej (:,iblk), & + aiE (:,:,iblk), & + uocnE (:,:,iblk), vocnE (:,:,iblk), & + waterxE (:,:,iblk), wateryE (:,:,iblk), & + forcexE (:,:,iblk), forceyE (:,:,iblk), & + emassdti (:,:,iblk), fmE (:,:,iblk), & + strintxE (:,:,iblk), strintyE (:,:,iblk), & + taubxE (:,:,iblk), taubyE (:,:,iblk), & + uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + TbE (:,:,iblk)) + + call stepuv_CD (nx_block , ny_block , & ! N point + icelln (iblk), Cdn_ocn (:,:,iblk), & + indxni (:,iblk), indxnj (:,iblk), & + aiN (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + waterxN (:,:,iblk), wateryN (:,:,iblk), & + forcexN (:,:,iblk), forceyN (:,:,iblk), & + nmassdti (:,:,iblk), fmN (:,:,iblk), & + strintxN (:,:,iblk), strintyN (:,:,iblk), & + taubxN (:,:,iblk), taubyN (:,:,iblk), & + uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + TbN (:,:,iblk)) + enddo + !$OMP END PARALLEL DO + + ! calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_Eface, field_type_vector, & + uvelE, vvelE) + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_Nface, field_type_vector, & + uvelN, vvelN) + + call grid_average_X2Y('S', uvelE, 'E', uvel, 'U') + call grid_average_X2Y('S', vvelN, 'N', vvel, 'U') + + uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) + vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) + + endif ! grid_ice + + ! U fields at NE corner + ! 
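
Further below, the tripole branch updates each internal stress component in pairs: corner 1 with corner 3, and corner 2 with corner 4 (stressp_1 with stressp_3, and so on). The _1.._4 suffixes are the NE/NW/SW/SE corners of a T cell; across the tripole fold a cell meets its partner rotated by 180 degrees, so one cell's NE corner coincides with its partner's SW corner, and the paired ice_HaloUpdate_stress calls keep the seam values consistent. This is the same pairing applied to the EAP structure tensor in read_restart_eap earlier in this patch. As an index sketch (an assumption for illustration, not code from the patch):

   ! Sketch (assumption for illustration, not from the patch):
   !   ipartner = nx_global + 1 - i    ! partner column across the fold
   !                                   ! (exact offset depends on staggering)
   !   cpartner = 1 + mod(c + 1, 4)    ! corner pairing: 1<->3, 2<->4
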
calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner, field_type_vector, & + uvel, vvel) - call stack_velocity_field(uvel, vvel, fld2) - call ice_timer_start(timer_bound) - if (maskhalo_dyn) then - call ice_HaloUpdate (fld2, halo_info_mask, & - field_loc_NEcorner, field_type_vector) - else - call ice_HaloUpdate (fld2, halo_info, & - field_loc_NEcorner, field_type_vector) - endif - call ice_timer_stop(timer_bound) - call unstack_velocity_field(fld2, uvel, vvel) - enddo ! subcycling + call ice_timer_stop(timer_evp_2d) endif ! evp_algorithm - call ice_timer_stop(timer_evp_2d) + deallocate(fld2,fld3,fld4) + if (grid_ice == 'CD' .or. grid_ice == 'C') then + deallocate(strengthU, divergU, tensionU, shearU, deltaU) + deallocate(zetax2T, zetax2U, etax2T, etax2U) + endif - deallocate(fld2) - if (maskhalo_dyn) call ice_HaloDestroy(halo_info_mask) + if (maskhalo_dyn) then + call ice_HaloDestroy(halo_info_mask) + endif ! Force symmetry across the tripole seam if (trim(grid_type) == 'tripole') then + ! TODO: C/CD-grid if (maskhalo_dyn) then !------------------------------------------------------- ! set halomask to zero because ice_HaloMask always keeps @@ -488,61 +1189,61 @@ subroutine evp (dt) halomask = 0 call ice_HaloMask(halo_info_mask, halo_info, halomask) - call ice_HaloUpdate_stress(stressp_1, stressp_3, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_3, stressp_1, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_2, stressp_4, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_4, stressp_2, halo_info_mask, & - field_loc_center, field_type_scalar) - - call ice_HaloUpdate_stress(stressm_1, stressm_3, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_3, stressm_1, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_2, stressm_4, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_4, stressm_2, halo_info_mask, & - field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_1 , stressp_3 , halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_3 , stressp_1 , halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_2 , stressp_4 , halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_4 , stressp_2 , halo_info_mask, & + field_loc_center, field_type_scalar) + + call ice_HaloUpdate_stress(stressm_1 , stressm_3 , halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_3 , stressm_1 , halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_2 , stressm_4 , halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_4 , stressm_2 , halo_info_mask, & + field_loc_center, field_type_scalar) call ice_HaloUpdate_stress(stress12_1, stress12_3, halo_info_mask, & - field_loc_center, field_type_scalar) + field_loc_center, field_type_scalar) call ice_HaloUpdate_stress(stress12_3, stress12_1, halo_info_mask, & - field_loc_center, field_type_scalar) + field_loc_center, field_type_scalar) call ice_HaloUpdate_stress(stress12_2, stress12_4, halo_info_mask, & - field_loc_center, field_type_scalar) + field_loc_center, field_type_scalar) call 
ice_HaloUpdate_stress(stress12_4, stress12_2, halo_info_mask, & - field_loc_center, field_type_scalar) + field_loc_center, field_type_scalar) call ice_HaloDestroy(halo_info_mask) else - call ice_HaloUpdate_stress(stressp_1, stressp_3, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_3, stressp_1, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_2, stressp_4, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_4, stressp_2, halo_info, & - field_loc_center, field_type_scalar) - - call ice_HaloUpdate_stress(stressm_1, stressm_3, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_3, stressm_1, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_2, stressm_4, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_4, stressm_2, halo_info, & - field_loc_center, field_type_scalar) - - call ice_HaloUpdate_stress(stress12_1, stress12_3, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_3, stress12_1, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_2, stress12_4, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_4, stress12_2, halo_info, & - field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_1 , stressp_3 , halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_3 , stressp_1 , halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_2 , stressp_4 , halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_4 , stressp_2 , halo_info, & + field_loc_center, field_type_scalar) + + call ice_HaloUpdate_stress(stressm_1 , stressm_3 , halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_3 , stressm_1 , halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_2 , stressm_4 , halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_4 , stressm_2 , halo_info, & + field_loc_center, field_type_scalar) + + call ice_HaloUpdate_stress(stress12_1, stress12_3, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stress12_3, stress12_1, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stress12_2, stress12_4, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stress12_4, stress12_2, halo_info, & + field_loc_center, field_type_scalar) endif ! maskhalo endif ! tripole @@ -552,56 +1253,105 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks + call dyn_finish & + (nx_block , ny_block , & + icellu (iblk), Cdn_ocn (:,:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & + aiu (:,:,iblk), fm (:,:,iblk), & + strocnx (:,:,iblk), strocny (:,:,iblk)) + enddo + !$OMP END PARALLEL DO + + if (grid_ice == 'CD' .or. 
grid_ice == 'C') then + + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + + call dyn_finish & + (nx_block , ny_block , & + icelln (iblk), Cdn_ocn (:,:,iblk), & + indxni (:,iblk), indxnj (:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + aiN (:,:,iblk), fmN (:,:,iblk), & + strocnxN(:,:,iblk), strocnyN(:,:,iblk)) + + call dyn_finish & + (nx_block , ny_block , & + icelle (iblk), Cdn_ocn (:,:,iblk), & + indxei (:,iblk), indxej (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uocnE (:,:,iblk), vocnE (:,:,iblk), & + aiE (:,:,iblk), fmE (:,:,iblk), & + strocnxE(:,:,iblk), strocnyE(:,:,iblk)) - call dyn_finish & - (nx_block, ny_block, & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & - aiu (:,:,iblk), fm (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk), & - strocnxT(:,:,iblk), strocnyT(:,:,iblk)) + enddo + !$OMP END PARALLEL DO + endif + + ! strocn computed on U, N, E as needed. Map strocn U divided by aiu to T + ! TODO: This should be done elsewhere as part of generalization? + ! TODO: Rename strocn[x,y]T since it's different than strocn[x,y][U,N,E] + ! conservation requires aiu be divided before averaging + work1 = c0 + work2 = c0 + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) SCHEDULE(runtime) + do iblk = 1, nblocks + do ij = 1, icellu(iblk) + i = indxui(ij,iblk) + j = indxuj(ij,iblk) + work1(i,j,iblk) = strocnx(i,j,iblk)/aiu(i,j,iblk) + work2(i,j,iblk) = strocny(i,j,iblk)/aiu(i,j,iblk) + enddo enddo !$OMP END PARALLEL DO + call ice_HaloUpdate (work1, halo_info, & + field_loc_NEcorner, field_type_vector) + call ice_HaloUpdate (work2, halo_info, & + field_loc_NEcorner, field_type_vector) + call grid_average_X2Y('F', work1, 'U', strocnxT, 'T') ! shift + call grid_average_X2Y('F', work2, 'U', strocnyT, 'T') - call u2tgrid_vector(strocnxT) ! shift - call u2tgrid_vector(strocnyT) + if (grid_ice == 'CD' .or. grid_ice == 'C') then + call grid_average_X2Y('S', strintxE, 'E', strintx, 'U') ! diagnostic + call grid_average_X2Y('S', strintyN, 'N', strinty, 'U') ! diagnostic + endif call ice_timer_stop(timer_dynamics) ! dynamics end subroutine evp !======================================================================= - ! Computes the rates of strain and internal stress components for ! each of the four corners on each T-grid cell. ! Computes stress terms for the momentum equation ! ! author: Elizabeth C. Hunke, LANL - subroutine stress (nx_block, ny_block, & - icellt, & - indxti, indxtj, & - uvel, vvel, & - dxt, dyt, & - dxhy, dyhx, & - cxp, cyp, & - cxm, cym, & - tinyarea, & - strength, & - stressp_1, stressp_2, & - stressp_3, stressp_4, & - stressm_1, stressm_2, & - stressm_3, stressm_4, & - stress12_1, stress12_2, & - stress12_3, stress12_4, & + subroutine stress (nx_block, ny_block, & + icellt, & + indxti, indxtj, & + uvel, vvel, & + dxt, dyt, & + dxhy, dyhx, & + cxp, cyp, & + cxm, cym, & + DminTarea, & + strength, & + stressp_1, stressp_2, & + stressp_3, stressp_4, & + stressm_1, stressm_2, & + stressm_3, stressm_4, & + stress12_1, stress12_2, & + stress12_3, stress12_4, & str ) - use ice_dyn_shared, only: strain_rates, deformations, viscous_coeffs_and_rep_pressure - - integer (kind=int_kind), intent(in) :: & + use ice_dyn_shared, only: strain_rates, visc_replpress, capping + + integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions icellt ! no. 
of cells where icetmask = 1 @@ -621,7 +1371,7 @@ subroutine stress (nx_block, ny_block, & cxp , & ! 1.5*HTN - 0.5*HTS cym , & ! 0.5*HTE - 1.5*HTW cxm , & ! 0.5*HTN - 1.5*HTS - tinyarea ! puny*tarea + DminTarea ! deltaminEVP*tarea real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & stressp_1, stressp_2, stressp_3, stressp_4 , & ! sigma11+sigma22 @@ -641,10 +1391,10 @@ subroutine stress (nx_block, ny_block, & tensionne, tensionnw, tensionse, tensionsw, & ! tension shearne, shearnw, shearse, shearsw , & ! shearing Deltane, Deltanw, Deltase, Deltasw , & ! Delt - zetax2ne, zetax2nw, zetax2se, zetax2sw , & ! 2 x zeta (visc coeff) - etax2ne, etax2nw, etax2se, etax2sw , & ! 2 x eta (visc coeff) + zetax2ne, zetax2nw, zetax2se, zetax2sw , & ! 2 x zeta (bulk visc) + etax2ne, etax2nw, etax2se, etax2sw , & ! 2 x eta (shear visc) rep_prsne, rep_prsnw, rep_prsse, rep_prssw, & ! replacement pressure -! puny , & ! puny +! puny , & ! puny ssigpn, ssigps, ssigpe, ssigpw , & ssigmn, ssigms, ssigme, ssigmw , & ssig12n, ssig12s, ssig12e, ssig12w , & @@ -655,8 +1405,6 @@ subroutine stress (nx_block, ny_block, & str12ew, str12we, str12ns, str12sn , & strp_tmp, strm_tmp, tmp - real(kind=dbl_kind),parameter :: capping = c1 ! of the viscous coef - character(len=*), parameter :: subname = '(stress)' !----------------------------------------------------------------- @@ -669,10 +1417,10 @@ subroutine stress (nx_block, ny_block, & i = indxti(ij) j = indxtj(ij) - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! strain rates + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- call strain_rates (nx_block, ny_block, & i, j, & @@ -689,88 +1437,89 @@ subroutine stress (nx_block, ny_block, & Deltane, Deltanw, & Deltase, Deltasw ) - !----------------------------------------------------------------- - ! viscous coefficients and replacement pressure - !----------------------------------------------------------------- - - call viscous_coeffs_and_rep_pressure (strength(i,j), tinyarea(i,j),& - Deltane, Deltanw, & - Deltasw, Deltase, & - zetax2ne, zetax2nw, & - zetax2sw, zetax2se, & - etax2ne, etax2nw, & - etax2sw, etax2se, & - rep_prsne, rep_prsnw, & - rep_prssw, rep_prsse, & - capping) - - !----------------------------------------------------------------- - ! the stresses ! kg/s^2 - ! (1) northeast, (2) northwest, (3) southwest, (4) southeast - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! viscosities and replacement pressure + !----------------------------------------------------------------- - ! NOTE: for comp. 
efficiency 2 x zeta and 2 x eta are used in the code - - stressp_1(i,j) = (stressp_1(i,j)*(c1-arlx1i*revp) + & - arlx1i*(zetax2ne*divune - rep_prsne)) * denom1 - stressp_2(i,j) = (stressp_2(i,j)*(c1-arlx1i*revp) + & - arlx1i*(zetax2nw*divunw - rep_prsnw)) * denom1 - stressp_3(i,j) = (stressp_3(i,j)*(c1-arlx1i*revp) + & - arlx1i*(zetax2sw*divusw - rep_prssw)) * denom1 - stressp_4(i,j) = (stressp_4(i,j)*(c1-arlx1i*revp) + & - arlx1i*(zetax2se*divuse - rep_prsse)) * denom1 - - stressm_1(i,j) = (stressm_1(i,j)*(c1-arlx1i*revp) + & - arlx1i*etax2ne*tensionne) * denom1 - stressm_2(i,j) = (stressm_2(i,j)*(c1-arlx1i*revp) + & - arlx1i*etax2nw*tensionnw) * denom1 - stressm_3(i,j) = (stressm_3(i,j)*(c1-arlx1i*revp) + & - arlx1i*etax2sw*tensionsw) * denom1 - stressm_4(i,j) = (stressm_4(i,j)*(c1-arlx1i*revp) + & - arlx1i*etax2se*tensionse) * denom1 - - stress12_1(i,j) = (stress12_1(i,j)*(c1-arlx1i*revp) + & - arlx1i*p5*etax2ne*shearne) * denom1 - stress12_2(i,j) = (stress12_2(i,j)*(c1-arlx1i*revp) + & - arlx1i*p5*etax2nw*shearnw) * denom1 - stress12_3(i,j) = (stress12_3(i,j)*(c1-arlx1i*revp) + & - arlx1i*p5*etax2sw*shearsw) * denom1 - stress12_4(i,j) = (stress12_4(i,j)*(c1-arlx1i*revp) + & - arlx1i*p5*etax2se*shearse) * denom1 + call visc_replpress (strength(i,j), DminTarea(i,j), Deltane, & + zetax2ne, etax2ne, rep_prsne, capping) - !----------------------------------------------------------------- - ! Eliminate underflows. - ! The following code is commented out because it is relatively - ! expensive and most compilers include a flag that accomplishes - ! the same thing more efficiently. This code is cheaper than - ! handling underflows if the compiler lacks a flag; uncomment - ! it in that case. The compiler flag is often described with the - ! phrase "flush to zero". - !----------------------------------------------------------------- + call visc_replpress (strength(i,j), DminTarea(i,j), Deltanw, & + zetax2nw, etax2nw, rep_prsnw, capping) -! call icepack_query_parameters(puny_out=puny) -! call icepack_warnings_flush(nu_diag) -! if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & -! file=__FILE__, line=__LINE__) + call visc_replpress (strength(i,j), DminTarea(i,j), Deltasw, & + zetax2sw, etax2sw, rep_prssw, capping) -! stressp_1(i,j) = sign(max(abs(stressp_1(i,j)),puny),stressp_1(i,j)) -! stressp_2(i,j) = sign(max(abs(stressp_2(i,j)),puny),stressp_2(i,j)) -! stressp_3(i,j) = sign(max(abs(stressp_3(i,j)),puny),stressp_3(i,j)) -! stressp_4(i,j) = sign(max(abs(stressp_4(i,j)),puny),stressp_4(i,j)) + call visc_replpress (strength(i,j), DminTarea(i,j), Deltase, & + zetax2se, etax2se, rep_prsse, capping) -! stressm_1(i,j) = sign(max(abs(stressm_1(i,j)),puny),stressm_1(i,j)) -! stressm_2(i,j) = sign(max(abs(stressm_2(i,j)),puny),stressm_2(i,j)) -! stressm_3(i,j) = sign(max(abs(stressm_3(i,j)),puny),stressm_3(i,j)) -! stressm_4(i,j) = sign(max(abs(stressm_4(i,j)),puny),stressm_4(i,j)) + !----------------------------------------------------------------- + ! the stresses ! kg/s^2 + ! (1) northeast, (2) northwest, (3) southwest, (4) southeast + !----------------------------------------------------------------- -! stress12_1(i,j) = sign(max(abs(stress12_1(i,j)),puny),stress12_1(i,j)) -! stress12_2(i,j) = sign(max(abs(stress12_2(i,j)),puny),stress12_2(i,j)) -! stress12_3(i,j) = sign(max(abs(stress12_3(i,j)),puny),stress12_3(i,j)) -! stress12_4(i,j) = sign(max(abs(stress12_4(i,j)),puny),stress12_4(i,j)) + ! NOTE: for comp. 
efficiency 2 x zeta and 2 x eta are used in the code + + stressp_1 (i,j) = (stressp_1 (i,j)*(c1-arlx1i*revp) & + + arlx1i*(zetax2ne*divune - rep_prsne)) * denom1 + stressp_2 (i,j) = (stressp_2 (i,j)*(c1-arlx1i*revp) & + + arlx1i*(zetax2nw*divunw - rep_prsnw)) * denom1 + stressp_3 (i,j) = (stressp_3 (i,j)*(c1-arlx1i*revp) & + + arlx1i*(zetax2sw*divusw - rep_prssw)) * denom1 + stressp_4 (i,j) = (stressp_4 (i,j)*(c1-arlx1i*revp) & + + arlx1i*(zetax2se*divuse - rep_prsse)) * denom1 + + stressm_1 (i,j) = (stressm_1 (i,j)*(c1-arlx1i*revp) & + + arlx1i*etax2ne*tensionne) * denom1 + stressm_2 (i,j) = (stressm_2 (i,j)*(c1-arlx1i*revp) & + + arlx1i*etax2nw*tensionnw) * denom1 + stressm_3 (i,j) = (stressm_3 (i,j)*(c1-arlx1i*revp) & + + arlx1i*etax2sw*tensionsw) * denom1 + stressm_4 (i,j) = (stressm_4 (i,j)*(c1-arlx1i*revp) & + + arlx1i*etax2se*tensionse) * denom1 + + stress12_1(i,j) = (stress12_1(i,j)*(c1-arlx1i*revp) & + + arlx1i*p5*etax2ne*shearne) * denom1 + stress12_2(i,j) = (stress12_2(i,j)*(c1-arlx1i*revp) & + + arlx1i*p5*etax2nw*shearnw) * denom1 + stress12_3(i,j) = (stress12_3(i,j)*(c1-arlx1i*revp) & + + arlx1i*p5*etax2sw*shearsw) * denom1 + stress12_4(i,j) = (stress12_4(i,j)*(c1-arlx1i*revp) & + + arlx1i*p5*etax2se*shearse) * denom1 - !----------------------------------------------------------------- - ! combinations of the stresses for the momentum equation ! kg/s^2 - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! Eliminate underflows. + ! The following code is commented out because it is relatively + ! expensive and most compilers include a flag that accomplishes + ! the same thing more efficiently. This code is cheaper than + ! handling underflows if the compiler lacks a flag; uncomment + ! it in that case. The compiler flag is often described with the + ! phrase "flush to zero". + !----------------------------------------------------------------- + +! call icepack_query_parameters(puny_out=puny) +! call icepack_warnings_flush(nu_diag) +! if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & +! file=__FILE__, line=__LINE__) + +! stressp_1(i,j) = sign(max(abs(stressp_1(i,j)),puny),stressp_1(i,j)) +! stressp_2(i,j) = sign(max(abs(stressp_2(i,j)),puny),stressp_2(i,j)) +! stressp_3(i,j) = sign(max(abs(stressp_3(i,j)),puny),stressp_3(i,j)) +! stressp_4(i,j) = sign(max(abs(stressp_4(i,j)),puny),stressp_4(i,j)) + +! stressm_1(i,j) = sign(max(abs(stressm_1(i,j)),puny),stressm_1(i,j)) +! stressm_2(i,j) = sign(max(abs(stressm_2(i,j)),puny),stressm_2(i,j)) +! stressm_3(i,j) = sign(max(abs(stressm_3(i,j)),puny),stressm_3(i,j)) +! stressm_4(i,j) = sign(max(abs(stressm_4(i,j)),puny),stressm_4(i,j)) + +! stress12_1(i,j) = sign(max(abs(stress12_1(i,j)),puny),stress12_1(i,j)) +! stress12_2(i,j) = sign(max(abs(stress12_2(i,j)),puny),stress12_2(i,j)) +! stress12_3(i,j) = sign(max(abs(stress12_3(i,j)),puny),stress12_3(i,j)) +! stress12_4(i,j) = sign(max(abs(stress12_4(i,j)),puny),stress12_4(i,j)) + + !----------------------------------------------------------------- + ! combinations of the stresses for the momentum equation ! 
kg/s^2 + !----------------------------------------------------------------- ssigpn = stressp_1(i,j) + stressp_2(i,j) ssigps = stressp_3(i,j) + stressp_4(i,j) @@ -797,12 +1546,12 @@ subroutine stress (nx_block, ny_block, & csigpnw = p111*stressp_2(i,j) + ssigp1 + p027*stressp_4(i,j) csigpsw = p111*stressp_3(i,j) + ssigp2 + p027*stressp_1(i,j) csigpse = p111*stressp_4(i,j) + ssigp1 + p027*stressp_2(i,j) - + csigmne = p111*stressm_1(i,j) + ssigm2 + p027*stressm_3(i,j) csigmnw = p111*stressm_2(i,j) + ssigm1 + p027*stressm_4(i,j) csigmsw = p111*stressm_3(i,j) + ssigm2 + p027*stressm_1(i,j) csigmse = p111*stressm_4(i,j) + ssigm1 + p027*stressm_2(i,j) - + csig12ne = p222*stress12_1(i,j) + ssig122 & + p055*stress12_3(i,j) csig12nw = p222*stress12_2(i,j) + ssig121 & @@ -817,9 +1566,9 @@ subroutine stress (nx_block, ny_block, & str12ns = p5*dyt(i,j)*(p333*ssig12n + p166*ssig12s) str12sn = p5*dyt(i,j)*(p333*ssig12s + p166*ssig12n) - !----------------------------------------------------------------- - ! for dF/dx (u momentum) - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! for dF/dx (u momentum) + !----------------------------------------------------------------- strp_tmp = p25*dyt(i,j)*(p333*ssigpn + p166*ssigps) strm_tmp = p25*dyt(i,j)*(p333*ssigmn + p166*ssigms) @@ -842,9 +1591,9 @@ subroutine stress (nx_block, ny_block, & str(i,j,4) = strp_tmp + strm_tmp + str12we & + dxhy(i,j)*(-csigpsw + csigmsw) + dyhx(i,j)*csig12sw - !----------------------------------------------------------------- - ! for dF/dy (v momentum) - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! for dF/dy (v momentum) + !----------------------------------------------------------------- strp_tmp = p25*dxt(i,j)*(p333*ssigpe + p166*ssigpw) strm_tmp = p25*dxt(i,j)*(p333*ssigme + p166*ssigmw) @@ -871,6 +1620,658 @@ subroutine stress (nx_block, ny_block, & end subroutine stress +!======================================================================= +! Computes the strain rates and internal stress components for C grid +! +! author: JF Lemieux, ECCC +! updated: D. Bailey, NCAR +! Nov 2021 +! +! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The +! elastic-viscous-plastic method revisited. Ocean Model., 71, 2-12. +! +! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method +! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. + + subroutine stressC_T (nx_block, ny_block , & + icellt , & + indxti , indxtj , & + uvelE , vvelE , & + uvelN , vvelN , & + dxN , dyE , & + dxT , dyT , & + uarea , DminTarea, & + strength, shearU , & + zetax2T , etax2T , & + stressp , stressm ) + + use ice_dyn_shared, only: strain_rates_T, capping, & + visc_replpress, e_factor + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icellt ! no. of cells where icetmask = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxti , & ! compressed index in i-direction + indxtj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + uvelE , & ! x-component of velocity (m/s) at the E point + vvelE , & ! y-component of velocity (m/s) at the E point + uvelN , & ! x-component of velocity (m/s) at the N point + vvelN , & ! y-component of velocity (m/s) at the N point + dxN , & ! width of N-cell through the middle (m) + dyE , & ! 
height of E-cell through the middle (m)
+         dxT       , & ! width of T-cell through the middle (m)
+         dyT       , & ! height of T-cell through the middle (m)
+         strength  , & ! ice strength (N/m)
+         shearU    , & ! shear at the U point
+         uarea     , & ! area of U-cell (m^2)
+         DminTarea     ! deltaminEVP*tarea
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: &
+         zetax2T   , & ! zetax2 = 2*zeta (bulk viscosity)
+         etax2T    , & ! etax2 = 2*eta (shear viscosity)
+         stressp   , & ! sigma11+sigma22
+         stressm       ! sigma11-sigma22
+
+      ! local variables
+
+      integer (kind=int_kind) :: &
+         i, j, ij
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block) :: &
+         divT      , & ! divergence at T point
+         tensionT      ! tension at T point
+
+      real (kind=dbl_kind) :: &
+         shearTsqr , & ! strain rates squared at T point
+         DeltaT    , & ! delt at T point
+         rep_prsT      ! replacement pressure at T point
+
+      character(len=*), parameter :: subname = '(stressC_T)'
+
+      !-----------------------------------------------------------------
+      ! Initialize
+      !-----------------------------------------------------------------
+
+      call strain_rates_T (nx_block   , ny_block   , &
+                           icellt     ,              &
+                           indxti(:)  , indxtj (:) , &
+                           uvelE (:,:), vvelE  (:,:), &
+                           uvelN (:,:), vvelN  (:,:), &
+                           dxN   (:,:), dyE    (:,:), &
+                           dxT   (:,:), dyT    (:,:), &
+                           divT  (:,:), tensionT(:,:) )
+
+      do ij = 1, icellt
+         i = indxti(ij)
+         j = indxtj(ij)
+
+         !-----------------------------------------------------------------
+         ! Square of shear strain rate at T obtained from interpolation of
+         ! U point values (Bouillon et al., 2013, Kimmritz et al., 2016)
+         !-----------------------------------------------------------------
+
+         shearTsqr = (shearU(i  ,j  )**2 * uarea(i  ,j  )  &
+                    + shearU(i  ,j-1)**2 * uarea(i  ,j-1)  &
+                    + shearU(i-1,j-1)**2 * uarea(i-1,j-1)  &
+                    + shearU(i-1,j  )**2 * uarea(i-1,j  )) &
+                    / (uarea(i,j)+uarea(i,j-1)+uarea(i-1,j-1)+uarea(i-1,j))
+
+         DeltaT = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearTsqr))
+
+         !-----------------------------------------------------------------
+         ! viscosities and replacement pressure at T point
+         !-----------------------------------------------------------------
+
+         call visc_replpress (strength(i,j), DminTarea(i,j), DeltaT, &
+                              zetax2T (i,j), etax2T  (i,j), rep_prsT, capping)
+
+         !-----------------------------------------------------------------
+         ! the stresses ! kg/s^2
+         !-----------------------------------------------------------------
+
+         ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code
+
+         stressp(i,j) = (stressp(i,j)*(c1-arlx1i*revp) &
+                       + arlx1i*(zetax2T(i,j)*divT(i,j) - rep_prsT)) * denom1
+
+         stressm(i,j) = (stressm(i,j)*(c1-arlx1i*revp) &
+                       + arlx1i*etax2T(i,j)*tensionT(i,j)) * denom1
+
+      enddo                     ! ij
+
+      end subroutine stressC_T
+
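! ------------------------------------------------------------------
! [editorial note] A minimal sketch of the DeltaT calculation above, for
! readers new to the C grid discretization; it is not part of the merged
! code, and the helper name deltaC_T is hypothetical. On the C grid the
! shear strain rate lives at the U points, so stressC_T forms Delta at the
! T point from the area-weighted average of shearU**2 at the four
! surrounding corners (C1 method of Kimmritz et al. 2016); e_factor is
! (e_yieldcurve)**2/(e_plasticpot)**4 as declared in ice_dyn_shared.
!
!      function deltaC_T (divT, tensionT, shearUne, shearUnw, shearUsw, &
!                         shearUse, areane, areanw, areasw, arease) result(DeltaT)
!         real (kind=dbl_kind), intent(in) :: &
!            divT, tensionT, &                        ! strain rates * area at T (m^2/s)
!            shearUne, shearUnw, shearUsw, shearUse, & ! shear at the 4 U points
!            areane, areanw, areasw, arease            ! the 4 U-cell areas (m^2)
!         real (kind=dbl_kind) :: DeltaT, shearTsqr
!         ! area-weighted interpolation of shearU**2 onto the T point
!         shearTsqr = (shearUne**2*areane + shearUnw**2*areanw  &
!                    + shearUsw**2*areasw + shearUse**2*arease) &
!                    / (areane + areanw + areasw + arease)
!         DeltaT = sqrt(divT**2 + e_factor*(tensionT**2 + shearTsqr))
!      end function deltaC_T
! ------------------------------------------------------------------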
+!=======================================================================
+!
+! Computes the strain rates and internal stress components for U points
+!
+! author: JF Lemieux, ECCC
+! Nov 2021
+!
+! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The
+! elastic-viscous-plastic method revisited. Ocean Model., 71, 2-12.
+!
+! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method
+! for solving the sea ice momentum equation. Ocean Model., 101, 59-67.
+
+      subroutine stressC_U (nx_block , ny_block, &
+                            icellu,              &
+                            indxui   , indxuj,   &
+                            uarea ,              &
+                            etax2U   , deltaU,   &
+                            strengthU, shearU,   &
+                            stress12 )
+
+      use ice_dyn_shared, only: visc_replpress, &
+                                visc_method, deltaminEVP, capping
+
+      integer (kind=int_kind), intent(in) :: &
+         nx_block, ny_block, & ! block dimensions
+         icellu                ! no. of cells where iceumask = 1
+
+      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
+         indxui   , & ! compressed index in i-direction
+         indxuj       ! compressed index in j-direction
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
+         uarea    , & ! area of U-cell (m^2)
+         etax2U   , & ! 2*eta at the U point
+         shearU   , & ! shear at the U point
+         deltaU   , & ! Delta at the U point
+         strengthU    ! ice strength at the U point
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: &
+         stress12     ! sigma12
+
+      ! local variables
+
+      integer (kind=int_kind) :: &
+         i, j, ij
+
+      real (kind=dbl_kind) :: &
+         lzetax2U , & ! bulk viscosity at U point
+         letax2U  , & ! shear viscosity at U point
+         lrep_prsU, & ! replacement pressure at U point
+         DminUarea    ! Dmin on U
+
+      character(len=*), parameter :: subname = '(stressC_U)'
+
+      !-----------------------------------------------------------------
+      ! viscosities and replacement pressure at U point
+      ! avg_zeta: Bouillon et al. 2013, C1 method of Kimmritz et al. 2016
+      ! avg_strength: C2 method of Kimmritz et al. 2016
+      ! the visc_method test is kept outside the do loop, with the stress12
+      ! update repeated in each branch, for performance
+      !-----------------------------------------------------------------
+
+      if (visc_method == 'avg_zeta') then
+         do ij = 1, icellu
+            i = indxui(ij)
+            j = indxuj(ij)
+            stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) &
+                           + arlx1i*p5*etax2U(i,j)*shearU(i,j)) * denom1
+         enddo
+
+      elseif (visc_method == 'avg_strength') then
+         do ij = 1, icellu
+            i = indxui(ij)
+            j = indxuj(ij)
+            DminUarea = deltaminEVP*uarea(i,j)
+            ! only etax2U is needed here, but visc_replpress also returns
+            ! zetax2U and the replacement pressure at minimal extra cost
+            call visc_replpress (strengthU(i,j), DminUarea, DeltaU(i,j), &
+                                 lzetax2U , letax2U , lrep_prsU , capping)
+            stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) &
+                           + arlx1i*p5*letax2U*shearU(i,j)) * denom1
+         enddo
+
+      endif
+
+      end subroutine stressC_U
+
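! ------------------------------------------------------------------
! [editorial note] Illustration of the two visc_method options above; not
! part of the merged code. With avg_zeta (C1 method) the T-point viscosities
! are averaged to the U point beforehand (see the
! grid_average_X2Y('S', zetax2T, 'T', zetax2U, 'U') call in evp), and the
! replacement pressure then follows the ellipse relation used later in
! stressCD_U:
!
!      lrep_prsU = (c1-Ktens)/(c1+Ktens)*lzetax2U*deltaU(i,j)
!
! With avg_strength (C2 method) the ice strength itself is averaged to the
! U point (grid_average_X2Y('S', strength, 'T', strengthU, 'U')), and
! visc_replpress rebuilds zeta, eta and the replacement pressure there from
! strengthU, as in the avg_strength branch above.
! ------------------------------------------------------------------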
+!=======================================================================
+! Computes the strain rates and internal stress components for T points
+!
+! author: JF Lemieux, ECCC
+! Nov 2021
+
+      subroutine stressCD_T (nx_block,  ny_block,  &
+                             icellt,               &
+                             indxti,    indxtj,    &
+                             uvelE,     vvelE,     &
+                             uvelN,     vvelN,     &
+                             dxN,       dyE,       &
+                             dxT,       dyT,       &
+                             DminTarea,            &
+                             strength,             &
+                             zetax2T,   etax2T,    &
+                             stresspT,  stressmT,  &
+                             stress12T )
+
+      use ice_dyn_shared, only: strain_rates_T, capping, &
+                                visc_replpress
+
+      integer (kind=int_kind), intent(in) :: &
+         nx_block, ny_block, & ! block dimensions
+         icellt                ! no. of cells where icetmask = 1
+
+      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
+         indxti   , & ! compressed index in i-direction
+         indxtj       ! compressed index in j-direction
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
+         uvelE    , & ! x-component of velocity (m/s) at the E point
+         vvelE    , & ! y-component of velocity (m/s) at the E point
+         uvelN    , & ! x-component of velocity (m/s) at the N point
+         vvelN    , & ! y-component of velocity (m/s) at the N point
+         dxN      , & ! width of N-cell through the middle (m)
+         dyE      , & ! height of E-cell through the middle (m)
+         dxT      , & ! width of T-cell through the middle (m)
+         dyT      , & ! height of T-cell through the middle (m)
+         strength , & ! ice strength (N/m)
+         DminTarea    ! deltaminEVP*tarea
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: &
+         zetax2T  , & ! zetax2 = 2*zeta (bulk viscosity)
+         etax2T   , & ! etax2 = 2*eta (shear viscosity)
+         stresspT , & ! sigma11+sigma22
+         stressmT , & ! sigma11-sigma22
+         stress12T    ! sigma12
+
+      ! local variables
+
+      integer (kind=int_kind) :: &
+         i, j, ij
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block) :: &
+         divT     , & ! divergence at T point
+         tensionT , & ! tension at T point
+         shearT   , & ! shear at T point
+         DeltaT       ! delt at T point
+
+      real (kind=dbl_kind) :: &
+         rep_prsT     ! replacement pressure at T point
+
+      character(len=*), parameter :: subname = '(stressCD_T)'
+
+      !-----------------------------------------------------------------
+      ! strain rates at T point
+      ! NOTE these are actually strain rates * area  (m^2/s)
+      !-----------------------------------------------------------------
+
+      call strain_rates_T (nx_block   , ny_block   , &
+                           icellt     ,              &
+                           indxti(:)  , indxtj (:) , &
+                           uvelE (:,:), vvelE  (:,:), &
+                           uvelN (:,:), vvelN  (:,:), &
+                           dxN   (:,:), dyE    (:,:), &
+                           dxT   (:,:), dyT    (:,:), &
+                           divT  (:,:), tensionT(:,:), &
+                           shearT(:,:), DeltaT (:,:) )
+
+      do ij = 1, icellt
+         i = indxti(ij)
+         j = indxtj(ij)
+
+         !-----------------------------------------------------------------
+         ! viscosities and replacement pressure at T point
+         !-----------------------------------------------------------------
+
+         call visc_replpress (strength(i,j), DminTarea(i,j), DeltaT(i,j), &
+                              zetax2T (i,j), etax2T  (i,j), rep_prsT , capping)
+
+         !-----------------------------------------------------------------
+         ! the stresses ! kg/s^2
+         !-----------------------------------------------------------------
+
+         ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code
+
+         stresspT(i,j) = (stresspT (i,j)*(c1-arlx1i*revp) &
+                        + arlx1i*(zetax2T(i,j)*divT(i,j) - rep_prsT)) * denom1
+
+         stressmT(i,j) = (stressmT (i,j)*(c1-arlx1i*revp) &
+                        + arlx1i*etax2T(i,j)*tensionT(i,j)) * denom1
+
+         stress12T(i,j) = (stress12T(i,j)*(c1-arlx1i*revp) &
+                         + arlx1i*p5*etax2T(i,j)*shearT(i,j)) * denom1
+
+      enddo                     ! ij
+
+      end subroutine stressCD_T
+
+!=======================================================================
+! Computes the strain rates and internal stress components for U points
+!
+! author: JF Lemieux, ECCC
+! Nov 2021
+
+      subroutine stressCD_U (nx_block,  ny_block,  &
+                             icellu,               &
+                             indxui,    indxuj,    &
+                             uarea,                &
+                             zetax2U,   etax2U,    &
+                             strengthU,            &
+                             divergU,   tensionU,  &
+                             shearU,    DeltaU,    &
+                             stresspU,  stressmU,  &
+                             stress12U )
+
+      use ice_dyn_shared, only: strain_rates_U, &
+                                visc_replpress, &
+                                visc_method, deltaminEVP, capping
+
+      integer (kind=int_kind), intent(in) :: &
+         nx_block, ny_block, & ! block dimensions
+         icellu                ! no. of cells where iceumask = 1
+
+      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
+         indxui   , & ! compressed index in i-direction
+         indxuj       ! compressed index in j-direction
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
+         uarea    , & ! area of U-cell (m^2)
+         zetax2U  , & ! 2*zeta at U point
+         etax2U   , & ! 2*eta at U point
+         strengthU, & ! ice strength at U point
+         divergU  , & ! div at U point
+         tensionU , & ! tension at U point
+         shearU   , & ! shear at U point
+         deltaU       ! delt at U point
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: &
+         stresspU , & ! sigma11+sigma22
+         stressmU , & ! sigma11-sigma22
+         stress12U    ! sigma12
+
+      ! local variables
+
+      integer (kind=int_kind) :: &
+         i, j, ij
+
+      real (kind=dbl_kind) :: &
+         lzetax2U , & ! bulk viscosity at U point
+         letax2U  , & ! shear viscosity at U point
+         lrep_prsU , & ! replacement pressure at U point
+         DminUarea    ! Dmin on U
+
+      character(len=*), parameter :: subname = '(stressCD_U)'
+
+      do ij = 1, icellu
+         i = indxui(ij)
+         j = indxuj(ij)
+
+         !-----------------------------------------------------------------
+         ! viscosities and replacement pressure at U point
+         ! avg_zeta: Bouillon et al. 2013, C1 method of Kimmritz et al. 2016
+         ! avg_strength: C2 method of Kimmritz et al. 2016
+         !-----------------------------------------------------------------
+
+         if (visc_method == 'avg_zeta') then
+            lzetax2U  = zetax2U(i,j)
+            letax2U   = etax2U(i,j)
+            lrep_prsU = (c1-Ktens)/(c1+Ktens)*lzetax2U*deltaU(i,j)
+
+         elseif (visc_method == 'avg_strength') then
+            DminUarea = deltaminEVP*uarea(i,j)
+            ! all of lzetax2U, letax2U and lrep_prsU are used below; they are
+            ! recalculated at the U point from the averaged strength
+            call visc_replpress (strengthU(i,j), DminUarea, DeltaU(i,j), &
+                                 lzetax2U , letax2U , lrep_prsU , capping)
+         endif
+
+         !-----------------------------------------------------------------
+         ! the stresses ! kg/s^2
+         !-----------------------------------------------------------------
+
+         ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code
+
+         stresspU(i,j) = (stresspU (i,j)*(c1-arlx1i*revp) &
+                        + arlx1i*(lzetax2U*divergU(i,j) - lrep_prsU)) * denom1
+
+         stressmU(i,j) = (stressmU (i,j)*(c1-arlx1i*revp) &
+                        + arlx1i*letax2U*tensionU(i,j)) * denom1
+
+         stress12U(i,j) = (stress12U(i,j)*(c1-arlx1i*revp) &
+                         + arlx1i*p5*letax2U*shearU(i,j)) * denom1
+
+      enddo                     ! ij
+
+      end subroutine stressCD_U
+
+!=======================================================================
+! Computes divergence of stress tensor at the E or N point for the mom equation
+!
+! author: JF Lemieux, ECCC
+! Nov 2021
+!
+! Hunke, E. C., and J. K. Dukowicz (2002). The Elastic-Viscous-Plastic
+! Sea Ice Dynamics Model in General Orthogonal Curvilinear Coordinates
+! on a Sphere - Incorporation of Metric Terms. Mon. Weather Rev.,
+! 130, 1848-1865.
+!
+! Bouillon, S., M. Morales Maqueda, V. Legat and T. Fichefet (2009). An
+! elastic-viscous-plastic sea ice model formulated on Arakawa B and C grids.
+! Ocean Model., 27, 174-184.
+
+      subroutine div_stress_Ex(nx_block, ny_block, &
+                               icell   ,           &
+                               indxi   , indxj   , &
+                               dxE     , dyE     , &
+                               dxU     , dyT     , &
+                               arear   ,           &
+                               stressp , stressm , &
+                               stress12,           &
+                               strintx )
+
+      integer (kind=int_kind), intent(in) :: &
+         nx_block, ny_block, & ! block dimensions
+         icell                 ! no. of cells where epm (or npm) = 1
+
+      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
+         indxi    , & ! compressed index in i-direction
+         indxj        ! compressed index in j-direction
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
+         dxE      , & ! width of E or N-cell through the middle (m)
+         dyE      , & ! height of E or N-cell through the middle (m)
+         dxU      , & ! width of T or U-cell through the middle (m)
+         dyT      , & ! height of T or U-cell through the middle (m)
+         arear        ! earear or narear
+
+      real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(in) :: &
+         stressp  , & ! stressp  (U or T) used for strintx calculation
+         stressm  , & ! stressm  (U or T) used for strintx calculation
+         stress12     ! stress12 (U or T) used for strintx calculation
+
+      real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(out) :: &
+         strintx      ! 
local variables + + integer (kind=int_kind) :: & + i, j, ij + + character(len=*), parameter :: subname = '(div_stress_Ex)' + + do ij = 1, icell + i = indxi(ij) + j = indxj(ij) + strintx(i,j) = arear(i,j) * & + ( p5 * dyE(i,j) * ( stressp(i+1,j ) - stressp (i ,j ) ) & + + (p5/ dyE(i,j)) * ( (dyT(i+1,j )**2) * stressm (i+1,j ) & + -(dyT(i ,j )**2) * stressm (i ,j ) ) & + + (c1/ dxE(i,j)) * ( (dxU(i ,j )**2) * stress12(i ,j ) & + -(dxU(i ,j-1)**2) * stress12(i ,j-1) ) ) + enddo + + end subroutine div_stress_Ex + +!======================================================================= + subroutine div_stress_Ey(nx_block, ny_block, & + icell , & + indxi , indxj , & + dxE , dyE , & + dxU , dyT , & + arear , & + stressp , stressm , & + stress12, & + strinty ) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icell ! no. of cells where epm (or npm) = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxi , & ! compressed index in i-direction + indxj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + dxE , & ! width of E or N-cell through the middle (m) + dyE , & ! height of E or N-cell through the middle (m) + dxU , & ! width of T or U-cell through the middle (m) + dyT , & ! height of T or U-cell through the middle (m) + arear ! earear or narear + + real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(in) :: & + stressp , & ! stressp (U or T) used for strinty calculation + stressm , & ! stressm (U or T) used for strinty calculation + stress12 ! stress12 (U or T) used for strinty calculation + + real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(out) :: & + strinty ! div of stress tensor for v component + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + character(len=*), parameter :: subname = '(div_stress_Ey)' + + do ij = 1, icell + i = indxi(ij) + j = indxj(ij) + strinty(i,j) = arear(i,j) * & + ( p5 * dxE(i,j) * ( stressp(i ,j ) - stressp (i ,j-1) ) & + - (p5/ dxE(i,j)) * ( (dxU(i ,j )**2) * stressm (i ,j ) & + -(dxU(i ,j-1)**2) * stressm (i ,j-1) ) & + + (c1/ dyE(i,j)) * ( (dyT(i+1,j )**2) * stress12(i+1,j ) & + -(dyT(i ,j )**2) * stress12(i ,j ) ) ) + enddo + + end subroutine div_stress_Ey + +!======================================================================= + subroutine div_stress_Nx(nx_block, ny_block, & + icell , & + indxi , indxj , & + dxN , dyN , & + dxT , dyU , & + arear , & + stressp , stressm , & + stress12, & + strintx ) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icell ! no. of cells where epm (or npm) = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxi , & ! compressed index in i-direction + indxj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + dxN , & ! width of E or N-cell through the middle (m) + dyN , & ! height of E or N-cell through the middle (m) + dxT , & ! width of T or U-cell through the middle (m) + dyU , & ! height of T or U-cell through the middle (m) + arear ! earear or narear + + real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(in) :: & + stressp , & ! stressp (U or T) used for strintx calculation + stressm , & ! stressm (U or T) used for strintx calculation + stress12 ! stress12 (U or T) used for strintx calculation + + real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(out) :: & + strintx ! 
div of stress tensor for u component + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + character(len=*), parameter :: subname = '(div_stress_Nx)' + + do ij = 1, icell + i = indxi(ij) + j = indxj(ij) + strintx(i,j) = arear(i,j) * & + ( p5 * dyN(i,j) * ( stressp(i ,j ) - stressp (i-1,j ) ) & + + (p5/ dyN(i,j)) * ( (dyU(i ,j )**2) * stressm (i ,j ) & + -(dyU(i-1,j )**2) * stressm (i-1,j ) ) & + + (c1/ dxN(i,j)) * ( (dxT(i ,j+1)**2) * stress12(i ,j+1) & + -(dxT(i ,j )**2) * stress12(i ,j ) ) ) + enddo + + end subroutine div_stress_Nx + +!======================================================================= + subroutine div_stress_Ny(nx_block, ny_block, & + icell , & + indxi , indxj , & + dxN , dyN , & + dxT , dyU , & + arear , & + stressp , stressm , & + stress12, & + strinty ) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icell ! no. of cells where epm (or npm) = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxi , & ! compressed index in i-direction + indxj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + dxN , & ! width of E or N-cell through the middle (m) + dyN , & ! height of E or N-cell through the middle (m) + dxT , & ! width of T or U-cell through the middle (m) + dyU , & ! height of T or U-cell through the middle (m) + arear ! earear or narear + + real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(in) :: & + stressp , & ! stressp (U or T) used for strinty calculation + stressm , & ! stressm (U or T) used for strinty calculation + stress12 ! stress12 (U or T) used for strinty calculation + + real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(out) :: & + strinty ! div of stress tensor for v component + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + character(len=*), parameter :: subname = '(div_stress_Ny)' + + do ij = 1, icell + i = indxi(ij) + j = indxj(ij) + strinty(i,j) = arear(i,j) * & + ( p5 * dxN(i,j) * ( stressp(i ,j+1) - stressp (i ,j ) ) & + - (p5/ dxN(i,j)) * ( (dxT(i ,j+1)**2) * stressm (i ,j+1) & + -(dxT(i ,j )**2) * stressm (i ,j ) ) & + + (c1/ dyN(i,j)) * ( (dyU(i ,j )**2) * stress12(i ,j ) & + -(dyU(i-1,j )**2) * stress12(i-1,j ) ) ) + enddo + + end subroutine div_stress_Ny + !======================================================================= end module ice_dyn_evp diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 old mode 100755 new mode 100644 index 43bbe41a0..1ca41898d --- a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 @@ -132,7 +132,8 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & use ice_kinds_mod use ice_constants, only : p027, p055, p111, p166, p222, p25, & p333, p5, c1p5, c1 - use ice_dyn_shared, only : ecci, denom1, arlx1i, Ktens, revp + use ice_dyn_shared, only : ecci, denom1, arlx1i, Ktens, revp, & + deltaminEVP implicit none @@ -152,7 +153,7 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & ! 
local variables
 
       integer(kind=int_kind) :: iw, il, iu
-      real(kind=dbl_kind) :: puny, divune, divunw, divuse, divusw, &
+      real(kind=dbl_kind) :: divune, divunw, divuse, divusw, &
          tensionne, tensionnw, tensionse, tensionsw, shearne, shearnw, &
          shearse, shearsw, Deltane, Deltanw, Deltase, Deltasw, c0ne, &
          c0nw, c0se, c0sw, c1ne, c1nw, c1se, c1sw, ssigpn, ssigps, &
@@ -163,17 +164,10 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, &
          csig12se, csig12sw, str12ew, str12we, str12ns, str12sn, &
          strp_tmp, strm_tmp, tmp_uvel_ee, tmp_vvel_se, tmp_vvel_ee, &
          tmp_vvel_ne, tmp_uvel_ne, tmp_uvel_se, dxhy, dyhx, cxp, cyp, &
-         cxm, cym, tinyarea,tmparea
+         cxm, cym, tmparea, DminTarea
 
       character(len=*), parameter :: subname = '(stress_iter)'
 
-      call icepack_query_parameters(puny_out=puny)
-      call icepack_warnings_flush(nu_diag)
-      if (icepack_warnings_aborted()) then
-         call abort_ice(error_message=subname, file=__FILE__, &
-            line=__LINE__)
-      end if
-
 #ifdef _OPENACC
       !$acc parallel &
       !$acc present(ee, ne, se, strength, uvel, vvel, dxt, dyt, hte, &
@@ -190,14 +184,14 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, &
 
          if (skiptcell(iw)) cycle
 
-         tmparea = dxt(iw) * dyt(iw) ! necessary to split calc of tinyarea. Otherwize not binary identical
-         tinyarea = puny * tmparea
-         dxhy = p5 * (hte(iw) - htem1(iw))
-         dyhx = p5 * (htn(iw) - htnm1(iw))
-         cxp = c1p5 * htn(iw) - p5 * htnm1(iw)
-         cyp = c1p5 * hte(iw) - p5 * htem1(iw)
-         cxm = -(c1p5 * htnm1(iw) - p5 * htn(iw))
-         cym = -(c1p5 * htem1(iw) - p5 * hte(iw))
+         tmparea = dxt(iw) * dyt(iw) ! necessary to split calc of DminTarea. Otherwise not binary identical
+         DminTarea = deltaminEVP * tmparea
+         dxhy = p5 * (hte(iw) - htem1(iw))
+         dyhx = p5 * (htn(iw) - htnm1(iw))
+         cxp = c1p5 * htn(iw) - p5 * htnm1(iw)
+         cyp = c1p5 * hte(iw) - p5 * htem1(iw)
+         cxm = -(c1p5 * htnm1(iw) - p5 * htn(iw))
+         cym = -(c1p5 * htem1(iw) - p5 * hte(iw))
 
          !--------------------------------------------------------------
          ! strain rates
@@ -252,10 +246,10 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, &
          ! save replacement pressure for principal stress calculation
          !--------------------------------------------------------------
 
-         c0ne = strength(iw) / max(Deltane, tinyarea)
-         c0nw = strength(iw) / max(Deltanw, tinyarea)
-         c0sw = strength(iw) / max(Deltasw, tinyarea)
-         c0se = strength(iw) / max(Deltase, tinyarea)
+         c0ne = strength(iw) / max(Deltane, DminTarea)
+         c0nw = strength(iw) / max(Deltanw, DminTarea)
+         c0sw = strength(iw) / max(Deltasw, DminTarea)
+         c0se = strength(iw) / max(Deltase, DminTarea)
 
          c1ne = c0ne * arlx1i
         c1nw = c0nw * arlx1i
@@ -408,7 +402,8 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, &
       use ice_kinds_mod
       use ice_constants, only : p027, p055, p111, p166, p222, p25, &
          p333, p5, c1p5, c1, c0
-      use ice_dyn_shared, only : ecci, denom1, arlx1i, Ktens, revp
+      use ice_dyn_shared, only : ecci, denom1, arlx1i, Ktens, revp,&
+         deltaminEVP
 
       implicit none
 
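! ------------------------------------------------------------------
! [editorial note] Summary sketch of the regularization change in the hunks
! above; not part of the merged code. The Delta floor in the viscous
! coefficient denominator used to be tinyarea = puny*tarea and is now tied
! to the namelist parameter deltaminEVP (the minimum Delta for the EVP
! viscosities declared in ice_dyn_shared), e.g. for each 1d vector element:
!
!      tmparea   = dxt(iw) * dyt(iw)      ! tarea, split for bit reproducibility
!      DminTarea = deltaminEVP * tmparea
!      c0ne      = strength(iw) / max(Deltane, DminTarea)
!
! so weak-deformation cells are now capped by a physically motivated
! threshold rather than the machine-precision value puny.
! ------------------------------------------------------------------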
@@ -429,7 +424,7 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, &
 
       ! local variables
 
       integer(kind=int_kind) :: iw, il, iu
-      real(kind=dbl_kind) :: puny, divune, divunw, divuse, divusw, &
+      real(kind=dbl_kind) :: divune, divunw, divuse, divusw, &
          tensionne, tensionnw, tensionse, tensionsw, shearne, shearnw, &
          shearse, shearsw, Deltane, Deltanw, Deltase, Deltasw, c0ne, &
         c0nw, c0se, c0sw, c1ne, c1nw, c1se, c1sw, ssigpn, ssigps, &
@@ -440,17 +435,10 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, &
          csig12se, csig12sw, str12ew, str12we, str12ns, str12sn, &
          strp_tmp, strm_tmp, tmp_uvel_ee, tmp_vvel_se, tmp_vvel_ee, &
          tmp_vvel_ne, tmp_uvel_ne, tmp_uvel_se, dxhy, dyhx, cxp, cyp, &
-         cxm, cym, tinyarea, tmparea
+         cxm, cym, tmparea, DminTarea
 
       character(len=*), parameter :: subname = '(stress_last)'
 
-      call icepack_query_parameters(puny_out=puny)
-      call icepack_warnings_flush(nu_diag)
-      if (icepack_warnings_aborted()) then
-         call abort_ice(error_message=subname, file=__FILE__, &
-            line=__LINE__)
-      end if
-
 #ifdef _OPENACC
       !$acc parallel &
       !$acc present(ee, ne, se, strength, uvel, vvel, dxt, dyt, hte, &
@@ -468,14 +456,14 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, &
 
          if (skiptcell(iw)) cycle
 
-         tmparea = dxt(iw) * dyt(iw) ! necessary to split calc of tinyarea. Otherwize not binary identical
-         tinyarea = puny * tmparea
-         dxhy = p5 * (hte(iw) - htem1(iw))
-         dyhx = p5 * (htn(iw) - htnm1(iw))
-         cxp = c1p5 * htn(iw) - p5 * htnm1(iw)
-         cyp = c1p5 * hte(iw) - p5 * htem1(iw)
-         cxm = -(c1p5 * htnm1(iw) - p5 * htn(iw))
-         cym = -(c1p5 * htem1(iw) - p5 * hte(iw))
+         tmparea = dxt(iw) * dyt(iw) ! necessary to split calc of DminTarea. Otherwise not binary identical
+         DminTarea = deltaminEVP * tmparea
+         dxhy = p5 * (hte(iw) - htem1(iw))
+         dyhx = p5 * (htn(iw) - htnm1(iw))
+         cxp = c1p5 * htn(iw) - p5 * htnm1(iw)
+         cyp = c1p5 * hte(iw) - p5 * htem1(iw)
+         cxm = -(c1p5 * htnm1(iw) - p5 * htn(iw))
+         cym = -(c1p5 * htem1(iw) - p5 * hte(iw))
 
          !--------------------------------------------------------------
          ! strain rates
@@ -545,10 +533,10 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, &
          !--------------------------------------------------------------
          ! 
save replacement pressure for principal stress calculation !-------------------------------------------------------------- - c0ne = strength(iw) / max(Deltane, tinyarea) - c0nw = strength(iw) / max(Deltanw, tinyarea) - c0sw = strength(iw) / max(Deltasw, tinyarea) - c0se = strength(iw) / max(Deltase, tinyarea) + c0ne = strength(iw) / max(Deltane, DminTarea) + c0nw = strength(iw) / max(Deltanw, DminTarea) + c0sw = strength(iw) / max(Deltasw, DminTarea) + c0se = strength(iw) / max(Deltase, DminTarea) c1ne = c0ne * arlx1i c1nw = c0nw * arlx1i diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 old mode 100755 new mode 100644 index 76d0caf41..bc7f3abb1 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -10,31 +10,35 @@ module ice_dyn_shared use ice_kinds_mod - use ice_communicate, only: my_task, master_task + use ice_communicate, only: my_task, master_task, get_num_procs use ice_constants, only: c0, c1, c2, c3, c4, c6 use ice_constants, only: omega, spval_dbl, p01, p001, p5 use ice_blocks, only: nx_block, ny_block use ice_domain_size, only: max_blocks use ice_fileunits, only: nu_diag use ice_exit, only: abort_ice + use ice_grid, only: grid_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_query_parameters implicit none private - public :: init_dyn, set_evp_parameters, stepu, principal_stress, & - dyn_prep1, dyn_prep2, dyn_finish, & + public :: set_evp_parameters, stepu, stepuv_CD, stepu_C, stepv_C, & + principal_stress, init_dyn, dyn_prep1, dyn_prep2, dyn_finish, & seabed_stress_factor_LKD, seabed_stress_factor_prob, & - alloc_dyn_shared, deformations, strain_rates, & - viscous_coeffs_and_rep_pressure, & - stack_velocity_field, unstack_velocity_field + alloc_dyn_shared, & + deformations, deformationsC_T, deformationsCD_T, & + strain_rates, strain_rates_T, strain_rates_U, & + visc_replpress, & + dyn_haloUpdate, & + stack_fields, unstack_fields ! namelist parameters integer (kind=int_kind), public :: & kdyn , & ! type of dynamics ( -1, 0 = off, 1 = evp, 2 = eap ) kridge , & ! set to "-1" to turn off ridging - ndte ! number of subcycles: ndte=dt/dte + ndte ! number of subcycles: ndte=dt/dte character (len=char_len), public :: & coriolis , & ! 'constant', 'zero', or 'latitude' @@ -45,16 +49,20 @@ module ice_dyn_shared character (len=char_len), public :: & evp_algorithm ! standard_2d = 2D org version (standard) - ! shared_mem_1d = 1d without mpi call and refactorization to 1d + ! shared_mem_1d = 1d without mpi call and refactorization to 1d + + real (kind=dbl_kind), public :: & + elasticDamp ! coefficient for calculating the parameter E, elastic damping parameter + ! other EVP parameters - character (len=char_len), public :: & - yield_curve , & ! 'ellipse' ('teardrop' needs further testing) + character (len=char_len), public :: & + yield_curve , & ! 'ellipse' ('teardrop' needs further testing) + visc_method , & ! method for viscosity calc at U points (C, CD grids) seabed_stress_method ! method for seabed stress calculation - ! LKD: Lemieux et al. 2015, probabilistic: Dupont et al. in prep. - + ! LKD: Lemieux et al. 2015, probabilistic: Dupont et al. in prep. + real (kind=dbl_kind), parameter, public :: & - eyc = 0.36_dbl_kind, & ! coefficient for calculating the parameter E u0 = 5e-5_dbl_kind, & ! residual velocity for seabed stress (m/s) cosw = c1 , & ! cos(ocean turning angle) ! turning angle = 0 sinw = c0 , & ! 
sin(ocean turning angle) ! turning angle = 0 @@ -62,42 +70,87 @@ module ice_dyn_shared m_min = p01 ! minimum ice mass (kg/m^2) real (kind=dbl_kind), public :: & - revp , & ! 0 for classic EVP, 1 for revised EVP + revp , & ! 0 for classic EVP, 1 for revised EVP e_yieldcurve, & ! VP aspect ratio of elliptical yield curve e_plasticpot, & ! VP aspect ratio of elliptical plastic potential - epp2i , & ! 1/(e_plasticpot)^2 - e_factor , & ! (e_yieldcurve)^2/(e_plasticpot)^4 - ecci , & ! temporary for 1d evp - dtei , & ! 1/dte, where dte is subcycling timestep (1/s) -! dte2T , & ! dte/2T - denom1 ! constants for stress equation + epp2i , & ! 1/(e_plasticpot)^2 + e_factor , & ! (e_yieldcurve)^2/(e_plasticpot)^4 + ecci , & ! temporary for 1d evp + deltaminEVP , & ! minimum delta for viscosities (EVP) + deltaminVP , & ! minimum delta for viscosities (VP) + capping , & ! capping of viscosities (1=Hibler79, 0=Kreyscher2000) + dtei , & ! 1/dte, where dte is subcycling timestep (1/s) +! dte2T , & ! dte/2T + denom1 ! constants for stress equation real (kind=dbl_kind), public :: & ! Bouillon et al relaxation constants - arlx , & ! alpha for stressp - arlx1i , & ! (inverse of alpha) for stressp - brlx ! beta for momentum + arlx , & ! alpha for stressp + arlx1i , & ! (inverse of alpha) for stressp + brlx ! beta for momentum + + real (kind=dbl_kind), allocatable, public :: & + fcor_blk(:,:,:) ! Coriolis parameter (1/s) + + real (kind=dbl_kind), allocatable, public :: & + fcorE_blk(:,:,:), & ! Coriolis parameter at E points (1/s) + fcorN_blk(:,:,:) ! Coriolis parameter at N points (1/s) + + real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & + uvel_init , & ! x-component of velocity (m/s), beginning of timestep + vvel_init ! y-component of velocity (m/s), beginning of timestep - real (kind=dbl_kind), allocatable, public :: & - fcor_blk(:,:,:) ! Coriolis parameter (1/s) + real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & + uvelN_init , & ! x-component of velocity (m/s), beginning of timestep + vvelN_init ! y-component of velocity (m/s), beginning of timestep real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - uvel_init, & ! x-component of velocity (m/s), beginning of timestep - vvel_init ! y-component of velocity (m/s), beginning of timestep + uvelE_init , & ! x-component of velocity (m/s), beginning of timestep + vvelE_init ! y-component of velocity (m/s), beginning of timestep + + real (kind=dbl_kind), allocatable, public :: & + DminTarea(:,:,:) ! deltamin * tarea (m^2/s) ! ice isotropic tensile strength parameter real (kind=dbl_kind), public :: & - Ktens ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) + Ktens ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) ! seabed (basal) stress parameters and settings logical (kind=log_kind), public :: & - seabed_stress ! if true, seabed stress for landfast on + seabed_stress ! if true, seabed stress for landfast on real (kind=dbl_kind), public :: & - k1 , & ! 1st free parameter for seabed1 grounding parameterization - k2 , & ! second free parameter (N/m^3) for seabed1 grounding parametrization - alphab , & ! alphab=Cb factor in Lemieux et al 2015 - threshold_hw ! max water depth for grounding - ! see keel data from Amundrud et al. 2004 (JGR) + k1 , & ! 1st free parameter for seabed1 grounding parameterization + k2 , & ! second free parameter (N/m^3) for seabed1 grounding parametrization + alphab , & ! alphab=Cb factor in Lemieux et al 2015 + threshold_hw ! max water depth for grounding + ! 
see keel data from Amundrud et al. 2004 (JGR) + + interface strain_rates_T + module procedure strain_rates_Tdt + module procedure strain_rates_Tdtsd + end interface + + interface dyn_haloUpdate + module procedure dyn_haloUpdate1 + module procedure dyn_haloUpdate2 + module procedure dyn_haloUpdate3 + module procedure dyn_haloUpdate4 + module procedure dyn_haloUpdate5 + end interface + + interface stack_fields + module procedure stack_fields2 + module procedure stack_fields3 + module procedure stack_fields4 + module procedure stack_fields5 + end interface + + interface unstack_fields + module procedure unstack_fields2 + module procedure unstack_fields3 + module procedure unstack_fields4 + module procedure unstack_fields5 + end interface !======================================================================= @@ -105,36 +158,49 @@ module ice_dyn_shared !======================================================================= ! -! Allocate space for all variables +! Allocate space for all variables ! subroutine alloc_dyn_shared integer (int_kind) :: ierr + character(len=*), parameter :: subname = '(alloc_dyn_shared)' + allocate( & uvel_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep vvel_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep stat=ierr) - if (ierr/=0) call abort_ice('(alloc_dyn_shared): Out of memory') + if (ierr/=0) call abort_ice(subname//': Out of memory') + + if (grid_ice == 'CD' .or. grid_ice == 'C') then + allocate( & + uvelE_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep + vvelE_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep + uvelN_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep + vvelN_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep + stat=ierr) + if (ierr/=0) call abort_ice(subname//': Out of memory') + endif end subroutine alloc_dyn_shared !======================================================================= - ! Initialize parameters and variables needed for the dynamics ! author: Elizabeth C. Hunke, LANL subroutine init_dyn (dt) use ice_blocks, only: nx_block, ny_block - use ice_domain, only: nblocks + use ice_domain, only: nblocks, halo_dynbundle use ice_domain_size, only: max_blocks use ice_flux, only: rdg_conv, rdg_shear, iceumask, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & - stress12_1, stress12_2, stress12_3, stress12_4 - use ice_state, only: uvel, vvel, divu, shear - use ice_grid, only: ULAT + stress12_1, stress12_2, stress12_3, stress12_4, & + stresspT, stressmT, stress12T, & + stresspU, stressmU, stress12U + use ice_state, only: uvel, vvel, uvelE, vvelE, uvelN, vvelN, divu, shear + use ice_grid, only: ULAT, NLAT, ELAT, tarea real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -142,20 +208,33 @@ subroutine init_dyn (dt) ! local variables integer (kind=int_kind) :: & - i, j, & - iblk ! block index + i, j , & ! indices + nprocs, & ! number of processors + iblk ! block index character(len=*), parameter :: subname = '(init_dyn)' call set_evp_parameters (dt) + ! Set halo_dynbundle, this is empirical at this point, could become namelist + halo_dynbundle = .true. + nprocs = get_num_procs() + if (nx_block*ny_block/nprocs > 100) halo_dynbundle = .false. 
+ if (my_task == master_task) then write(nu_diag,*) 'dt = ',dt write(nu_diag,*) 'dte = ',dt/real(ndte,kind=dbl_kind) - write(nu_diag,*) 'tdamp =', eyc*dt + write(nu_diag,*) 'tdamp =', elasticDamp * dt + write(nu_diag,*) 'halo_dynbundle =', halo_dynbundle endif allocate(fcor_blk(nx_block,ny_block,max_blocks)) + allocate(DminTarea(nx_block,ny_block,max_blocks)) + + if (grid_ice == 'CD' .or. grid_ice == 'C') then + allocate(fcorE_blk(nx_block,ny_block,max_blocks)) + allocate(fcorN_blk(nx_block,ny_block,max_blocks)) + endif !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) do iblk = 1, nblocks @@ -165,6 +244,12 @@ subroutine init_dyn (dt) ! velocity uvel(i,j,iblk) = c0 ! m/s vvel(i,j,iblk) = c0 ! m/s + if (grid_ice == 'CD' .or. grid_ice == 'C') then ! extra velocity variables + uvelE(i,j,iblk) = c0 + vvelE(i,j,iblk) = c0 + uvelN(i,j,iblk) = c0 + vvelN(i,j,iblk) = c0 + endif ! strain rates divu (i,j,iblk) = c0 @@ -181,6 +266,21 @@ subroutine init_dyn (dt) fcor_blk(i,j,iblk) = c2*omega*sin(ULAT(i,j,iblk)) ! 1/s endif + if (grid_ice == 'CD' .or. grid_ice == 'C') then + + if (trim(coriolis) == 'constant') then + fcorE_blk(i,j,iblk) = 1.46e-4_dbl_kind ! Hibler 1979, N. Hem; 1/s + fcorN_blk(i,j,iblk) = 1.46e-4_dbl_kind ! Hibler 1979, N. Hem; 1/s + else if (trim(coriolis) == 'zero') then + fcorE_blk(i,j,iblk) = c0 + fcorN_blk(i,j,iblk) = c0 + else + fcorE_blk(i,j,iblk) = c2*omega*sin(ELAT(i,j,iblk)) ! 1/s + fcorN_blk(i,j,iblk) = c2*omega*sin(NLAT(i,j,iblk)) ! 1/s + endif + + endif + ! stress tensor, kg/s^2 stressp_1 (i,j,iblk) = c0 stressp_2 (i,j,iblk) = c0 @@ -195,6 +295,21 @@ subroutine init_dyn (dt) stress12_3(i,j,iblk) = c0 stress12_4(i,j,iblk) = c0 + if (grid_ice == 'CD' .or. grid_ice == 'C') then + stresspT (i,j,iblk) = c0 + stressmT (i,j,iblk) = c0 + stress12T (i,j,iblk) = c0 + stresspU (i,j,iblk) = c0 + stressmU (i,j,iblk) = c0 + stress12U (i,j,iblk) = c0 + endif + + if (kdyn == 1) then + DminTarea(i,j,iblk) = deltaminEVP*tarea(i,j,iblk) + elseif (kdyn == 3) then + DminTarea(i,j,iblk) = deltaminVP*tarea(i,j,iblk) + endif + ! ice extent mask on velocity points iceumask(i,j,iblk) = .false. @@ -206,7 +321,6 @@ subroutine init_dyn (dt) end subroutine init_dyn !======================================================================= - ! Set parameters needed for the evp dynamics. ! Note: This subroutine is currently called only during initialization. ! If the dynamics time step can vary during runtime, it should @@ -230,7 +344,7 @@ subroutine set_evp_parameters (dt) ! elastic time step !dte = dt/real(ndte,kind=dbl_kind) ! s !dtei = c1/dte ! 1/s - dtei = real(ndte,kind=dbl_kind)/dt + dtei = real(ndte,kind=dbl_kind)/dt ! variables for elliptical yield curve and plastic potential epp2i = c1/e_plasticpot**2 @@ -238,8 +352,8 @@ subroutine set_evp_parameters (dt) ecci = c1/e_yieldcurve**2 ! temporary for 1d evp ! constants for stress equation - !tdamp2 = c2*eyc*dt ! s - !dte2T = dte/tdamp2 or c1/(c2*eyc*real(ndte,kind=dbl_kind)) ! ellipse (unitless) + !tdamp2 = c2 * elasticDamp * dt ! s + !dte2T = dte/tdamp2 or c1/(c2*elasticDamp*real(ndte,kind=dbl_kind)) ! ellipse (unitless) if (revised_evp) then ! 
Bouillon et al, Ocean Mod 2013 revp = c1 @@ -250,7 +364,7 @@ subroutine set_evp_parameters (dt) !arlx1i = dte2T !arlx = c1/arlx1i !brlx = dt*dtei - arlx = c2*eyc*real(ndte,kind=dbl_kind) + arlx = c2 * elasticDamp * real(ndte,kind=dbl_kind) arlx1i = c1/arlx brlx = real(ndte,kind=dbl_kind) denom1 = c1/(c1+arlx1i) @@ -263,7 +377,6 @@ subroutine set_evp_parameters (dt) end subroutine set_evp_parameters !======================================================================= - ! Computes quantities needed in the stress tensor (sigma) ! and momentum (u) equations, but which do not change during ! the thermodynamics/transport time step: @@ -271,12 +384,10 @@ end subroutine set_evp_parameters ! ! author: Elizabeth C. Hunke, LANL - subroutine dyn_prep1 (nx_block, ny_block, & + subroutine dyn_prep1 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - aice, vice, & - vsno, tmask, & - strairxT, strairyT, & - strairx, strairy, & + aice, vice, & + vsno, tmask, & tmass, icetmask) integer (kind=int_kind), intent(in) :: & @@ -286,16 +397,12 @@ subroutine dyn_prep1 (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & aice , & ! concentration of ice vice , & ! volume per unit area of ice (m) - vsno , & ! volume per unit area of snow (m) - strairxT, & ! stress on ice by air, x-direction - strairyT ! stress on ice by air, y-direction + vsno ! volume per unit area of snow (m) logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: & tmask ! land/boundary mask, thickness (T-cell) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - strairx , & ! stress on ice by air, x-direction - strairy , & ! stress on ice by air, y-direction tmass ! total mass of ice and snow (kg/m^2) integer (kind=int_kind), dimension (nx_block,ny_block), intent(out) :: & @@ -322,34 +429,26 @@ subroutine dyn_prep1 (nx_block, ny_block, & do j = 1, ny_block do i = 1, nx_block - !----------------------------------------------------------------- - ! total mass of ice and snow, centered in T-cell - ! NOTE: vice and vsno must be up to date in all grid cells, - ! including ghost cells - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! total mass of ice and snow, centered in T-cell + ! NOTE: vice and vsno must be up to date in all grid cells, + ! including ghost cells + !----------------------------------------------------------------- if (tmask(i,j)) then tmass(i,j) = (rhoi*vice(i,j) + rhos*vsno(i,j)) ! kg/m^2 else tmass(i,j) = c0 endif - !----------------------------------------------------------------- - ! ice extent mask (T-cells) - !----------------------------------------------------------------- - tmphm(i,j) = tmask(i,j) .and. (aice (i,j) > a_min) & + !----------------------------------------------------------------- + ! ice extent mask (T-cells) + !----------------------------------------------------------------- + tmphm(i,j) = tmask(i,j) .and. (aice (i,j) > a_min) & .and. (tmass(i,j) > m_min) - !----------------------------------------------------------------- - ! prep to convert to U grid - !----------------------------------------------------------------- - ! these quantities include the factor of aice needed for - ! correct treatment of free drift - strairx(i,j) = strairxT(i,j) - strairy(i,j) = strairyT(i,j) - - !----------------------------------------------------------------- - ! 
augmented mask (land + open ocean) - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! augmented mask (land + open ocean) + !----------------------------------------------------------------- icetmask (i,j) = 0 enddo @@ -359,8 +458,8 @@ subroutine dyn_prep1 (nx_block, ny_block, & do i = ilo, ihi ! extend ice extent mask (T-cells) to points around pack - if (tmphm(i-1,j+1) .or. tmphm(i,j+1) .or. tmphm(i+1,j+1) .or. & - tmphm(i-1,j) .or. tmphm(i,j) .or. tmphm(i+1,j) .or. & + if (tmphm(i-1,j+1) .or. tmphm(i,j+1) .or. tmphm(i+1,j+1) .or. & + tmphm(i-1,j) .or. tmphm(i,j) .or. tmphm(i+1,j) .or. & tmphm(i-1,j-1) .or. tmphm(i,j-1) .or. tmphm(i+1,j-1) ) then icetmask(i,j) = 1 endif @@ -382,31 +481,31 @@ end subroutine dyn_prep1 ! ! author: Elizabeth C. Hunke, LANL - subroutine dyn_prep2 (nx_block, ny_block, & + subroutine dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt, icellu, & - indxti, indxtj, & - indxui, indxuj, & - aiu, umass, & - umassdti, fcor, & - umask, & - uocn, vocn, & - strairx, strairy, & - ss_tltx, ss_tlty, & - icetmask, iceumask, & - fm, dt, & - strtltx, strtlty, & + icellt, icellu, & + indxti, indxtj, & + indxui, indxuj, & + aiu, umass, & + umassdti, fcor, & + umask, & + uocn, vocn, & + strairx, strairy, & + ss_tltx, ss_tlty, & + icetmask, iceumask, & + fm, dt, & + strtltx, strtlty, & strocnx, strocny, & strintx, strinty, & taubx, tauby, & - waterx, watery, & - forcex, forcey, & - stressp_1, stressp_2, & - stressp_3, stressp_4, & - stressm_1, stressm_2, & - stressm_3, stressm_4, & - stress12_1, stress12_2, & - stress12_3, stress12_4, & + waterx, watery, & + forcex, forcey, & + stressp_1, stressp_2, & + stressp_3, stressp_4, & + stressm_1, stressm_2, & + stressm_3, stressm_4, & + stress12_1, stress12_2, & + stress12_3, stress12_4, & uvel_init, vvel_init, & uvel, vvel, & Tbu) @@ -416,14 +515,14 @@ subroutine dyn_prep2 (nx_block, ny_block, & ilo,ihi,jlo,jhi ! beginning and end of physical domain integer (kind=int_kind), intent(out) :: & - icellt , & ! no. of cells where icetmask = 1 - icellu ! no. of cells where iceumask = 1 + icellt , & ! no. of cells where icetmask = 1 + icellu ! no. of cells where iceumask = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(out) :: & - indxti , & ! compressed index in i-direction - indxtj , & ! compressed index in j-direction - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxti , & ! compressed index in i-direction + indxtj , & ! compressed index in j-direction + indxui , & ! compressed index in i-direction + indxuj ! compressed index in j-direction logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: & umask ! land/boundary mask, thickness (U-cell) @@ -479,7 +578,8 @@ subroutine dyn_prep2 (nx_block, ny_block, & integer (kind=int_kind) :: & i, j, ij - real (kind=dbl_kind) :: gravit + real (kind=dbl_kind) :: & + gravit logical (kind=log_kind), dimension(nx_block,ny_block) :: & iceumask_old ! old-time iceumask @@ -501,7 +601,7 @@ subroutine dyn_prep2 (nx_block, ny_block, & taubx (i,j) = c0 tauby (i,j) = c0 - if (icetmask(i,j)==0) then + if (icetmask(i,j)==0) then stressp_1 (i,j) = c0 stressp_2 (i,j) = c0 stressp_3 (i,j) = c0 @@ -514,7 +614,7 @@ subroutine dyn_prep2 (nx_block, ny_block, & stress12_2(i,j) = c0 stress12_3(i,j) = c0 stress12_4(i,j) = c0 - endif + endif enddo ! i enddo ! 
j
@@ -542,13 +642,17 @@ subroutine dyn_prep2 (nx_block, ny_block, &
      !-----------------------------------------------------------------

      icellu = 0
+
      do j = jlo, jhi
      do i = ilo, ihi
-
-         ! ice extent mask (U-cells)
         iceumask_old(i,j) = iceumask(i,j) ! save
-         iceumask(i,j) = (umask(i,j)) .and. (aiu  (i,j) > a_min) &
-                                      .and. (umass(i,j) > m_min)
+!        if (grid_ice == 'B') then ! include ice mask.
+         ! ice extent mask (U-cells)
+         iceumask(i,j) = (umask(i,j)) .and. (aiu  (i,j) > a_min) &
+                                      .and. (umass(i,j) > m_min)
+!        else ! ice mask should be applied to the CD grid. For now it is not implemented.
+!        iceumask(i,j) = umask(i,j)
+!        endif

         if (iceumask(i,j)) then
            icellu = icellu + 1
@@ -618,7 +722,6 @@ subroutine dyn_prep2 (nx_block, ny_block, &
   end subroutine dyn_prep2

!=======================================================================
-
! Calculation of the surface stresses
! Integration of the momentum equation to find velocity (u,v)
!
@@ -663,7 +766,7 @@ subroutine stepu (nx_block, ny_block, &
      uarear     ! 1/uarea

   real (kind=dbl_kind), dimension(nx_block,ny_block,8), intent(in) :: &
-      str
+      str        ! temporary

   real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: &
      uvel    , & ! x-component of velocity (m/s)
@@ -676,7 +779,7 @@ subroutine stepu (nx_block, ny_block, &
      tauby      ! seabed stress, y-direction (N/m^2)

   real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
-      Cw         ! ocean-ice neutral drag coefficient
+      Cw         ! ocean-ice neutral drag coefficient

   ! local variables

@@ -715,11 +818,11 @@ subroutine stepu (nx_block, ny_block, &
         ! ice/ocean stress
         taux = vrel*waterx(i,j) ! NOTE this is not the entire
         tauy = vrel*watery(i,j) ! ocn stress term
-
+
         Cb  = Tbu(i,j) / (sqrt(uold**2 + vold**2) + u0) ! for seabed stress
         ! revp = 0 for classic evp, 1 for revised evp
         cca = (brlx + revp)*umassdti(i,j) + vrel * cosw + Cb ! kg/m^2 s
-
+
         ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel * sinw ! kg/m^2 s

         ab2 = cca**2 + ccb**2
@@ -739,7 +842,7 @@ subroutine stepu (nx_block, ny_block, &
         uvel(i,j) = (cca*cc1 + ccb*cc2) / ab2 ! m/s
         vvel(i,j) = (cca*cc2 - ccb*cc1) / ab2

-         ! calculate seabed stress component for outputs
+         ! calculate seabed stress component for outputs
         ! only needed on last iteration.
         taubx(i,j) = -uvel(i,j)*Cb
         tauby(i,j) = -vvel(i,j)*Cb
@@ -748,7 +851,321 @@ end subroutine stepu

!=======================================================================
+! Integration of the momentum equation to find velocity (u,v) at E and N locations
+
+      subroutine stepuv_CD (nx_block,  ny_block, &
+                            icell,     Cw,       &
+                            indxi,     indxj,    &
+                            aiu,                 &
+                            uocn,      vocn,     &
+                            waterx,    watery,   &
+                            forcex,    forcey,   &
+                            massdti,   fm,       &
+                            strintx,   strinty,  &
+                            taubx,     tauby,    &
+                            uvel_init, vvel_init,&
+                            uvel,      vvel,     &
+                            Tb)
+
+      integer (kind=int_kind), intent(in) :: &
+         nx_block, ny_block, & ! block dimensions
+         icell                 ! total count when ice[en]mask is true
+
+      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
+         indxi   , & ! compressed index in i-direction
+         indxj       ! compressed index in j-direction
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
+         Tb,       & ! seabed stress factor (N/m^2)
+         uvel_init,& ! x-component of velocity (m/s), beginning of timestep
+         vvel_init,& ! y-component of velocity (m/s), beginning of timestep
+         aiu     , & ! ice fraction on [en]-grid
+         waterx  , & ! for ocean stress calculation, x (m/s)
+         watery  , & ! for ocean stress calculation, y (m/s)
+         forcex  , & ! work array: combined atm stress and ocn tilt, x
+         forcey  , & !
work array: combined atm stress and ocn tilt, y + massdti , & ! mass of [EN]-cell/dt (kg/m^2 s) + uocn , & ! ocean current, x-direction (m/s) + vocn , & ! ocean current, y-direction (m/s) + fm , & ! Coriolis param. * mass in [EN]-cell (kg/s) + strintx , & ! divergence of internal ice stress, x (N/m^2) + strinty ! divergence of internal ice stress, y (N/m^2) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + uvel , & ! x-component of velocity (m/s) + vvel ! y-component of velocity (m/s) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + taubx , & ! seabed stress, x-direction (N/m^2) + tauby ! seabed stress, y-direction (N/m^2) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + Cw ! ocean-ice neutral drag coefficient + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + real (kind=dbl_kind) :: & + uold, vold , & ! old-time uvel, vvel + vrel , & ! relative ice-ocean velocity + cca,ccb,ccc,ab2 , & ! intermediate variables + cc1,cc2 , & ! " + taux, tauy , & ! part of ocean stress term + Cb , & ! complete seabed (basal) stress coeff + rhow ! + + character(len=*), parameter :: subname = '(stepuv_CD)' + + !----------------------------------------------------------------- + ! integrate the momentum equation + !----------------------------------------------------------------- + + call icepack_query_parameters(rhow_out=rhow) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + do ij =1, icell + i = indxi(ij) + j = indxj(ij) + + uold = uvel(i,j) + vold = vvel(i,j) + + ! (magnitude of relative ocean current)*rhow*drag*aice + vrel = aiu(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & + (vocn(i,j) - vold)**2) ! m/s + ! ice/ocean stress + taux = vrel*waterx(i,j) ! NOTE this is not the entire + tauy = vrel*watery(i,j) ! ocn stress term + + ccc = sqrt(uold**2 + vold**2) + u0 + Cb = Tb(i,j) / ccc ! for seabed stress + ! revp = 0 for classic evp, 1 for revised evp + cca = (brlx + revp)*massdti(i,j) + vrel * cosw + Cb ! kg/m^2 s + + ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel * sinw ! kg/m^2 s + + ab2 = cca**2 + ccb**2 + + ! compute the velocity components + cc1 = strintx(i,j) + forcex(i,j) + taux & + + massdti(i,j)*(brlx*uold + revp*uvel_init(i,j)) + cc2 = strinty(i,j) + forcey(i,j) + tauy & + + massdti(i,j)*(brlx*vold + revp*vvel_init(i,j)) + uvel(i,j) = (cca*cc1 + ccb*cc2) / ab2 ! m/s + vvel(i,j) = (cca*cc2 - ccb*cc1) / ab2 + + ! calculate seabed stress component for outputs + ! only needed on last iteration. + taubx(i,j) = -uvel(i,j)*Cb + tauby(i,j) = -vvel(i,j)*Cb + + enddo ! ij + + end subroutine stepuv_CD + +!======================================================================= +! Integration of the momentum equation to find velocity u at E location on C grid + + subroutine stepu_C (nx_block, ny_block, & + icell, Cw, & + indxi, indxj, & + aiu, & + uocn, vocn, & + waterx, forcex, & + massdti, fm, & + strintx, taubx, & + uvel_init, & + uvel, vvel, & + Tb) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icell ! total count when ice[en]mask is true + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxi , & ! compressed index in i-direction + indxj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + Tb, & ! seabed stress factor (N/m^2) + uvel_init,& ! 
x-component of velocity (m/s), beginning of timestep + aiu , & ! ice fraction on [en]-grid + waterx , & ! for ocean stress calculation, x (m/s) + forcex , & ! work array: combined atm stress and ocn tilt, x + massdti , & ! mass of e-cell/dt (kg/m^2 s) + uocn , & ! ocean current, x-direction (m/s) + vocn , & ! ocean current, y-direction (m/s) + fm , & ! Coriolis param. * mass in e-cell (kg/s) + strintx , & ! divergence of internal ice stress, x (N/m^2) + Cw , & ! ocean-ice neutral drag coefficient + vvel ! y-component of velocity (m/s) interpolated to E location + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + uvel , & ! x-component of velocity (m/s) + taubx ! seabed stress, x-direction (N/m^2) + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + real (kind=dbl_kind) :: & + uold, vold , & ! old-time uvel, vvel + vrel , & ! relative ice-ocean velocity + cca,ccb,ccc,cc1 , & ! intermediate variables + taux , & ! part of ocean stress term + Cb , & ! complete seabed (basal) stress coeff + rhow ! + + character(len=*), parameter :: subname = '(stepu_C)' + + !----------------------------------------------------------------- + ! integrate the momentum equation + !----------------------------------------------------------------- + + call icepack_query_parameters(rhow_out=rhow) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + do ij =1, icell + i = indxi(ij) + j = indxj(ij) + + uold = uvel(i,j) + vold = vvel(i,j) + + ! (magnitude of relative ocean current)*rhow*drag*aice + vrel = aiu(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & + (vocn(i,j) - vold)**2) ! m/s + ! ice/ocean stress + taux = vrel*waterx(i,j) ! NOTE this is not the entire + + ccc = sqrt(uold**2 + vold**2) + u0 + Cb = Tb(i,j) / ccc ! for seabed stress + ! revp = 0 for classic evp, 1 for revised evp + cca = (brlx + revp)*massdti(i,j) + vrel * cosw + Cb ! kg/m^2 s + + ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel * sinw ! kg/m^2 s + + ! compute the velocity components + cc1 = strintx(i,j) + forcex(i,j) + taux & + + massdti(i,j)*(brlx*uold + revp*uvel_init(i,j)) + + uvel(i,j) = (ccb*vold + cc1) / cca ! m/s + + ! calculate seabed stress component for outputs + ! only needed on last iteration. + taubx(i,j) = -uvel(i,j)*Cb + + enddo ! ij + + end subroutine stepu_C + +!======================================================================= +! Integration of the momentum equation to find velocity v at N location on C grid + + subroutine stepv_C (nx_block, ny_block, & + icell, Cw, & + indxi, indxj, & + aiu, & + uocn, vocn, & + watery, forcey, & + massdti, fm, & + strinty, tauby, & + vvel_init, & + uvel, vvel, & + Tb) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icell ! total count when ice[en]mask is true + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxi , & ! compressed index in i-direction + indxj ! compressed index in j-direction + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + Tb, & ! seabed stress factor (N/m^2) + vvel_init,& ! y-component of velocity (m/s), beginning of timestep + aiu , & ! ice fraction on [en]-grid + watery , & ! for ocean stress calculation, y (m/s) + forcey , & ! work array: combined atm stress and ocn tilt, y + massdti , & ! mass of n-cell/dt (kg/m^2 s) + uocn , & ! ocean current, x-direction (m/s) + vocn , & ! ocean current, y-direction (m/s) + fm , & ! Coriolis param. 
* mass in n-cell (kg/s) + strinty , & ! divergence of internal ice stress, y (N/m^2) + Cw , & ! ocean-ice neutral drag coefficient + uvel ! x-component of velocity (m/s) interpolated to N location + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + vvel , & ! y-component of velocity (m/s) + tauby ! seabed stress, y-direction (N/m^2) + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + real (kind=dbl_kind) :: & + uold, vold , & ! old-time uvel, vvel + vrel , & ! relative ice-ocean velocity + cca,ccb,ccc,cc2 , & ! intermediate variables + tauy , & ! part of ocean stress term + Cb , & ! complete seabed (basal) stress coeff + rhow ! + + character(len=*), parameter :: subname = '(stepv_C)' + + !----------------------------------------------------------------- + ! integrate the momentum equation + !----------------------------------------------------------------- + + call icepack_query_parameters(rhow_out=rhow) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + do ij =1, icell + i = indxi(ij) + j = indxj(ij) + + uold = uvel(i,j) + vold = vvel(i,j) + + ! (magnitude of relative ocean current)*rhow*drag*aice + vrel = aiu(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & + (vocn(i,j) - vold)**2) ! m/s + ! ice/ocean stress + tauy = vrel*watery(i,j) ! NOTE this is not the entire ocn stress + + ccc = sqrt(uold**2 + vold**2) + u0 + Cb = Tb(i,j) / ccc ! for seabed stress + ! revp = 0 for classic evp, 1 for revised evp + cca = (brlx + revp)*massdti(i,j) + vrel * cosw + Cb ! kg/m^2 s + + ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel * sinw ! kg/m^2 s + + ! compute the velocity components + cc2 = strinty(i,j) + forcey(i,j) + tauy & + + massdti(i,j)*(brlx*vold + revp*vvel_init(i,j)) + + vvel(i,j) = (-ccb*uold + cc2) / cca + + ! calculate seabed stress component for outputs + ! only needed on last iteration. + tauby(i,j) = -vvel(i,j)*Cb + + enddo ! ij + + end subroutine stepv_C + +!======================================================================= ! Calculation of the ice-ocean stress. ! ...the sign will be reversed later... ! @@ -760,10 +1177,7 @@ subroutine dyn_finish (nx_block, ny_block, & uvel, vvel, & uocn, vocn, & aiu, fm, & -! strintx, strinty, & -! strairx, strairy, & - strocnx, strocny, & - strocnxT, strocnyT) + strocnx, strocny) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -780,28 +1194,22 @@ subroutine dyn_finish (nx_block, ny_block, & vocn , & ! ocean current, y-direction (m/s) aiu , & ! ice fraction on u-grid fm ! Coriolis param. * mass in U-cell (kg/s) -! strintx , & ! divergence of internal ice stress, x (N/m^2) -! strinty , & ! divergence of internal ice stress, y (N/m^2) -! strairx , & ! stress on ice by air, x-direction -! strairy ! stress on ice by air, y-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & strocnx , & ! ice-ocean stress, x-direction strocny ! ice-ocean stress, y-direction - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - strocnxT, & ! ice-ocean stress, x-direction - strocnyT ! ice-ocean stress, y-direction - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - Cw ! ocean-ice neutral drag coefficient + Cw ! ocean-ice neutral drag coefficient ! local variables integer (kind=int_kind) :: & i, j, ij - real (kind=dbl_kind) :: vrel, rhow + real (kind=dbl_kind) :: & + vrel , & ! + rhow ! 
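+
+      ! For reference, a sketch of the coupling stress completed in the
+      ! loop below: with vrel = aiu*rhow*Cw*|uocn-u| (vrel is scaled by
+      ! aiu inside the loop), the x-component is
+      !    strocnx = vrel*((uocn-u)*cosw - (vocn-v)*sinw*sign(c1,fm)),
+      ! i.e. quadratic ice-ocean drag rotated through the turning angle.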
character(len=*), parameter :: subname = '(dyn_finish)'

@@ -810,13 +1218,6 @@ subroutine dyn_finish (nx_block, ny_block, &
      if (icepack_warnings_aborted()) call abort_ice(error_message=subname, &
         file=__FILE__, line=__LINE__)

-      do j = 1, ny_block
-      do i = 1, nx_block
-         strocnxT(i,j) = c0
-         strocnyT(i,j) = c0
-      enddo
-      enddo
-
      ! ocean-ice stress for coupling
      do ij =1, icellu
         i = indxui(ij)
@@ -830,7 +1231,7 @@ subroutine dyn_finish (nx_block, ny_block, &
!        strocny(i,j) = strocny(i,j) &
!                     - vrel*(vvel(i,j)*cosw + uvel(i,j)*sinw) * aiu(i,j)

-         ! update strocnx to most recent iterate and complete the term
+         ! update strocnx to most recent iterate and complete the term
         vrel = vrel * aiu(i,j)
         strocnx(i,j) = vrel*((uocn(i,j) - uvel(i,j))*cosw &
                            - (vocn(i,j) - vvel(i,j))*sinw*sign(c1,fm(i,j)))
@@ -842,10 +1243,6 @@ subroutine dyn_finish (nx_block, ny_block, &
!        strocnx(i,j) = -(strairx(i,j) + strintx(i,j))
!        strocny(i,j) = -(strairy(i,j) + strinty(i,j))

-         ! Prepare to convert to T grid
-         ! divide by aice for coupling
-         strocnxT(i,j) = strocnx(i,j) / aiu(i,j)
-         strocnyT(i,j) = strocny(i,j) / aiu(i,j)
      enddo

   end subroutine dyn_finish
@@ -853,16 +1250,16 @@ end subroutine dyn_finish

!=======================================================================
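+! For reference, a worked example of the LKD grounding scheme below
+! (illustrative numbers, not defaults): with au = 0.9, hwu = 10 m and
+! k1 = 8, the critical thickness is hcu = au*hwu/k1 = 1.125 m; mean ice
+! thickness hu = 2 m then gives
+!    Tbu = k2 * (hu - hcu) * exp(-alphab*(1 - au))
+!        = k2 * 0.875 * exp(-0.1*alphab),
+! and Tbu = 0 wherever hu <= hcu or hwu >= threshold_hw.
+!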
! Computes seabed (basal) stress factor Tbu (landfast ice) based on mean
! thickness and bathymetry data. LKD refers to linear keel draft. This
-! parameterization assumes that the largest keel draft varies linearly
+! parameterization assumes that the largest keel draft varies linearly
! with the mean thickness.
!
-! Lemieux, J. F., B. Tremblay, F. Dupont, M. Plante, G.C. Smith, D. Dumont (2015).
-! A basal stress parameterization form modeling landfast ice, J. Geophys. Res.
+! Lemieux, J. F., B. Tremblay, F. Dupont, M. Plante, G.C. Smith, D. Dumont (2015).
+! A basal stress parameterization for modeling landfast ice, J. Geophys. Res.
! Oceans, 120, 3157-3173.
!
-! Lemieux, J. F., F. Dupont, P. Blain, F. Roy, G.C. Smith, G.M. Flato (2016).
+! Lemieux, J. F., F. Dupont, P. Blain, F. Roy, G.C. Smith, G.M. Flato (2016).
! Improving the simulation of landfast ice by combining tensile strength and a
-! parameterization for grounded ridges, J. Geophys. Res. Oceans, 121, 7354-7368.
+! parameterization for grounded ridges, J. Geophys. Res. Oceans, 121, 7354-7368.
!
! author: JF Lemieux, Philippe Blain (ECCC)
!
@@ -873,72 +1270,86 @@ subroutine seabed_stress_factor_LKD (nx_block, ny_block, &
                                       icellu,           &
                                       indxui,  indxuj,  &
                                       vice,    aice,    &
-                                      hwater,  Tbu)
+                                      hwater,  Tbu,     &
+                                      grid_location)
+
+      use ice_grid, only: grid_neighbor_min, grid_neighbor_max

      integer (kind=int_kind), intent(in) :: &
         nx_block, ny_block, & ! block dimensions
-         icellu                ! no. of cells where icetmask = 1
+         icellu                ! no. of cells where ice[uen]mask = 1

      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
-         indxui  , & ! compressed index in i-direction
-         indxuj      ! compressed index in j-direction
+         indxui  , & ! compressed index in i-direction
+         indxuj      ! compressed index in j-direction

      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
-         aice    , & ! concentration of ice at tracer location
-         vice    , & ! volume per unit area of ice at tracer location (m)
-         hwater      ! water depth at tracer location (m)
+         aice    , & ! concentration of ice at tracer location
+         vice    , & ! volume per unit area of ice at tracer location (m)
+         hwater      ! water depth at tracer location (m)

      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: &
-         Tbu         ! seabed stress factor (N/m^2)
+         Tbu         ! seabed stress factor at 'grid_location' (N/m^2)
+
+      character(len=*), optional, intent(inout) :: &
+         grid_location ! grid location (U, E, N), U assumed if not present

      real (kind=dbl_kind) :: &
         au        , & ! concentration of ice at u location
         hu        , & ! volume per unit area of ice at u location (mean thickness, m)
         hwu       , & ! water depth at u location (m)
         docalc_tbu, & ! logical as real (c0 = no, c1 = yes): decides whether Tbu is calculated
-         hcu           ! critical thickness at u location (m)
+         hcu           ! critical thickness at u location (m)

      integer (kind=int_kind) :: &
         i, j, ij

-      character(len=*), parameter :: subname = '(seabed1_stress_coeff)'
-
+      character(len=char_len) :: &
+         l_grid_location ! local version of 'grid_location'
+
+      character(len=*), parameter :: subname = '(seabed_stress_factor_LKD)'
+
+      ! Assume U location (NE corner) if grid_location not present
+      if (.not. (present(grid_location))) then
+         l_grid_location = 'U'
+      else
+         l_grid_location = grid_location
+      endif
+
      do ij = 1, icellu
         i = indxui(ij)
         j = indxuj(ij)

-         ! convert quantities to u-location
-
-         hwu = min(hwater(i,j),hwater(i+1,j),hwater(i,j+1),hwater(i+1,j+1))
+         ! convert quantities to grid_location
+
+         hwu = grid_neighbor_min(hwater, i, j, l_grid_location)
+
+         docalc_tbu = merge(c1,c0,hwu < threshold_hw)

-         docalc_tbu = merge(c1,c0,hwu < threshold_hw)
-
-
-         au = max(aice(i,j),aice(i+1,j),aice(i,j+1),aice(i+1,j+1))
-         hu = max(vice(i,j),vice(i+1,j),vice(i,j+1),vice(i+1,j+1))
+
+         au = grid_neighbor_max(aice, i, j, l_grid_location)
+         hu = grid_neighbor_max(vice, i, j, l_grid_location)

         ! 1- calculate critical thickness
         hcu = au * hwu / k1

-         ! 2- calculate seabed stress factor
+         ! 2- calculate seabed stress factor
         Tbu(i,j) = docalc_tbu*k2 * max(c0,(hu - hcu)) * exp(-alphab * (c1 - au))

-!     endif
-
      enddo ! ij

-      end subroutine seabed_stress_factor_LKD
+   end subroutine seabed_stress_factor_LKD

!=======================================================================
-! Computes seabed (basal) stress factor Tbu (landfast ice) based on
-! probability of contact between the ITD and the seabed. The water depth
-! could take into account variations of the SSH. In the simplest
+! Computes seabed (basal) stress factor Tbu (landfast ice) based on
+! probability of contact between the ITD and the seabed. The water depth
+! could take into account variations of the SSH. In the simplest
! formulation, hwater is simply the value of the bathymetry. To calculate
-! the probability of contact, it is assumed that the bathymetry follows
-! a normal distribution with sigma_b = 2.5d0. An improvement would
-! be to provide the distribution based on high resolution data.
-!
-! Dupont, F. Dumont, D., Lemieux, J.F., Dumas-Lefebvre, E., Caya, A.
+! the probability of contact, it is assumed that the bathymetry follows
+! a normal distribution with sigma_b = 2.5d0. An improvement would
+! be to provide the distribution based on high resolution data.
+!
+! Dupont, F., Dumont, D., Lemieux, J.F., Dumas-Lefebvre, E., Caya, A.
! in prep.
!
! authors: D. Dumont, J.F. Lemieux, E. Dumas-Lefebvre, F. Dupont
@@ -947,34 +1358,50 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, &
                                        icellt, indxti, indxtj, &
                                        icellu, indxui, indxuj, &
                                        aicen, vicen,           &
-                                       hwater, Tbu)
+                                       hwater, Tbu,            &
+                                       TbE, TbN,               &
+                                       icelle, indxei, indxej, &
+                                       icelln, indxni, indxnj)
! use modules
-
+
      use ice_arrays_column, only: hin_max
      use ice_domain_size, only: ncat
+      use ice_grid, only: grid_neighbor_min, grid_neighbor_max

      integer (kind=int_kind), intent(in) :: &
         nx_block, ny_block, & ! block dimensions
-         icellt, icellu ! no.
of cells where icetmask = 1 - - integer (kind=int_kind), dimension (nx_block*ny_block), & - intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj , & ! compressed index in j-direction - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction - + icellt, icellu ! no. of cells where ice[tu]mask = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxti , & ! compressed index in i-direction + indxtj , & ! compressed index in j-direction + indxui , & ! compressed index in i-direction + indxuj ! compressed index in j-direction + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & hwater ! water depth at tracer location (m) - + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), intent(in) :: & aicen, & ! partial concentration for last thickness category in ITD vicen ! partial volume for last thickness category in ITD (m) - + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - Tbu ! seabed stress factor (N/m^2) + Tbu ! seabed stress factor at U location (N/m^2) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout), optional :: & + TbE, & ! seabed stress factor at E location (N/m^2) + TbN ! seabed stress factor at N location (N/m^2) + + integer (kind=int_kind), intent(in), optional :: & + icelle, icelln ! no. of cells where ice[en]mask = 1 -! local variables + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in), optional :: & + indxei , & ! compressed index in i-direction + indxej , & ! compressed index in j-direction + indxni , & ! compressed index in i-direction + indxnj ! compressed index in j-direction + +! local variables integer (kind=int_kind) :: & i, j, ij, ii, n @@ -984,7 +1411,7 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & ncat_i = 100 ! number of ice thickness categories (log-normal) real (kind=dbl_kind), parameter :: & - max_depth = 50.0_dbl_kind, & ! initial range of log-normal distribution + max_depth = 50.0_dbl_kind, & ! initial range of log-normal distribution mu_s = 0.1_dbl_kind, & ! friction coefficient sigma_b = 2.5d0 ! Standard deviation of bathymetry @@ -996,28 +1423,38 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & real (kind=dbl_kind), dimension(ncat_b) :: & ! normal dist for bathymetry y_n, & ! center of bathymetry categories (m) b_n, & ! probability density function (bathymetry, 1/m) - P_y ! probability for each bathymetry category + P_y ! probability for each bathymetry category real (kind=dbl_kind), dimension(ncat) :: & - vcat, acat + vcat, acat ! vice, aice temporary arrays integer, dimension(ncat_b) :: & - tmp ! Temporary vector tmp = merge(1,0,gt) - + tmp ! Temporary vector tmp = merge(1,0,gt) + logical, dimension (ncat_b) :: & - gt + gt ! + + real (kind=dbl_kind) :: & + wid_i, wid_b , & ! parameters for PDFs + mu_i, sigma_i , & ! + mu_b, m_i, v_i, & ! + atot, x_kmax , & ! + cut , & ! + rhoi, rhow , & ! + gravit , & ! + pi, puny ! - real (kind=dbl_kind) :: wid_i, wid_b, mu_i, sigma_i, mu_b, m_i, v_i ! parameters for PDFs - real (kind=dbl_kind), dimension(ncat_i):: tb_tmp - real (kind=dbl_kind), dimension (nx_block,ny_block):: Tbt ! seabed stress factor at t point (N/m^2) - real (kind=dbl_kind) :: atot, x_kmax - real (kind=dbl_kind) :: cut, rhoi, rhow, gravit, pi, puny + real (kind=dbl_kind), dimension(ncat_i) :: & + tb_tmp - character(len=*), parameter :: subname = '(seabed2_stress_coeff)' + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + Tbt ! 
seabed stress factor at t point (N/m^2)
+
+      character(len=*), parameter :: subname = '(seabed_stress_factor_prob)'

      call icepack_query_parameters(rhow_out=rhow, rhoi_out=rhoi)
      call icepack_query_parameters(gravit_out=gravit)
-      call icepack_query_parameters(pi_out=pi)
+      call icepack_query_parameters(pi_out=pi)
      call icepack_query_parameters(puny_out=puny)

      Tbt=c0
@@ -1034,7 +1471,7 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, &
            wid_i = max_depth/ncat_i ! width of ice categories
            wid_b = c6*sigma_b/ncat_b ! width of bathymetry categories (6 sigma_b = 2x3 sigma_b)

-            x_k = (/( wid_i*( real(i,kind=dbl_kind) - p5 ), i=1, ncat_i )/)
+            x_k = (/( wid_i*( real(i,kind=dbl_kind) - p5 ), i=1, ncat_i )/)
            y_n = (/( ( mu_b-c3*sigma_b )+( real(i,kind=dbl_kind) - p5 )*( c6*sigma_b/ncat_b ), i=1, ncat_b )/)

            vcat(1:ncat) = vicen(i,j,1:ncat)
@@ -1052,12 +1489,12 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, &
            sigma_i = sqrt(log(c1 + v_i/m_i**2))

            ! max thickness associated with percentile of log-normal PDF
-            ! x_kmax=x997 was obtained from an optimization procedure (Dupont et al.)
+            ! x_kmax=x997 was obtained from an optimization procedure (Dupont et al.)
            x_kmax = exp(mu_i + sqrt(c2*sigma_i)*1.9430d0)

            ! Set x_kmax to hlev of the last category where there is ice
-            ! when there is no ice in the last category
+            ! when there is no ice in the last category
            cut = x_k(ncat_i)
            do n = ncat, 1, -1
               if (acat(n) < puny) then
@@ -1094,25 +1531,48 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, &
            endif
         enddo

-      do ij = 1, icellu
-         i = indxui(ij)
-         j = indxuj(ij)
-         ! convert quantities to u-location
-         Tbu(i,j) = max(Tbt(i,j),Tbt(i+1,j),Tbt(i,j+1),Tbt(i+1,j+1))
-      enddo ! ij
-
-      end subroutine seabed_stress_factor_prob
-
-!=======================================================================
+      if (grid_ice == "B") then
+         do ij = 1, icellu
+            i = indxui(ij)
+            j = indxuj(ij)
+            ! convert quantities to U-location
+            Tbu(i,j) = grid_neighbor_max(Tbt, i, j, 'U')
+         enddo ! ij
+      elseif (grid_ice == "C" .or. grid_ice == "CD") then
+         if (present(TbE)    .and. present(TbN)    .and. &
+             present(icelle) .and. present(icelln) .and. &
+             present(indxei) .and. present(indxej) .and. &
+             present(indxni) .and. present(indxnj)) then
+
+            do ij = 1, icelle
+               i = indxei(ij)
+               j = indxej(ij)
+               ! convert quantities to E-location
+               TbE(i,j) = grid_neighbor_max(Tbt, i, j, 'E')
+            enddo
+            do ij = 1, icelln
+               i = indxni(ij)
+               j = indxnj(ij)
+               ! convert quantities to N-location
+               TbN(i,j) = grid_neighbor_max(Tbt, i, j, 'N')
+            enddo
+
+         else
+            call abort_ice(subname // ' insufficient number of arguments for grid_ice: ' // grid_ice)
+         endif
+      endif
+
+   end subroutine seabed_stress_factor_prob
+
+!=======================================================================
! Computes principal stresses for comparison with the theoretical
-! yield curve; northeast values
+! yield curve
!
! author: Elizabeth C. Hunke, LANL

      subroutine principal_stress(nx_block,   ny_block,  &
-                                  stressp_1,  stressm_1, &
-                                  stress12_1, strength,  &
+                                  stressp,    stressm,   &
+                                  stress12,   strength,  &
                                  sig1,       sig2,      &
                                  sigP)

@@ -1120,9 +1580,9 @@ subroutine principal_stress(nx_block, ny_block, &
      nx_block, ny_block ! block dimensions

   real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
-      stressp_1 , & ! sigma11 + sigma22
-      stressm_1 , & ! sigma11 - sigma22
-      stress12_1, & ! sigma12
+      stressp   , & ! sigma11 + sigma22
+      stressm   , & ! sigma11 - sigma22
+      stress12  , & ! sigma12
      strength      !
for normalization of sig1 and sig2 real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & @@ -1132,9 +1592,11 @@ subroutine principal_stress(nx_block, ny_block, & ! local variables - integer (kind=int_kind) :: i, j + integer (kind=int_kind) :: & + i, j - real (kind=dbl_kind) :: puny + real (kind=dbl_kind) :: & + puny character(len=*), parameter :: subname = '(principal_stress)' @@ -1146,16 +1608,16 @@ subroutine principal_stress(nx_block, ny_block, & do j = 1, ny_block do i = 1, nx_block if (strength(i,j) > puny) then - ! ice internal pressure - sigP(i,j) = -p5*stressp_1(i,j) - + ! ice internal pressure + sigP(i,j) = -p5*stressp(i,j) + ! normalized principal stresses - sig1(i,j) = (p5*(stressp_1(i,j) & - + sqrt(stressm_1(i,j)**2+c4*stress12_1(i,j)**2))) & + sig1(i,j) = (p5*(stressp(i,j) & + + sqrt(stressm(i,j)**2+c4*stress12(i,j)**2))) & + / strength(i,j) + sig2(i,j) = (p5*(stressp(i,j) & + - sqrt(stressm(i,j)**2+c4*stress12(i,j)**2))) & / strength(i,j) - sig2(i,j) = (p5*(stressp_1(i,j) & - - sqrt(stressm_1(i,j)**2+c4*stress12_1(i,j)**2))) & - / strength(i,j) else sig1(i,j) = spval_dbl sig2(i,j) = spval_dbl @@ -1167,7 +1629,6 @@ subroutine principal_stress(nx_block, ny_block, & end subroutine principal_stress !======================================================================= - ! Compute deformations for mechanical redistribution ! ! author: Elizabeth C. Hunke, LANL @@ -1191,8 +1652,7 @@ subroutine deformations (nx_block, ny_block, & nx_block, ny_block, & ! block dimensions icellt ! no. of cells where icetmask = 1 - integer (kind=int_kind), dimension (nx_block*ny_block), & - intent(in) :: & + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxti , & ! compressed index in i-direction indxtj ! compressed index in j-direction @@ -1206,9 +1666,8 @@ subroutine deformations (nx_block, ny_block, & cym , & ! 0.5*HTE - 1.5*HTW cxm , & ! 0.5*HTN - 1.5*HTS tarear ! 1/tarea - - real (kind=dbl_kind), dimension (nx_block,ny_block), & - intent(inout) :: & + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & shear , & ! strain rate II component (1/s) divu , & ! strain rate I component, velocity divergence (1/s) rdg_conv , & ! convergence term for ridging (1/s) @@ -1227,15 +1686,15 @@ subroutine deformations (nx_block, ny_block, & tmp ! useful combination character(len=*), parameter :: subname = '(deformations)' - + do ij = 1, icellt i = indxti(ij) j = indxtj(ij) - - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! strain rates + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- call strain_rates (nx_block, ny_block, & i, j, & uvel, vvel, & @@ -1250,9 +1709,9 @@ subroutine deformations (nx_block, ny_block, & shearse, shearsw, & Deltane, Deltanw, & Deltase, Deltasw ) - !----------------------------------------------------------------- - ! deformations for mechanical redistribution - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! 
deformations for mechanical redistribution
+         !-----------------------------------------------------------------
         divu(i,j) = p25*(divune + divunw + divuse + divusw) * tarear(i,j)
         tmp = p25*(Deltane + Deltanw + Deltase + Deltasw) * tarear(i,j)
         rdg_conv(i,j)  = -min(divu(i,j),c0)
@@ -1269,7 +1728,212 @@ subroutine deformations (nx_block, ny_block, &
   end subroutine deformations

!=======================================================================
+! Compute deformations for mechanical redistribution at T point (CD grid)
+!
+! author: JF Lemieux, ECCC
+! Nov 2021
+
+      subroutine deformationsCD_T (nx_block,  ny_block,  &
+                                   icellt,               &
+                                   indxti,    indxtj,    &
+                                   uvelE,     vvelE,     &
+                                   uvelN,     vvelN,     &
+                                   dxN,       dyE,       &
+                                   dxT,       dyT,       &
+                                   tarear,               &
+                                   shear,     divu,      &
+                                   rdg_conv,  rdg_shear )
+
+      use ice_constants, only: p5
+
+      integer (kind=int_kind), intent(in) :: &
+         nx_block, ny_block, & ! block dimensions
+         icellt                ! no. of cells where icetmask = 1
+
+      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
+         indxti  , & ! compressed index in i-direction
+         indxtj      ! compressed index in j-direction
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
+         uvelE   , & ! x-component of velocity (m/s) at the E point
+         vvelE   , & ! y-component of velocity (m/s) at the E point
+         uvelN   , & ! x-component of velocity (m/s) at the N point
+         vvelN   , & ! y-component of velocity (m/s) at the N point
+         dxN     , & ! width of N-cell through the middle (m)
+         dyE     , & ! height of E-cell through the middle (m)
+         dxT     , & ! width of T-cell through the middle (m)
+         dyT     , & ! height of T-cell through the middle (m)
+         tarear      ! 1/tarea
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: &
+         shear   , & ! strain rate II component (1/s)
+         divu    , & ! strain rate I component, velocity divergence (1/s)
+         rdg_conv, & ! convergence term for ridging (1/s)
+         rdg_shear   ! shear term for ridging (1/s)
+
+      ! local variables
+
+      integer (kind=int_kind) :: &
+         i, j, ij
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block) :: &
+         divT    , & ! divergence at T point
+         tensionT, & ! tension at T point
+         shearT  , & ! shear at T point
+         DeltaT      ! Delta at T point
+
+      real (kind=dbl_kind) :: &
+         tmp         ! useful combination
+
+      character(len=*), parameter :: subname = '(deformationsCD_T)'
+
+      !-----------------------------------------------------------------
+      ! strain rates
+      ! NOTE these are actually strain rates * area  (m^2/s)
+      !-----------------------------------------------------------------
+
+      call strain_rates_T (nx_block  , ny_block    ,  &
+                           icellt    ,                &
+                           indxti(:) , indxtj  (:) ,  &
+                           uvelE (:,:), vvelE  (:,:), &
+                           uvelN (:,:), vvelN  (:,:), &
+                           dxN   (:,:), dyE    (:,:), &
+                           dxT   (:,:), dyT    (:,:), &
+                           divT  (:,:), tensionT(:,:),&
+                           shearT(:,:), DeltaT (:,:) )
+
+      do ij = 1, icellt
+         i = indxti(ij)
+         j = indxtj(ij)
+
+         !-----------------------------------------------------------------
+         ! deformations for mechanical redistribution
+         !-----------------------------------------------------------------
+         divu(i,j) = divT(i,j) * tarear(i,j)
+         tmp = DeltaT(i,j) * tarear(i,j)
+         rdg_conv(i,j)  = -min(divu(i,j),c0)
+         rdg_shear(i,j) = p5*(tmp-abs(divu(i,j)))
+
+         ! diagnostic only
+         ! shear = sqrt(tension**2 + shearing**2)
+         shear(i,j) = tarear(i,j)*sqrt( tensionT(i,j)**2 + shearT(i,j)**2 )
+
+      enddo ! ij
+
+      end subroutine deformationsCD_T
+
+
+!=======================================================================
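+! For reference (same decomposition as the B-grid deformations above):
+! with div = divT*tarear and Delta = DeltaT*tarear, the ridging terms
+! are rdg_conv = -min(div,c0) and rdg_shear = p5*(Delta - |div|); e.g.
+! div = -2e-7 1/s and Delta = 3e-7 1/s (illustrative values) give
+! rdg_conv = 2e-7 1/s and rdg_shear = 0.5e-7 1/s.
+!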
+! Compute deformations for mechanical redistribution at T point (C grid)
+!
+! author: JF Lemieux, ECCC
+! Nov 2021
+
+      subroutine deformationsC_T (nx_block,  ny_block,  &
+                                  icellt,               &
+                                  indxti,    indxtj,    &
+                                  uvelE,     vvelE,     &
+                                  uvelN,     vvelN,     &
+                                  dxN,       dyE,       &
+                                  dxT,       dyT,       &
+                                  tarear,    uarea,     &
+                                  shearU,               &
+                                  shear,     divu,      &
+                                  rdg_conv,  rdg_shear )
+
+      use ice_constants, only: p5
+
+      integer (kind=int_kind), intent(in) :: &
+         nx_block, ny_block, & ! block dimensions
+         icellt                ! no. of cells where icetmask = 1
+
+      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
+         indxti  , & ! compressed index in i-direction
+         indxtj      ! compressed index in j-direction
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
+         uvelE   , & ! x-component of velocity (m/s) at the E point
+         vvelE   , & ! y-component of velocity (m/s) at the E point
+         uvelN   , & ! x-component of velocity (m/s) at the N point
+         vvelN   , & ! y-component of velocity (m/s) at the N point
+         dxN     , & ! width of N-cell through the middle (m)
+         dyE     , & ! height of E-cell through the middle (m)
+         dxT     , & ! width of T-cell through the middle (m)
+         dyT     , & ! height of T-cell through the middle (m)
+         tarear  , & ! 1/tarea
+         uarea   , & ! area of U cell
+         shearU      ! shearing strain rate at the U point
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: &
+         shear   , & ! strain rate II component (1/s)
+         divu    , & ! strain rate I component, velocity divergence (1/s)
+         rdg_conv, & ! convergence term for ridging (1/s)
+         rdg_shear   ! shear term for ridging (1/s)
+
+      ! local variables
+
+      integer (kind=int_kind) :: &
+         i, j, ij
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block) :: &
+         divT    , & ! divergence at T point
+         tensionT, & ! tension at T point
+         shearT  , & ! shear at T point
+         DeltaT      ! Delta at T point
+
+      real (kind=dbl_kind) :: &
+         tmp     , & ! useful combination
+         shearTsqr   ! strain rates squared at T point
+
+      character(len=*), parameter :: subname = '(deformationsC_T)'
+
+      !-----------------------------------------------------------------
+      ! strain rates
+      ! NOTE these are actually strain rates * area  (m^2/s)
+      !-----------------------------------------------------------------
+
+      call strain_rates_T (nx_block  , ny_block    ,  &
+                           icellt    ,                &
+                           indxti(:) , indxtj  (:) ,  &
+                           uvelE (:,:), vvelE  (:,:), &
+                           uvelN (:,:), vvelN  (:,:), &
+                           dxN   (:,:), dyE    (:,:), &
+                           dxT   (:,:), dyT    (:,:), &
+                           divT  (:,:), tensionT(:,:),&
+                           shearT(:,:), DeltaT (:,:) )
+
+      ! DeltaT is calculated by strain_rates_T but is replaced by the calculation below.
+
+      do ij = 1, icellt
+         i = indxti(ij)
+         j = indxtj(ij)
+
+         !-----------------------------------------------------------------
+         ! deformations for mechanical redistribution
+         !-----------------------------------------------------------------
+
+         shearTsqr = (shearU(i  ,j  )**2 * uarea(i  ,j  )  &
+                    + shearU(i  ,j-1)**2 * uarea(i  ,j-1)  &
+                    + shearU(i-1,j-1)**2 * uarea(i-1,j-1)  &
+                    + shearU(i-1,j  )**2 * uarea(i-1,j  )) &
+                    / (uarea(i,j)+uarea(i,j-1)+uarea(i-1,j-1)+uarea(i-1,j))
+
+         DeltaT(i,j) = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearTsqr))
+
+         divu(i,j) = divT(i,j) * tarear(i,j)
+         tmp = DeltaT(i,j) * tarear(i,j)
+         rdg_conv(i,j)  = -min(divu(i,j),c0)
+         rdg_shear(i,j) = p5*(tmp-abs(divu(i,j)))
+
+         ! diagnostic only: shear = sqrt(tension**2 + shearing**2)
+         ! (whether shearTsqr should be used here instead is an open question)
+         shear(i,j) = tarear(i,j)*sqrt( tensionT(i,j)**2 + shearT(i,j)**2 )
+
+      enddo ! ij
+
+      end subroutine deformationsC_T
+
+!=======================================================================
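+! For reference, deformationsC_T above recomputes DeltaT using
+! shearU**2 averaged from the four surrounding U points, weighted by
+! uarea; with equal cell areas the weights reduce to a simple mean,
+!    shearTsqr = (shearU(i  ,j)**2 + shearU(i  ,j-1)**2
+!               + shearU(i-1,j)**2 + shearU(i-1,j-1)**2) / c4,
+! a sketch only; the code keeps the general area-weighted form.
+!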
! Compute strain rates
!
! author: Elizabeth C. Hunke, LANL

@@ -1293,10 +1957,10 @@ subroutine strain_rates (nx_block, ny_block, &

   integer (kind=int_kind), intent(in) :: &
      nx_block, ny_block  ! block dimensions
-
-   integer (kind=int_kind) :: &
+
+   integer (kind=int_kind), intent(in) :: &
      i, j                ! indices
-
+
   real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
      uvel    , & ! x-component of velocity (m/s)
      vvel    , & ! y-component of velocity (m/s)
      dxt     , & ! width of T-cell through the middle (m)
      dyt     , & ! height of T-cell through the middle (m)
      cyp     , & ! 1.5*HTE - 0.5*HTW
      cxp     , & ! 1.5*HTN - 0.5*HTS
      cym     , & ! 0.5*HTE - 1.5*HTW
      cxm         ! 0.5*HTN - 1.5*HTS
-
+
   real (kind=dbl_kind), intent(out):: &            ! at each corner :
      divune, divunw, divuse, divusw            , & ! divergence
      tensionne, tensionnw, tensionse, tensionsw, & ! tension
      shearne, shearnw, shearse, shearsw        , & ! shearing
      Deltane, Deltanw, Deltase, Deltasw            ! Delta
-
+
   character(len=*), parameter :: subname = '(strain_rates)'
-
+
   !-----------------------------------------------------------------
   ! strain rates
   ! NOTE these are actually strain rates * area  (m^2/s)
   !-----------------------------------------------------------------
@@ -1349,7 +2013,7 @@ subroutine strain_rates (nx_block, ny_block, &
                - cxp(i,j)*uvel(i-1,j-1) + dxt(i,j)*uvel(i-1,j  )
      shearse = -cym(i,j)*vvel(i  ,j-1) - dyt(i,j)*vvel(i-1,j-1) &
                - cxp(i,j)*uvel(i  ,j-1) + dxt(i,j)*uvel(i  ,j  )
-
+
      ! Delta (in the denominator of zeta, eta)
      Deltane = sqrt(divune**2 + e_factor*(tensionne**2 + shearne**2))
      Deltanw = sqrt(divunw**2 + e_factor*(tensionnw**2 + shearnw**2))
@@ -1358,147 +2022,939 @@
   end subroutine strain_rates

-!=======================================================================
-! Computes viscous coefficients and replacement pressure for stress
-! calculations. Note that tensile strength is included here.
-!
-! Hibler, W. D. (1979). A dynamic thermodynamic sea ice model. J. Phys.
-! Oceanogr., 9, 817-846.
-!
-! Konig Beatty, C. and Holland, D. M. (2010). Modeling landfast ice by
-! adding tensile strength. J. Phys. Oceanogr. 40, 185-198.
-!
-! Lemieux, J. F. et al. (2016). Improving the simulation of landfast ice
-! by combining tensile strength and a parameterization for grounded ridges.
-! J. Geophys. Res. Oceans, 121, 7354-7368.
-
-      subroutine viscous_coeffs_and_rep_pressure (strength, tinyarea, &
-                                                  Deltane,  Deltanw,  &
-                                                  Deltasw,  Deltase,  &
-                                                  zetax2ne, zetax2nw, &
-                                                  zetax2sw, zetax2se, &
-                                                  etax2ne,  etax2nw,  &
-                                                  etax2sw,  etax2se,  &
-                                                  rep_prsne, rep_prsnw,&
-                                                  rep_prssw, rep_prsse,&
-                                                  capping)
+!=======================================================================
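+! For reference, the strain-rate routines that follow all use
+!    Delta = sqrt(div**2 + e_factor*(tension**2 + shear**2)),
+! where e_factor = e_yieldcurve**2 / e_plasticpot**4 (see the module
+! declarations); with e_yieldcurve = e_plasticpot = e this reduces to
+! the classic Delta = sqrt(div**2 + (tension**2 + shear**2)/e**2) of
+! Hibler (1979).
+!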
+! Compute dtsd (div, tension, shear, delta) strain rates at the T point
+!
+! author: JF Lemieux, ECCC
+! Nov 2021
+
+      subroutine strain_rates_Tdtsd (nx_block, ny_block, &
+                                     icellt,             &
+                                     indxti,   indxtj,   &
+                                     uvelE,    vvelE,    &
+                                     uvelN,    vvelN,    &
+                                     dxN,      dyE,      &
+                                     dxT,      dyT,      &
+                                     divT,     tensionT, &
+                                     shearT,   DeltaT )
+
+      integer (kind=int_kind), intent(in) :: &
+         nx_block, ny_block, & ! block dimensions
+         icellt
+
+      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
+         indxti  , & ! compressed index in i-direction
+         indxtj      ! compressed index in j-direction
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
+         uvelE   , & ! x-component of velocity (m/s) at the E point
+         vvelE   , & ! y-component of velocity (m/s) at the E point
+         uvelN   , & ! x-component of velocity (m/s) at the N point
+         vvelN   , & ! y-component of velocity (m/s) at the N point
+         dxN     , & ! width of N-cell through the middle (m)
+         dyE     , & ! height of E-cell through the middle (m)
+         dxT     , & ! width of T-cell through the middle (m)
+         dyT         ! height of T-cell through the middle (m)
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: &
+         divT    , & ! divergence at T point
+         tensionT, & ! tension at T point
+         shearT  , & ! shear at T point
+         DeltaT      ! strain rates at the T point
+
+      ! local variables
+
+      integer (kind=int_kind) :: &
+         ij, i, j    ! indices
+
+      character(len=*), parameter :: subname = '(strain_rates_Tdtsd)'
+
+      !-----------------------------------------------------------------
+      ! strain rates
+      ! NOTE these are actually strain rates * area  (m^2/s)
+      !-----------------------------------------------------------------
+
+      ! compute divT, tensionT
+      call strain_rates_Tdt (nx_block, ny_block, &
+                             icellt,             &
+                             indxti,   indxtj,   &
+                             uvelE,    vvelE,    &
+                             uvelN,    vvelN,    &
+                             dxN,      dyE,      &
+                             dxT,      dyT,      &
+                             divT,     tensionT )
+
+      shearT(:,:) = c0
+      deltaT(:,:) = c0
+
+      do ij = 1, icellt
+         i = indxti(ij)
+         j = indxtj(ij)
+
+         ! shearing strain rate  =  2*e_12
+         shearT(i,j) = (dxT(i,j)**2)*(uvelN(i,j)/dxN(i,j) - uvelN(i,j-1)/dxN(i,j-1)) &
+                     + (dyT(i,j)**2)*(vvelE(i,j)/dyE(i,j) - vvelE(i-1,j)/dyE(i-1,j))
+
+         ! Delta (in the denominator of zeta, eta)
+         DeltaT(i,j) = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearT(i,j)**2))
+
+      enddo
+
+      end subroutine strain_rates_Tdtsd
+
+!=======================================================================
+! Compute the dt (div, tension) strain rates at the T point
+!
+! author: JF Lemieux, ECCC
+! Nov 2021
+
+      subroutine strain_rates_Tdt (nx_block, ny_block, &
+                                   icellt,             &
+                                   indxti,   indxtj,   &
+                                   uvelE,    vvelE,    &
+                                   uvelN,    vvelN,    &
+                                   dxN,      dyE,      &
+                                   dxT,      dyT,      &
+                                   divT,     tensionT )
+
+      integer (kind=int_kind), intent(in) :: &
+         nx_block, ny_block, & ! block dimensions
+         icellt
+
+      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
+         indxti  , & ! compressed index in i-direction
+         indxtj      ! compressed index in j-direction
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
+         uvelE   , & ! x-component of velocity (m/s) at the E point
+         vvelE   , & ! y-component of velocity (m/s) at the E point
+         uvelN   , & ! x-component of velocity (m/s) at the N point
+         vvelN   , & ! y-component of velocity (m/s) at the N point
+         dxN     , & ! width of N-cell through the middle (m)
+         dyE     , & ! height of E-cell through the middle (m)
+         dxT     , & ! width of T-cell through the middle (m)
+         dyT         ! height of T-cell through the middle (m)
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: &
+         divT    , & ! divergence at T point
+         tensionT    ! tension at T point
+
+      ! local variables
+
+      integer (kind=int_kind) :: &
+         ij, i, j    ! indices
+
+      character(len=*), parameter :: subname = '(strain_rates_Tdt)'
+
+      !-----------------------------------------------------------------
+      ! strain rates
+      ! NOTE these are actually strain rates * area  (m^2/s)
+      !-----------------------------------------------------------------
+
+      divT    (:,:) = c0
+      tensionT(:,:) = c0
+
+      do ij = 1, icellt
+         i = indxti(ij)
+         j = indxtj(ij)
+
+         ! divergence  =  e_11 + e_22
+         divT(i,j)= dyE(i,j)*uvelE(i  ,j  ) - dyE(i-1,j)*uvelE(i-1,j  ) &
+                  + dxN(i,j)*vvelN(i  ,j  ) - dxN(i,j-1)*vvelN(i  ,j-1)
+
+         ! tension strain rate  =  e_11 - e_22
+         tensionT(i,j) = (dyT(i,j)**2)*(uvelE(i,j)/dyE(i,j) - uvelE(i-1,j)/dyE(i-1,j)) &
+                       - (dxT(i,j)**2)*(vvelN(i,j)/dxN(i,j) - vvelN(i,j-1)/dxN(i,j-1))
+
+      enddo
+
+      end subroutine strain_rates_Tdt
+
+!=======================================================================
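+! For reference, a sketch of the boundary handling used below: ghost
+! velocities are built before differencing, e.g. if the N point at i+1
+! is land (npm(i+1,j) = 0) while i is ocean,
+!    uNip1j = ratiodxN(i,j)*uvelN(i,j), ratiodxN = -dxN(i+1,j)/dxN(i,j),
+! i.e. a mirrored velocity scaled by the metric ratio; when both points
+! are ocean the mask factors reduce uNip1j to uvelN(i+1,j).
+!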
+! Compute strain rates at the U point including boundary conditions
+!
+! author: JF Lemieux, ECCC
+! Nov 2021
+
+      subroutine strain_rates_U (nx_block,  ny_block,  &
+                                 icellu,               &
+                                 indxui,    indxuj,    &
+                                 uvelE,     vvelE,     &
+                                 uvelN,     vvelN,     &
+                                 uvelU,     vvelU,     &
+                                 dxE,       dyN,       &
+                                 dxU,       dyU,       &
+                                 ratiodxN,  ratiodxNr, &
+                                 ratiodyE,  ratiodyEr, &
+                                 epm,       npm,       &
+                                 divergU,   tensionU,  &
+                                 shearU,    DeltaU )
+
+      integer (kind=int_kind), intent(in) :: &
+         nx_block, ny_block, & ! block dimensions
+         icellu
+
+      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
+         indxui  , & ! compressed index in i-direction
+         indxuj      ! compressed index in j-direction
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
+         uvelE    , & ! x-component of velocity (m/s) at the E point
+         vvelE    , & ! y-component of velocity (m/s) at the E point
+         uvelN    , & ! x-component of velocity (m/s) at the N point
+         vvelN    , & ! y-component of velocity (m/s) at the N point
+         uvelU    , & ! x-component of velocity (m/s) interp. at U point
+         vvelU    , & ! y-component of velocity (m/s) interp. at U point
+         dxE      , & ! width of E-cell through the middle (m)
+         dyN      , & ! height of N-cell through the middle (m)
+         dxU      , & ! width of U-cell through the middle (m)
+         dyU      , & ! height of U-cell through the middle (m)
+         ratiodxN , & ! -dxN(i+1,j)/dxN(i,j) for BCs
+         ratiodxNr, & ! -dxN(i,j)/dxN(i+1,j) for BCs
+         ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) for BCs
+         ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) for BCs
+         epm      , & ! E-cell mask
+         npm          ! N-cell mask
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: &
+         divergU  , & ! divergence at U point
+         tensionU , & ! tension at U point
+         shearU   , & ! shear at U point
+         DeltaU       ! Delta at the U point
+
+      ! local variables
+
+      integer (kind=int_kind) :: &
+         ij, i, j     ! indices
+
+      real (kind=dbl_kind) :: &
+         uNip1j, uNij, vEijp1, vEij, uEijp1, uEij, vNip1j, vNij ! boundary-adjusted velocities at neighbouring N and E points
+
+      character(len=*), parameter :: subname = '(strain_rates_U)'
+
+      !-----------------------------------------------------------------
+      ! strain rates
+      ! NOTE these are actually strain rates * area  (m^2/s)
+      !-----------------------------------------------------------------
+
+      divergU (:,:) = c0
+      tensionU(:,:) = c0
+      shearU  (:,:) = c0
+      deltaU  (:,:) = c0
+
+      do ij = 1, icellu
+         i = indxui(ij)
+         j = indxuj(ij)
+
+         uNip1j = uvelN(i+1,j) * npm(i+1,j) &
+                +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * uvelN(i,j)
+         uNij   = uvelN(i,j) * npm(i,j) &
+                +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * uvelN(i+1,j)
+         vEijp1 = vvelE(i,j+1) * epm(i,j+1) &
+                +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * vvelE(i,j)
+         vEij   = vvelE(i,j) * epm(i,j) &
+                +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * vvelE(i,j+1)
+
+         ! divergence  =  e_11 + e_22
+         divergU (i,j) = dyU(i,j) * ( uNip1j - uNij ) &
+                       + uvelU(i,j) * ( dyN(i+1,j) - dyN(i,j) ) &
+                       + dxU(i,j) * ( vEijp1 - vEij ) &
+                       + vvelU(i,j) * ( dxE(i,j+1) - dxE(i,j) )
+
+         ! tension strain rate  =  e_11 - e_22
+         tensionU(i,j) = dyU(i,j) * ( uNip1j - uNij ) &
+                       - uvelU(i,j) * ( dyN(i+1,j) - dyN(i,j) ) &
+                       - dxU(i,j) * ( vEijp1 - vEij ) &
+                       + vvelU(i,j) * ( dxE(i,j+1) - dxE(i,j) )
+
+         uEijp1 = uvelE(i,j+1) * epm(i,j+1) &
+                +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * uvelE(i,j)
+         uEij   = uvelE(i,j) * epm(i,j) &
+                +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * uvelE(i,j+1)
+         vNip1j = vvelN(i+1,j) * npm(i+1,j) &
+                +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * vvelN(i,j)
+         vNij   = vvelN(i,j) * npm(i,j) &
+                +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * vvelN(i+1,j)
+
+         !
shearing strain rate = 2*e_12 + shearU(i,j) = dxU(i,j) * ( uEijp1 - uEij ) & + - uvelU(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & + + dyU(i,j) * ( vNip1j - vNij ) & + - vvelU(i,j) * ( dyN(i+1,j) - dyN(i,j) ) + + ! Delta (in the denominator of zeta, eta) + DeltaU(i,j) = sqrt(divergU(i,j)**2 + e_factor*(tensionU(i,j)**2 + shearU(i,j)**2)) + + enddo + + end subroutine strain_rates_U + +!======================================================================= +! Computes viscosities and replacement pressure for stress +! calculations. Note that tensile strength is included here. +! +! Hibler, W. D. (1979). A dynamic thermodynamic sea ice model. J. Phys. +! Oceanogr., 9, 817-846. +! +! Konig Beatty, C. and Holland, D. M. (2010). Modeling landfast ice by +! adding tensile strength. J. Phys. Oceanogr. 40, 185-198. +! +! Lemieux, J. F. et al. (2016). Improving the simulation of landfast ice +! by combining tensile strength and a parameterization for grounded ridges. +! J. Geophys. Res. Oceans, 121, 7354-7368. + + subroutine visc_replpress(strength, DminArea, Delta, & + zetax2, etax2, rep_prs, capping) + + real (kind=dbl_kind), intent(in):: & + strength, & ! + DminArea ! real (kind=dbl_kind), intent(in):: & - strength, tinyarea ! at the t-point - - real (kind=dbl_kind), intent(in):: & - Deltane, Deltanw, Deltasw, Deltase ! Delta at each corner - - real(kind=dbl_kind) , intent(in):: capping - - real (kind=dbl_kind), intent(out):: & - zetax2ne, zetax2nw, zetax2sw, zetax2se, & ! zetax2 at each corner - etax2ne, etax2nw, etax2sw, etax2se, & ! etax2 at each corner - rep_prsne, rep_prsnw, rep_prssw, rep_prsse ! replacement pressure + Delta , & ! + capping ! + + real (kind=dbl_kind), intent(out):: & + zetax2 , & ! bulk viscosity + etax2 , & ! shear viscosity + rep_prs ! replacement pressure ! local variables real (kind=dbl_kind) :: & - tmpcalcne, tmpcalcnw, tmpcalcsw, tmpcalcse + tmpcalc ! temporary + + character(len=*), parameter :: subname = '(visc_replpress)' ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code - ! if (trim(yield_curve) == 'ellipse') then - tmpcalcne = capping *(strength/max(Deltane, tinyarea))+ & - (c1-capping)* strength/ (Deltane+ tinyarea) - tmpcalcnw = capping *(strength/max(Deltanw, tinyarea))+ & - (c1-capping)* strength/ (Deltanw+ tinyarea) - tmpcalcsw = capping *(strength/max(Deltasw, tinyarea))+ & - (c1-capping)* strength/ (Deltasw+ tinyarea) - tmpcalcse = capping *(strength/max(Deltase, tinyarea))+ & - (c1-capping)* strength/ (Deltase+ tinyarea) - - zetax2ne = (c1+Ktens)*tmpcalcne ! northeast - rep_prsne = (c1-Ktens)*tmpcalcne*Deltane - etax2ne = epp2i*zetax2ne - - zetax2nw = (c1+Ktens)*tmpcalcnw ! northwest - rep_prsnw = (c1-Ktens)*tmpcalcnw*Deltanw - etax2nw = epp2i*zetax2nw + tmpcalc = capping *(strength/max(Delta,DminArea))+ & + (c1-capping)*(strength/(Delta + DminArea)) + zetax2 = (c1+Ktens)*tmpcalc + rep_prs = (c1-Ktens)*tmpcalc*Delta + etax2 = epp2i*zetax2 - zetax2sw = (c1+Ktens)*tmpcalcsw ! southwest - rep_prssw = (c1-Ktens)*tmpcalcsw*Deltasw - etax2sw = epp2i*zetax2sw - - zetax2se = (c1+Ktens)*tmpcalcse ! southeast - rep_prsse = (c1-Ktens)*tmpcalcse*Deltase - etax2se = epp2i*zetax2se - ! else + end subroutine visc_replpress + +!======================================================================= +! 
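For reference, a sketch of the relations implemented in visc_replpress
+! (not normative: P = strength, Dmin = DminArea, which the VP solver
+! passes as DminTarea = deltaminVP*tarea, c = capping, k_t = Ktens, and
+! epp2i is taken to be 1/e^2 for the standard elliptical yield curve):
+!
+! \begin{equation}
+! 2\zeta = (1+k_t)\left[c\,\frac{P}{\max(\Delta,D_{min})}
+!        + (1-c)\,\frac{P}{\Delta+D_{min}}\right], \qquad
+! 2\eta = \mathrm{epp2i}\cdot 2\zeta
+! \end{equation}
+! \begin{equation}
+! P_{rep} = \frac{1-k_t}{1+k_t}\,2\zeta\,\Delta
+! \end{equation}
+!
+! Delta and DminArea both carry the grid-cell area factor here (see the
+! strain-rate NOTEs above), so the ratio entering P_rep is
+! area-independent.
+!
+!=======================================================================
+! 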
Do a halo update on 1 field + + subroutine dyn_haloUpdate1(halo_info, halo_info_mask, field_loc, field_type, fld1) + + use ice_boundary, only: ice_halo, ice_HaloUpdate + use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + + type (ice_halo), intent(in) :: & + halo_info , & ! standard unmasked halo + halo_info_mask ! masked halo + + integer (kind=int_kind), intent(in) :: & + field_loc , & ! field loc + field_type ! field_type + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & + fld1 ! fields to halo + + ! local variables + + integer (kind=int_kind) :: & + iblk ! iblock + + real (kind=dbl_kind), dimension (nx_block,ny_block,1,max_blocks) :: & + fldbundle ! work array for boundary updates + + character(len=*), parameter :: subname = '(dyn_haloUpdate1)' + + call ice_timer_start(timer_bound) + + if (maskhalo_dyn) then + call ice_HaloUpdate (fld1 , halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fld1 , halo_info , & + field_loc, field_type) + endif + + call ice_timer_stop(timer_bound) + + end subroutine dyn_haloUpdate1 + +!======================================================================= +! Do a halo update on 2 fields + + subroutine dyn_haloUpdate2(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2) + + use ice_boundary, only: ice_halo, ice_HaloUpdate + use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + + type (ice_halo), intent(in) :: & + halo_info , & ! standard unmasked halo + halo_info_mask ! masked halo + + integer (kind=int_kind), intent(in) :: & + field_loc , & ! field loc + field_type ! field_type + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & + fld1 , & ! fields to halo + fld2 ! + + ! local variables + + integer (kind=int_kind) :: & + iblk ! iblock + + real (kind=dbl_kind), dimension (nx_block,ny_block,2,max_blocks) :: & + fldbundle ! work array for boundary updates + + character(len=*), parameter :: subname = '(dyn_haloUpdate2)' + + call ice_timer_start(timer_bound) + ! single process performs better without bundling fields + if (halo_dynbundle) then + + call stack_fields(fld1, fld2, fldbundle) + if (maskhalo_dyn) then + call ice_HaloUpdate (fldbundle, halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fldbundle, halo_info , & + field_loc, field_type) + endif + call unstack_fields(fldbundle, fld1, fld2) + + else + + if (maskhalo_dyn) then + call ice_HaloUpdate (fld1 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld2 , halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fld1 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld2 , halo_info , & + field_loc, field_type) + endif + + endif + call ice_timer_stop(timer_bound) + + end subroutine dyn_haloUpdate2 + +!======================================================================= +! Do a halo update on 3 fields + + subroutine dyn_haloUpdate3(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2, fld3) + + use ice_boundary, only: ice_halo, ice_HaloUpdate + use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + + type (ice_halo), intent(in) :: & + halo_info , & ! standard unmasked halo + halo_info_mask ! masked halo + + integer (kind=int_kind), intent(in) :: & + field_loc , & ! field loc + field_type ! 
field_type + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & + fld1 , & ! fields to halo + fld2 , & ! + fld3 ! + + ! local variables + + integer (kind=int_kind) :: & + iblk ! iblock + + real (kind=dbl_kind), dimension (nx_block,ny_block,3,max_blocks) :: & + fldbundle ! work array for boundary updates + + character(len=*), parameter :: subname = '(dyn_haloUpdate3)' + + call ice_timer_start(timer_bound) + ! single process performs better without bundling fields + if (halo_dynbundle) then + + call stack_fields(fld1, fld2, fld3, fldbundle) + if (maskhalo_dyn) then + call ice_HaloUpdate (fldbundle, halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fldbundle, halo_info , & + field_loc, field_type) + endif + call unstack_fields(fldbundle, fld1, fld2, fld3) + + else + + if (maskhalo_dyn) then + call ice_HaloUpdate (fld1 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld2 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld3 , halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fld1 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld2 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld3 , halo_info , & + field_loc, field_type) + endif + + endif + call ice_timer_stop(timer_bound) - ! endif - - end subroutine viscous_coeffs_and_rep_pressure + end subroutine dyn_haloUpdate3 !======================================================================= +! Do a halo update on 4 fields -! Load velocity components into array for boundary updates + subroutine dyn_haloUpdate4(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2, fld3, fld4) + + use ice_boundary, only: ice_halo, ice_HaloUpdate + use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + + type (ice_halo), intent(in) :: & + halo_info , & ! standard unmasked halo + halo_info_mask ! masked halo + + integer (kind=int_kind), intent(in) :: & + field_loc, & ! field loc + field_type ! field_type + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & + fld1 , & ! fields to halo + fld2 , & ! + fld3 , & ! + fld4 ! + + ! local variables + + integer (kind=int_kind) :: & + iblk ! iblock + + real (kind=dbl_kind), dimension (nx_block,ny_block,4,max_blocks) :: & + fldbundle ! work array for boundary updates + + character(len=*), parameter :: subname = '(dyn_haloUpdate4)' + + call ice_timer_start(timer_bound) + ! 
single process performs better without bundling fields + if (halo_dynbundle) then + + call stack_fields(fld1, fld2, fld3, fld4, fldbundle) + if (maskhalo_dyn) then + call ice_HaloUpdate (fldbundle, halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fldbundle, halo_info , & + field_loc, field_type) + endif + call unstack_fields(fldbundle, fld1, fld2, fld3, fld4) + + else + + if (maskhalo_dyn) then + call ice_HaloUpdate (fld1 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld2 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld3 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld4 , halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fld1 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld2 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld3 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld4 , halo_info , & + field_loc, field_type) + endif + + endif + call ice_timer_stop(timer_bound) + + end subroutine dyn_haloUpdate4 + +!======================================================================= +! Do a halo update on 5 fields + + subroutine dyn_haloUpdate5(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2, fld3, fld4, fld5) + + use ice_boundary, only: ice_halo, ice_HaloUpdate + use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + + type (ice_halo), intent(in) :: & + halo_info , & ! standard unmasked halo + halo_info_mask ! masked halo + + integer (kind=int_kind), intent(in) :: & + field_loc , & ! field loc + field_type ! field_type + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & + fld1 , & ! fields to halo + fld2 , & ! + fld3 , & ! + fld4 , & ! + fld5 ! + + ! local variables + + integer (kind=int_kind) :: & + iblk ! iblock - subroutine stack_velocity_field(uvel, vvel, fld2) + real (kind=dbl_kind), dimension (nx_block,ny_block,5,max_blocks) :: & + fldbundle ! work array for boundary updates + + character(len=*), parameter :: subname = '(dyn_haloUpdate5)' + + call ice_timer_start(timer_bound) + ! single process performs better without bundling fields + if (halo_dynbundle) then + + call stack_fields(fld1, fld2, fld3, fld4, fld5, fldbundle) + if (maskhalo_dyn) then + call ice_HaloUpdate (fldbundle, halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fldbundle, halo_info , & + field_loc, field_type) + endif + call unstack_fields(fldbundle, fld1, fld2, fld3, fld4, fld5) + + else + + if (maskhalo_dyn) then + call ice_HaloUpdate (fld1 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld2 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld3 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld4 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld5 , halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fld1 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld2 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld3 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld4 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld5 , halo_info , & + field_loc, field_type) + endif + + endif + call ice_timer_stop(timer_bound) + + end subroutine dyn_haloUpdate5 + +!======================================================================= +! 
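The numbered routines above and the stack/unstack helpers below are
+! presumably gathered under generic interfaces near the top of this
+! module (outside this hunk), so that callers can simply write, e.g.,
+! call stack_fields(uvel, vvel, fld2). A minimal sketch of the assumed
+! declarations:
+!
+!    interface dyn_haloUpdate
+!       module procedure dyn_haloUpdate1, dyn_haloUpdate2, &
+!          dyn_haloUpdate3, dyn_haloUpdate4, dyn_haloUpdate5
+!    end interface
+!
+!    interface stack_fields
+!       module procedure stack_fields2, stack_fields3, &
+!          stack_fields4, stack_fields5
+!    end interface
+!
+!    interface unstack_fields
+!       module procedure unstack_fields2, unstack_fields3, &
+!          unstack_fields4, unstack_fields5
+!    end interface
+!
+!=======================================================================
+! 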
Load fields into array for boundary updates + + subroutine stack_fields2(fld1, fld2, fldbundle) + + use ice_domain, only: nblocks + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound + + real (kind=dbl_kind), dimension (:,:,:), intent(in) :: & + fld1 , & ! fields to stack + fld2 ! + + real (kind=dbl_kind), dimension (:,:,:,:), intent(out) :: & + fldbundle ! work array for boundary updates (i,j,n,iblk) + + ! local variables + + integer (kind=int_kind) :: & + iblk ! block index + + character(len=*), parameter :: subname = '(stack_fields2)' + + call ice_timer_start(timer_bundbound) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + fldbundle(:,:,1,iblk) = fld1(:,:,iblk) + fldbundle(:,:,2,iblk) = fld2(:,:,iblk) + enddo + !$OMP END PARALLEL DO + call ice_timer_stop(timer_bundbound) + + end subroutine stack_fields2 + +!======================================================================= +! Load fields into array for boundary updates + + subroutine stack_fields3(fld1, fld2, fld3, fldbundle) use ice_domain, only: nblocks + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & - uvel , & ! u components of velocity vector - vvel ! v components of velocity vector + real (kind=dbl_kind), dimension (:,:,:), intent(in) :: & + fld1 , & ! fields to stack + fld2 , & ! + fld3 ! - real (kind=dbl_kind), dimension (nx_block,ny_block,2,max_blocks), intent(out) :: & - fld2 ! work array for boundary updates + real (kind=dbl_kind), dimension (:,:,:,:), intent(out) :: & + fldbundle ! work array for boundary updates (i,j,n,iblk) ! local variables integer (kind=int_kind) :: & iblk ! block index - character(len=*), parameter :: subname = '(stack_velocity_field)' + character(len=*), parameter :: subname = '(stack_fields3)' - ! load velocity into array for boundary updates + call ice_timer_start(timer_bundbound) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - fld2(:,:,1,iblk) = uvel(:,:,iblk) - fld2(:,:,2,iblk) = vvel(:,:,iblk) + fldbundle(:,:,1,iblk) = fld1(:,:,iblk) + fldbundle(:,:,2,iblk) = fld2(:,:,iblk) + fldbundle(:,:,3,iblk) = fld3(:,:,iblk) enddo !$OMP END PARALLEL DO + call ice_timer_stop(timer_bundbound) - end subroutine stack_velocity_field + end subroutine stack_fields3 !======================================================================= +! Load fields into array for boundary updates -! Unload velocity components from array after boundary updates + subroutine stack_fields4(fld1, fld2, fld3, fld4, fldbundle) - subroutine unstack_velocity_field(fld2, uvel, vvel) + use ice_domain, only: nblocks + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound + + real (kind=dbl_kind), dimension (:,:,:), intent(in) :: & + fld1 , & ! fields to stack + fld2 , & ! + fld3 , & ! + fld4 ! + + real (kind=dbl_kind), dimension (:,:,:,:), intent(out) :: & + fldbundle ! work array for boundary updates (i,j,n,iblk) + + ! local variables + + integer (kind=int_kind) :: & + iblk ! 
block index + + character(len=*), parameter :: subname = '(stack_fields4)' + + call ice_timer_start(timer_bundbound) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + fldbundle(:,:,1,iblk) = fld1(:,:,iblk) + fldbundle(:,:,2,iblk) = fld2(:,:,iblk) + fldbundle(:,:,3,iblk) = fld3(:,:,iblk) + fldbundle(:,:,4,iblk) = fld4(:,:,iblk) + enddo + !$OMP END PARALLEL DO + call ice_timer_stop(timer_bundbound) + + end subroutine stack_fields4 + +!======================================================================= +! Load fields into array for boundary updates + + subroutine stack_fields5(fld1, fld2, fld3, fld4, fld5, fldbundle) use ice_domain, only: nblocks + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound - real (kind=dbl_kind), dimension (nx_block,ny_block,2,max_blocks), intent(in) :: & - fld2 ! work array for boundary updates + real (kind=dbl_kind), dimension (:,:,:), intent(in) :: & + fld1 , & ! fields to stack + fld2 , & ! + fld3 , & ! + fld4 , & ! + fld5 ! - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(out) :: & - uvel , & ! u components of velocity vector - vvel ! v components of velocity vector + real (kind=dbl_kind), dimension (:,:,:,:), intent(out) :: & + fldbundle ! work array for boundary updates (i,j,n,iblk) ! local variables integer (kind=int_kind) :: & iblk ! block index - character(len=*), parameter :: subname = '(unstack_velocity_field)' + character(len=*), parameter :: subname = '(stack_fields5)' - ! Unload velocity from array after boundary updates + call ice_timer_start(timer_bundbound) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - uvel(:,:,iblk) = fld2(:,:,1,iblk) - vvel(:,:,iblk) = fld2(:,:,2,iblk) + fldbundle(:,:,1,iblk) = fld1(:,:,iblk) + fldbundle(:,:,2,iblk) = fld2(:,:,iblk) + fldbundle(:,:,3,iblk) = fld3(:,:,iblk) + fldbundle(:,:,4,iblk) = fld4(:,:,iblk) + fldbundle(:,:,5,iblk) = fld5(:,:,iblk) enddo !$OMP END PARALLEL DO + call ice_timer_stop(timer_bundbound) - end subroutine unstack_velocity_field + end subroutine stack_fields5 !======================================================================= - +! Unload fields from array after boundary updates + + subroutine unstack_fields2(fldbundle, fld1, fld2) + + use ice_domain, only: nblocks + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound + + real (kind=dbl_kind), dimension (:,:,:,:), intent(in) :: & + fldbundle ! work array for boundary updates (i,j,n,iblk) + + real (kind=dbl_kind), dimension (:,:,:), intent(out) :: & + fld1 , & ! fields to unstack + fld2 ! + + ! local variables + + integer (kind=int_kind) :: & + iblk ! block index + + character(len=*), parameter :: subname = '(unstack_fields2)' + + call ice_timer_start(timer_bundbound) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + fld1(:,:,iblk) = fldbundle(:,:,1,iblk) + fld2(:,:,iblk) = fldbundle(:,:,2,iblk) + enddo + !$OMP END PARALLEL DO + call ice_timer_stop(timer_bundbound) + + end subroutine unstack_fields2 + +!======================================================================= +! Unload fields from array after boundary updates + + subroutine unstack_fields3(fldbundle, fld1, fld2, fld3) + + use ice_domain, only: nblocks + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound + + real (kind=dbl_kind), dimension (:,:,:,:), intent(in) :: & + fldbundle ! work array for boundary updates (i,j,n,iblk) + + real (kind=dbl_kind), dimension (:,:,:), intent(out) :: & + fld1 , & ! 
fields to unstack + fld2 , & ! + fld3 ! + + ! local variables + + integer (kind=int_kind) :: & + iblk ! block index + + character(len=*), parameter :: subname = '(unstack_fields3)' + + call ice_timer_start(timer_bundbound) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + fld1(:,:,iblk) = fldbundle(:,:,1,iblk) + fld2(:,:,iblk) = fldbundle(:,:,2,iblk) + fld3(:,:,iblk) = fldbundle(:,:,3,iblk) + enddo + !$OMP END PARALLEL DO + call ice_timer_stop(timer_bundbound) + + end subroutine unstack_fields3 + +!======================================================================= +! Unload fields from array after boundary updates + + subroutine unstack_fields4(fldbundle, fld1, fld2, fld3, fld4) + + use ice_domain, only: nblocks + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound + + real (kind=dbl_kind), dimension (:,:,:,:), intent(in) :: & + fldbundle ! work array for boundary updates (i,j,n,iblk) + + real (kind=dbl_kind), dimension (:,:,:), intent(out) :: & + fld1 , & ! fields to unstack + fld2 , & ! + fld3 , & ! + fld4 ! + + ! local variables + + integer (kind=int_kind) :: & + iblk ! block index + + character(len=*), parameter :: subname = '(unstack_fields4)' + + call ice_timer_start(timer_bundbound) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + fld1(:,:,iblk) = fldbundle(:,:,1,iblk) + fld2(:,:,iblk) = fldbundle(:,:,2,iblk) + fld3(:,:,iblk) = fldbundle(:,:,3,iblk) + fld4(:,:,iblk) = fldbundle(:,:,4,iblk) + enddo + !$OMP END PARALLEL DO + call ice_timer_stop(timer_bundbound) + + end subroutine unstack_fields4 + +!======================================================================= +! Unload fields from array after boundary updates + + subroutine unstack_fields5(fldbundle, fld1, fld2, fld3, fld4, fld5) + + use ice_domain, only: nblocks + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound + + real (kind=dbl_kind), dimension (:,:,:,:), intent(in) :: & + fldbundle ! work array for boundary updates (i,j,n,iblk) + + real (kind=dbl_kind), dimension (:,:,:), intent(out) :: & + fld1 , & ! fields to unstack + fld2 , & ! + fld3 , & ! + fld4 , & ! + fld5 ! + + ! local variables + + integer (kind=int_kind) :: & + iblk ! 
block index + + character(len=*), parameter :: subname = '(unstack_fields5)' + + call ice_timer_start(timer_bundbound) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + fld1(:,:,iblk) = fldbundle(:,:,1,iblk) + fld2(:,:,iblk) = fldbundle(:,:,2,iblk) + fld3(:,:,iblk) = fldbundle(:,:,3,iblk) + fld4(:,:,iblk) = fldbundle(:,:,4,iblk) + fld5(:,:,iblk) = fldbundle(:,:,5,iblk) + enddo + !$OMP END PARALLEL DO + call ice_timer_stop(timer_bundbound) + + end subroutine unstack_fields5 + +!======================================================================= + end module ice_dyn_shared !======================================================================= diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index a9da7e300..4796669e9 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -48,7 +48,7 @@ module ice_dyn_vp use ice_dyn_shared, only: dyn_prep1, dyn_prep2, dyn_finish, & cosw, sinw, fcor_blk, uvel_init, vvel_init, & seabed_stress_factor_LKD, seabed_stress_factor_prob, seabed_stress_method, & - seabed_stress, Ktens, stack_velocity_field, unstack_velocity_field + seabed_stress, Ktens, stack_fields, unstack_fields use ice_fileunits, only: nu_diag use ice_flux, only: fm use ice_global_reductions, only: global_sum, global_allreduce_sum @@ -103,7 +103,7 @@ module ice_dyn_vp indxui(:,:) , & ! compressed index in i-direction indxuj(:,:) ! compressed index in j-direction - real (kind=dbl_kind), allocatable :: & + real (kind=dbl_kind), allocatable :: & fld2(:,:,:,:) ! work array for boundary updates !======================================================================= @@ -122,7 +122,7 @@ subroutine init_vp use ice_constants, only: c1, & field_loc_center, field_type_scalar use ice_domain, only: blocks_ice, halo_info - use ice_grid, only: tarea, tinyarea +! use ice_grid, only: tarea ! local variables @@ -133,9 +133,6 @@ subroutine init_vp type (block) :: & this_block ! block information for current block - real (kind=dbl_kind) :: & - min_strain_rate = 2e-09_dbl_kind ! used for recomputing tinyarea - ! Initialize module variables allocate(icellt(max_blocks), icellu(max_blocks)) allocate(indxti(nx_block*ny_block, max_blocks), & @@ -143,29 +140,7 @@ subroutine init_vp indxui(nx_block*ny_block, max_blocks), & indxuj(nx_block*ny_block, max_blocks)) allocate(fld2(nx_block,ny_block,2,max_blocks)) - - ! Redefine tinyarea using min_strain_rate - - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - tinyarea(i,j,iblk) = min_strain_rate*tarea(i,j,iblk) - enddo - enddo - enddo ! 
iblk - !$OMP END PARALLEL DO - - call ice_HaloUpdate (tinyarea, halo_info, & - field_loc_center, field_type_scalar, & - fillValue=c1) - end subroutine init_vp !======================================================================= @@ -200,8 +175,8 @@ subroutine implicit_solver (dt) stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_grid, only: tmask, umask, dxt, dyt, cxp, cyp, cxm, cym, & - tarear, to_ugrid, t2ugrid_vector, u2tgrid_vector, & - grid_type + tarear, grid_type, grid_average_X2Y, & + grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & aice_init, aice0, aicen, vicen, strength use ice_timers, only: timer_dynamics, timer_bound, & @@ -219,6 +194,8 @@ subroutine implicit_solver (dt) i, j, ij real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + uocnU , & ! i ocean current (m/s) + vocnU , & ! j ocean current (m/s) tmass , & ! total mass of ice and snow (kg/m^2) waterx , & ! for ocean stress calculation, x (m/s) watery , & ! for ocean stress calculation, y (m/s) @@ -234,10 +211,10 @@ subroutine implicit_solver (dt) umassdti ! mass of U-cell/dte (kg/m^2 s) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4):: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2 , & ! etax2 = 2*eta (shear viscous coeff) + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 , & ! etax2 = 2*eta (shear viscosity) rep_prs ! replacement pressure - + logical (kind=log_kind) :: calc_strair integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & @@ -253,6 +230,10 @@ subroutine implicit_solver (dt) real (kind=dbl_kind), allocatable :: & sol(:) ! solution vector + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1, & ! temporary + work2 ! temporary + character(len=*), parameter :: subname = '(implicit_solver)' call ice_timer_start(timer_dynamics) ! dynamics @@ -260,7 +241,7 @@ subroutine implicit_solver (dt) !----------------------------------------------------------------- ! Initialize !----------------------------------------------------------------- - + ! This call is needed only if dt changes during runtime. ! call set_evp_parameters (dt) @@ -305,8 +286,6 @@ subroutine implicit_solver (dt) ilo, ihi, jlo, jhi, & aice (:,:,iblk), vice (:,:,iblk), & vsno (:,:,iblk), tmask (:,:,iblk), & - strairxT(:,:,iblk), strairyT(:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & tmass (:,:,iblk), icetmask(:,:,iblk)) enddo ! iblk @@ -321,8 +300,10 @@ subroutine implicit_solver (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call to_ugrid(tmass,umass) - call to_ugrid(aice_init, aiu) + call grid_average_X2Y('F',tmass,'T',umass,'U') + call grid_average_X2Y('F',aice_init,'T', aiu,'U') + call grid_average_X2Y('S',uocn,grid_ocn_dynu,uocnU,'U') + call grid_average_X2Y('S',vocn,grid_ocn_dynv,vocnU,'U') !---------------------------------------------------------------- ! Set wind stress to values supplied via NEMO or other forcing @@ -334,11 +315,15 @@ subroutine implicit_solver (dt) file=__FILE__, line=__LINE__) if (.not. 
calc_strair) then - strairx(:,:,:) = strax(:,:,:) - strairy(:,:,:) = stray(:,:,:) + call grid_average_X2Y('F', strax, grid_atm_dynu, strairx, 'U') + call grid_average_X2Y('F', stray, grid_atm_dynv, strairy, 'U') else - call t2ugrid_vector(strairx) - call t2ugrid_vector(strairy) + call ice_HaloUpdate (strairxT, halo_info, & + field_loc_center, field_type_vector) + call ice_HaloUpdate (strairyT, halo_info, & + field_loc_center, field_type_vector) + call grid_average_X2Y('F',strairxT,'T',strairx,'U') + call grid_average_X2Y('F',strairyT,'T',strairy,'U') endif ! tcraig, tcx, threading here leads to some non-reproducible results and failures in icepack_ice_strength @@ -364,7 +349,7 @@ subroutine implicit_solver (dt) aiu (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & strairx (:,:,iblk), strairy (:,:,iblk), & ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), & icetmask (:,:,iblk), iceumask (:,:,iblk), & @@ -392,7 +377,7 @@ subroutine implicit_solver (dt) forcex (:,:,iblk), forcey (:,:,iblk), & uvel_init (:,:,iblk), vvel_init (:,:,iblk), & bxfix (:,:,iblk), byfix (:,:,iblk)) - + !----------------------------------------------------------------- ! ice strength !----------------------------------------------------------------- @@ -421,10 +406,10 @@ subroutine implicit_solver (dt) call ice_HaloUpdate (strength, halo_info, & field_loc_center, field_type_scalar) ! velocities may have changed in dyn_prep2 - call stack_velocity_field(uvel, vvel, fld2) + call stack_fields(uvel, vvel, fld2) call ice_HaloUpdate (fld2, halo_info, & field_loc_NEcorner, field_type_vector) - call unstack_velocity_field(fld2, uvel, vvel) + call unstack_fields(fld2, uvel, vvel) call ice_timer_stop(timer_bound) if (maskhalo_dyn) then @@ -440,7 +425,6 @@ subroutine implicit_solver (dt) !----------------------------------------------------------------- ! seabed stress factor Tbu (Tbu is part of Cb coefficient) !----------------------------------------------------------------- - if (seabed_stress) then if ( seabed_stress_method == 'LKD' ) then !$OMP PARALLEL DO PRIVATE(iblk) @@ -467,20 +451,20 @@ subroutine implicit_solver (dt) endif endif - - + + !----------------------------------------------------------------- ! calc size of problem (ntot) and allocate solution vector !----------------------------------------------------------------- - + ntot = 0 do iblk = 1, nblocks ntot = ntot + icellu(iblk) enddo ntot = 2 * ntot ! times 2 because of u and v - + allocate(sol(ntot)) - + !----------------------------------------------------------------- ! Start of nonlinear iteration !----------------------------------------------------------------- @@ -488,6 +472,7 @@ subroutine implicit_solver (dt) indxti , indxtj, & indxui , indxuj, & aiu , ntot , & + uocnU , vocnU , & waterx , watery, & bxfix , byfix , & umassdti, sol , & @@ -500,7 +485,7 @@ subroutine implicit_solver (dt) !----------------------------------------------------------------- deallocate(sol) - + if (maskhalo_dyn) call ice_HaloDestroy(halo_info_mask) !----------------------------------------------------------------- @@ -543,7 +528,7 @@ subroutine implicit_solver (dt) rdg_conv (:,:,iblk), rdg_shear (:,:,iblk)) enddo !$OMP END PARALLEL DO - + !----------------------------------------------------------------- ! 
Compute seabed stress (diagnostic) !----------------------------------------------------------------- @@ -559,7 +544,7 @@ subroutine implicit_solver (dt) enddo !$OMP END PARALLEL DO endif - + ! Force symmetry across the tripole seam if (trim(grid_type) == 'tripole') then if (maskhalo_dyn) then @@ -640,17 +625,44 @@ subroutine implicit_solver (dt) icellu (iblk), Cdn_ocn (:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & aiu (:,:,iblk), fm (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk), & - strocnxT(:,:,iblk), strocnyT(:,:,iblk)) +! strintx (:,:,iblk), strinty (:,:,iblk), & +! strairx (:,:,iblk), strairy (:,:,iblk), & + strocnx (:,:,iblk), strocny (:,:,iblk)) enddo !$OMP END PARALLEL DO - call u2tgrid_vector(strocnxT) ! shift - call u2tgrid_vector(strocnyT) - + ! strocn computed on U, N, E as needed. Map strocn U divided by aiu to T + ! TODO: This should be done elsewhere as part of generalization? + ! conservation requires aiu be divided before averaging + work1 = c0 + work2 = c0 + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) + do iblk = 1, nblocks + do ij = 1, icellu(iblk) + i = indxui(ij,iblk) + j = indxuj(ij,iblk) + work1(i,j,iblk) = strocnx(i,j,iblk)/aiu(i,j,iblk) + work2(i,j,iblk) = strocny(i,j,iblk)/aiu(i,j,iblk) + enddo + enddo + call ice_HaloUpdate (work1, halo_info, & + field_loc_NEcorner, field_type_vector) + call ice_HaloUpdate (work2, halo_info, & + field_loc_NEcorner, field_type_vector) + call grid_average_X2Y('F',work1,'U',strocnxT,'T') ! shift + call grid_average_X2Y('F',work2,'U',strocnyT,'T') + +! shift velocity components from CD grid locations (N, E) to B grid location (U) for transport +! commented out in order to focus on EVP for now within the cdgrid +! should be used when routine is ready +! if (grid_ice == 'CD' .or. grid_ice == 'C') then +! call grid_average_X2Y('E2US',uvelE,uvel) +! call grid_average_X2Y('N2US',vvelN,vvel) +! endif +!end comment out call ice_timer_stop(timer_dynamics) ! dynamics end subroutine implicit_solver @@ -671,6 +683,7 @@ subroutine anderson_solver (icellt , icellu, & indxti , indxtj, & indxui , indxuj, & aiu , ntot , & + uocn , vocn , & waterx , watery, & bxfix , byfix , & umassdti, sol , & @@ -685,9 +698,10 @@ subroutine anderson_solver (icellt , icellu, & use ice_constants, only: c1 use ice_domain, only: maskhalo_dyn, halo_info use ice_domain_size, only: max_blocks - use ice_flux, only: uocn, vocn, fm, Tbu + use ice_flux, only: fm, Tbu use ice_grid, only: dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, & - uarear, tinyarea + uarear + use ice_dyn_shared, only: DminTarea use ice_state, only: uvel, vvel, strength use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound @@ -706,6 +720,8 @@ subroutine anderson_solver (icellt , icellu, & real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & aiu , & ! ice fraction on u-grid + uocn , & ! i ocean current (m/s) + vocn , & ! j ocean current (m/s) waterx , & ! for ocean stress calculation, x (m/s) watery , & ! for ocean stress calculation, y (m/s) bxfix , & ! part of bx that is constant during Picard @@ -713,10 +729,10 @@ subroutine anderson_solver (icellt , icellu, & umassdti ! mass of U-cell/dte (kg/m^2 s) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(out) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2 , & ! etax2 = 2*eta (shear viscous coeff) + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 , & ! 
etax2 = 2*eta (shear viscosity) rep_prs ! replacement pressure - + type (ice_halo), intent(in) :: & halo_info_mask ! ghost cell update info for masked halo @@ -800,14 +816,14 @@ subroutine anderson_solver (icellt , icellu, & ! Initialization res_num = 0 L2norm = c0 - + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks uprev_k(:,:,iblk) = uvel(:,:,iblk) vprev_k(:,:,iblk) = vvel(:,:,iblk) enddo !$OMP END PARALLEL DO - + ! Start iterations do it_nl = 0, maxits_nonlin ! nonlinear iteration loop ! Compute quantities needed for computing PDE residual and g(x) (fixed point map) @@ -826,7 +842,7 @@ subroutine anderson_solver (icellt , icellu, & endif uprev_k(:,:,iblk) = uvel(:,:,iblk) vprev_k(:,:,iblk) = vvel(:,:,iblk) - + call calc_zeta_dPr (nx_block , ny_block , & icellt (iblk), & indxti (:,iblk), indxtj (:,iblk), & @@ -835,10 +851,10 @@ subroutine anderson_solver (icellt , icellu, & dxhy (:,:,iblk), dyhx (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & cxm (:,:,iblk), cym (:,:,iblk), & - tinyarea (:,:,iblk), strength (:,:,iblk),& + DminTarea (:,:,iblk),strength (:,:,iblk),& zetax2 (:,:,iblk,:), etax2 (:,:,iblk,:),& rep_prs(:,:,iblk,:), stress_Pr (:,:,:)) - + call calc_vrel_Cb (nx_block , ny_block , & icellu (iblk), Cdn_ocn (:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & @@ -846,7 +862,7 @@ subroutine anderson_solver (icellt , icellu, & uocn (:,:,iblk), vocn (:,:,iblk), & ulin (:,:,iblk), vlin (:,:,iblk), & vrel (:,:,iblk), Cb (:,:,iblk)) - + ! prepare b vector (RHS) call calc_bvec (nx_block , ny_block , & icellu (iblk), & @@ -856,7 +872,7 @@ subroutine anderson_solver (icellt , icellu, & bxfix (:,:,iblk), byfix (:,:,iblk), & bx (:,:,iblk), by (:,:,iblk), & vrel (:,:,iblk)) - + ! Compute nonlinear residual norm (PDE residual) call matvec (nx_block , ny_block , & icellu (iblk) , icellt (iblk), & @@ -890,12 +906,12 @@ subroutine anderson_solver (icellt , icellu, & if (it_nl == 0) then tol_nl = reltol_nonlin*nlres_norm endif - + ! Check for nonlinear convergence if (nlres_norm < tol_nl) then exit endif - + ! Put initial guess for FGMRES in solx,soly and sol (needed for anderson) solx = uprev_k soly = vprev_k @@ -905,11 +921,11 @@ subroutine anderson_solver (icellt , icellu, & indxui (:,:), indxuj (:,:), & uprev_k (:,:,:), vprev_k (:,:,:), & sol (:)) - + ! Compute fixed point map g(x) if (fpfunc_andacc == 1) then ! g_1(x) = FGMRES(A(x), b(x)) - + ! Prepare diagonal for preconditioner if (precond == 'diag' .or. precond == 'pgmres') then !$OMP PARALLEL DO PRIVATE(iblk,diag_rheo) @@ -935,7 +951,7 @@ subroutine anderson_solver (icellt , icellu, & enddo !$OMP END PARALLEL DO endif - + ! FGMRES linear solver call fgmres (zetax2 , etax2 , & Cb , vrel , & @@ -986,13 +1002,13 @@ subroutine anderson_solver (icellt , icellu, & write(nu_diag, '(a,i4,a,d26.16)') "monitor_nonlin: iter_nonlin= ", it_nl, & " fixed_point_res_L2norm= ", fpres_norm endif - + ! Not used for now (only nonlinear residual is checked) ! ! Store initial residual norm ! if (it_nl == 0) then ! tol = reltol_andacc*fpres_norm ! endif - ! + ! ! ! Check residual ! if (fpres_norm < tol) then ! exit @@ -1050,7 +1066,7 @@ subroutine anderson_solver (icellt , icellu, & endif ! TODO: here, drop more columns to improve conditioning ! if (droptol) then - + ! endif ! Solve least square problem for coefficients ! 1. Compute rhs_tri = Q^T * res @@ -1064,7 +1080,7 @@ subroutine anderson_solver (icellt , icellu, & ! Apply damping if (damping_andacc > 0 .and. damping_andacc /= 1) then ! x = x - (1-beta) (res - Q*R*coeffs) - + ! 
tmp = R*coeffs call dgemv ('n', res_num, res_num, c1, R(1:res_num,1:res_num), res_num, coeffs, inc, c0, tmp, inc) ! res = res - Q*tmp @@ -1079,7 +1095,7 @@ subroutine anderson_solver (icellt , icellu, & file=__FILE__, line=__LINE__) #endif endif - + !----------------------------------------------------------------------- ! Put vector sol in uvel and vvel arrays !----------------------------------------------------------------------- @@ -1089,9 +1105,9 @@ subroutine anderson_solver (icellt , icellu, & indxui (:,:), indxuj (:,:), & sol (:), & uvel (:,:,:), vvel (:,:,:)) - + ! Do halo update so that halo cells contain up to date info for advection - call stack_velocity_field(uvel, vvel, fld2) + call stack_fields(uvel, vvel, fld2) call ice_timer_start(timer_bound) if (maskhalo_dyn) then call ice_HaloUpdate (fld2, halo_info_mask, & @@ -1101,8 +1117,8 @@ subroutine anderson_solver (icellt , icellu, & field_loc_NEcorner, field_type_vector) endif call ice_timer_stop(timer_bound) - call unstack_velocity_field(fld2, uvel, vvel) - + call unstack_fields(fld2, uvel, vvel) + ! Compute "progress" residual norm !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -1120,14 +1136,14 @@ subroutine anderson_solver (icellt , icellu, & write(nu_diag, '(a,i4,a,d26.16)') "monitor_nonlin: iter_nonlin= ", it_nl, & " progress_res_L2norm= ", prog_norm endif - + enddo ! nonlinear iteration loop - + end subroutine anderson_solver !======================================================================= -! Computes the viscous coefficients and dPr/dx, dPr/dy +! Computes the viscosities and dPr/dx, dPr/dy subroutine calc_zeta_dPr (nx_block, ny_block, & icellt , & @@ -1137,11 +1153,12 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & dxhy , dyhx , & cxp , cyp , & cxm , cym , & - tinyarea, strength, & + DminTarea,strength, & zetax2 , etax2 , & rep_prs , stPr) - use ice_dyn_shared, only: strain_rates, viscous_coeffs_and_rep_pressure + use ice_dyn_shared, only: strain_rates, visc_replpress, & + capping integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1163,13 +1180,13 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & cxp , & ! 1.5*HTN - 0.5*HTS cym , & ! 0.5*HTE - 1.5*HTW cxm , & ! 0.5*HTN - 1.5*HTS - tinyarea ! min_strain_rate*tarea + DminTarea ! deltaminVP*tarea real (kind=dbl_kind), dimension(nx_block,ny_block,4), intent(out) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2 , & ! etax2 = 2*eta (shear viscous coeff) - rep_prs ! replacement pressure - + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 , & ! etax2 = 2*eta (shear viscosity) + rep_prs ! replacement pressure + real (kind=dbl_kind), dimension(nx_block,ny_block,8), intent(out) :: & stPr ! stress combinations from replacement pressure @@ -1188,8 +1205,6 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & stressp_1, stressp_2, stressp_3, stressp_4 , & strp_tmp - real(kind=dbl_kind),parameter :: capping = c0 ! of the viscous coef - character(len=*), parameter :: subname = '(calc_zeta_dPr)' ! Initialize stPr, zetax2 and etax2 to zero @@ -1221,21 +1236,27 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & Deltane , Deltanw , & Deltase , Deltasw) - !----------------------------------------------------------------- - ! 
viscous coefficients and replacement pressure - !----------------------------------------------------------------- - - call viscous_coeffs_and_rep_pressure (strength(i,j), tinyarea(i,j), & - Deltane, Deltanw, & - Deltasw, Deltase, & - zetax2(i,j,1), zetax2(i,j,2), & - zetax2(i,j,3), zetax2(i,j,4), & - etax2(i,j,1), etax2(i,j,2), & - etax2(i,j,3), etax2(i,j,4), & - rep_prs(i,j,1), rep_prs(i,j,2), & - rep_prs(i,j,3), rep_prs(i,j,4), & - capping) - + !----------------------------------------------------------------- + ! viscosities and replacement pressure + !----------------------------------------------------------------- + + call visc_replpress (strength(i,j) , DminTarea(i,j) , & + Deltane , zetax2 (i,j,1), & + etax2 (i,j,1), rep_prs (i,j,1), & + capping) + call visc_replpress (strength(i,j) , DminTarea(i,j) , & + Deltanw , zetax2 (i,j,2), & + etax2 (i,j,2), rep_prs (i,j,2), & + capping) + call visc_replpress (strength(i,j) , DminTarea(i,j) , & + Deltasw , zetax2 (i,j,3), & + etax2 (i,j,3), rep_prs (i,j,3), & + capping) + call visc_replpress (strength(i,j) , DminTarea(i,j) , & + Deltase , zetax2 (i,j,4), & + etax2 (i,j,4), rep_prs (i,j,4), & + capping) + !----------------------------------------------------------------- ! the stresses ! kg/s^2 ! (1) northeast, (2) northwest, (3) southwest, (4) southeast @@ -1245,7 +1266,7 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & stressp_2 = -rep_prs(i,j,2) stressp_3 = -rep_prs(i,j,3) stressp_4 = -rep_prs(i,j,4) - + !----------------------------------------------------------------- ! combinations of the Pr related stresses for the momentum equation ! kg/s^2 !----------------------------------------------------------------- @@ -1261,7 +1282,7 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & csigpnw = p111*stressp_2 + ssigp1 + p027*stressp_4 csigpsw = p111*stressp_3 + ssigp2 + p027*stressp_1 csigpse = p111*stressp_4 + ssigp1 + p027*stressp_2 - + !----------------------------------------------------------------- ! for dF/dx (u momentum) !----------------------------------------------------------------- @@ -1318,7 +1339,7 @@ end subroutine calc_zeta_dPr ! Lemieux, J.-F., and Dupont, F. (2020), On the calculation of normalized ! viscous-plastic sea ice stresses, Geosci. Model Dev., 13, 1763–1769, - + subroutine stress_vp (nx_block , ny_block , & icellt , & indxti , indxtj , & @@ -1356,10 +1377,10 @@ subroutine stress_vp (nx_block , ny_block , & cxm ! 0.5*HTN - 1.5*HTS real (kind=dbl_kind), dimension(nx_block,ny_block,4), intent(in) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2 , & ! etax2 = 2*eta (shear viscous coeff) + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 , & ! etax2 = 2*eta (shear viscosity) rep_prs - + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & stressp_1, stressp_2, stressp_3, stressp_4 , & ! sigma11+sigma22 stressm_1, stressm_2, stressm_3, stressm_4 , & ! sigma11-sigma22 @@ -1405,17 +1426,17 @@ subroutine stress_vp (nx_block , ny_block , & ! the stresses ! kg/s^2 ! 
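Only the replacement-pressure part of the stress is needed here
! (stressp = -rep_prs below, with no strain-rate term), since this
! routine computes dPr/dx and dPr/dy. For reference, the full VP
! stresses formed in stress_vp and matvec are (a sketch, in the
! notation of Hibler 1979):
!    stressp  = sigma_11 + sigma_22 = zetax2*(e_11 + e_22) - rep_prs
!    stressm  = sigma_11 - sigma_22 = etax2 *(e_11 - e_22)
!    stress12 = sigma_12            = etax2 * e_12
! 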
(1) northeast, (2) northwest, (3) southwest, (4) southeast !----------------------------------------------------------------- - + stressp_1(i,j) = zetax2(i,j,1)*divune - rep_prs(i,j,1) stressp_2(i,j) = zetax2(i,j,2)*divunw - rep_prs(i,j,2) stressp_3(i,j) = zetax2(i,j,3)*divusw - rep_prs(i,j,3) stressp_4(i,j) = zetax2(i,j,4)*divuse - rep_prs(i,j,4) - + stressm_1(i,j) = etax2(i,j,1)*tensionne stressm_2(i,j) = etax2(i,j,2)*tensionnw stressm_3(i,j) = etax2(i,j,3)*tensionsw stressm_4(i,j) = etax2(i,j,4)*tensionse - + stress12_1(i,j) = etax2(i,j,1)*shearne*p5 stress12_2(i,j) = etax2(i,j,2)*shearnw*p5 stress12_3(i,j) = etax2(i,j,3)*shearsw*p5 @@ -1484,7 +1505,7 @@ subroutine calc_vrel_Cb (nx_block, ny_block, & ! (magnitude of relative ocean current)*rhow*drag*aice vrel(i,j) = aiu(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uvel(i,j))**2 + & (vocn(i,j) - vvel(i,j))**2) ! m/s - + Cb(i,j) = Tbu(i,j) / (sqrt(uvel(i,j)**2 + vvel(i,j)**2) + u0) ! for seabed stress enddo ! ij @@ -1528,7 +1549,7 @@ subroutine calc_seabed_stress (nx_block, ny_block, & do ij = 1, icellu i = indxui(ij) j = indxuj(ij) - + taubx(i,j) = -uvel(i,j)*Cb(i,j) tauby(i,j) = -vvel(i,j)*Cb(i,j) enddo ! ij @@ -1589,8 +1610,8 @@ subroutine matvec (nx_block, ny_block, & uarear ! 1/uarea real (kind=dbl_kind), dimension(nx_block,ny_block,4), intent(in) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2 ! etax2 = 2*eta (shear viscous coeff) + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 ! etax2 = 2*eta (shear viscosity) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & Au , & ! matvec, Fx = bx - Au (N/m^2) @@ -1669,12 +1690,12 @@ subroutine matvec (nx_block, ny_block, & stressp_2 = zetax2(i,j,2)*divunw! - Deltanw*(c1-Ktens)) stressp_3 = zetax2(i,j,3)*divusw! - Deltasw*(c1-Ktens)) stressp_4 = zetax2(i,j,4)*divuse! - Deltase*(c1-Ktens)) - + stressm_1 = etax2(i,j,1)*tensionne stressm_2 = etax2(i,j,2)*tensionnw stressm_3 = etax2(i,j,3)*tensionsw stressm_4 = etax2(i,j,4)*tensionse - + stress12_1 = etax2(i,j,1)*shearne*p5 stress12_2 = etax2(i,j,2)*shearnw*p5 stress12_3 = etax2(i,j,3)*shearsw*p5 @@ -1709,12 +1730,12 @@ subroutine matvec (nx_block, ny_block, & csigpnw = p111*stressp_2 + ssigp1 + p027*stressp_4 csigpsw = p111*stressp_3 + ssigp2 + p027*stressp_1 csigpse = p111*stressp_4 + ssigp1 + p027*stressp_2 - + csigmne = p111*stressm_1 + ssigm2 + p027*stressm_3 csigmnw = p111*stressm_2 + ssigm1 + p027*stressm_4 csigmsw = p111*stressm_3 + ssigm2 + p027*stressm_1 csigmse = p111*stressm_4 + ssigm1 + p027*stressm_2 - + csig12ne = p222*stress12_1 + ssig122 & + p055*stress12_3 csig12nw = p222*stress12_2 + ssig121 & @@ -1780,7 +1801,7 @@ subroutine matvec (nx_block, ny_block, & - dyhx(i,j)*(csigpsw + csigmsw) + dxhy(i,j)*csig12sw enddo ! ij - icellt - + !----------------------------------------------------------------- ! Form Au and Av !----------------------------------------------------------------- @@ -1790,7 +1811,7 @@ subroutine matvec (nx_block, ny_block, & j = indxuj(ij) ccaimp = umassdti(i,j) + vrel(i,j) * cosw + Cb(i,j) ! kg/m^2 s - + ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel(i,j) * sinw ! kg/m^2 s ! divergence of the internal stress tensor @@ -1903,11 +1924,11 @@ subroutine calc_bvec (nx_block, ny_block, & rhow ! character(len=*), parameter :: subname = '(calc_bvec)' - + !----------------------------------------------------------------- ! 
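A note on the right-hand side (a reading of the code, not part of the
! original header): the b vector assembled below gathers the parts of
! the momentum equation that do not depend on the unknown velocity at
! the current nonlinear iteration: the constant terms bxfix/byfix, the
! linearized water drag vrel*waterx and vrel*watery, and the
! replacement-pressure gradient (dPr/dx, dPr/dy) carried by stPr.
!-----------------------------------------------------------------
! 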
calc b vector !----------------------------------------------------------------- - + call icepack_query_parameters(rhow_out=rhow) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) @@ -1920,7 +1941,7 @@ subroutine calc_bvec (nx_block, ny_block, & ! ice/ocean stress taux = vrel(i,j)*waterx(i,j) ! NOTE this is not the entire tauy = vrel(i,j)*watery(i,j) ! ocn stress term - + ! divergence of the internal stress tensor (only Pr part, i.e. dPr/dx, dPr/dy) strintx = uarear(i,j)* & (stPr(i,j,1) + stPr(i+1,j,2) + stPr(i,j+1,3) + stPr(i+1,j+1,4)) @@ -1983,7 +2004,7 @@ subroutine residual_vec (nx_block , ny_block, & if (present(sum_squared)) then sum_squared = c0 endif - + do ij = 1, icellu i = indxui(ij) j = indxuj(ij) @@ -2031,8 +2052,8 @@ subroutine formDiag_step1 (nx_block, ny_block, & cxm ! 0.5*HTN - 1.5*HTS real (kind=dbl_kind), dimension(nx_block,ny_block,4), intent(in) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2 ! etax2 = 2*eta (shear viscous coeff) + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 ! etax2 = 2*eta (shear viscosity) real (kind=dbl_kind), dimension(nx_block,ny_block,8), intent(out) :: & Drheo ! intermediate value for diagonal components of matrix A associated @@ -2068,11 +2089,11 @@ subroutine formDiag_step1 (nx_block, ny_block, & !----------------------------------------------------------------- Drheo(:,:,:) = c0 - + ! Be careful: Drheo contains 4 terms for u and 4 terms for v. ! These 8 terms come from the surrounding T cells but are all ! referenced to the i,j (u point) : - + ! Drheo(i,j,1) corresponds to str(i,j,1) ! Drheo(i,j,2) corresponds to str(i+1,j,2) ! Drheo(i,j,3) corresponds to str(i,j+1,3) @@ -2081,9 +2102,9 @@ subroutine formDiag_step1 (nx_block, ny_block, & ! Drheo(i,j,6) corresponds to str(i,j+1,6) ! Drheo(i,j,7) corresponds to str(i+1,j,7) ! Drheo(i,j,8) corresponds to str(i+1,j+1,8)) - + do cc = 1, 8 ! 4 for u and 4 for v - + if (cc == 1) then ! u comp, T cell i,j uij = c1 ui1j = c0 @@ -2175,12 +2196,12 @@ subroutine formDiag_step1 (nx_block, ny_block, & endif do ij = 1, icellu - + iu = indxui(ij) ju = indxuj(ij) i = iu + di j = ju + dj - + !----------------------------------------------------------------- ! strain rates ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- @@ -2214,22 +2235,22 @@ subroutine formDiag_step1 (nx_block, ny_block, & - cxp(i,j)*ui1j1 + dxt(i,j)*ui1j shearse = -cym(i,j)*vij1 - dyt(i,j)*vi1j1 & - cxp(i,j)*uij1 + dxt(i,j)*uij - + !----------------------------------------------------------------- ! the stresses ! kg/s^2 ! 
(1) northeast, (2) northwest, (3) southwest, (4) southeast !----------------------------------------------------------------- - + stressp_1 = zetax2(i,j,1)*divune + stressp_2 = zetax2(i,j,2)*divunw + stressp_3 = zetax2(i,j,3)*divusw + stressp_4 = zetax2(i,j,4)*divuse - + stressm_1 = etax2(i,j,1)*tensionne stressm_2 = etax2(i,j,2)*tensionnw stressm_3 = etax2(i,j,3)*tensionsw stressm_4 = etax2(i,j,4)*tensionse - + stress12_1 = etax2(i,j,1)*shearne*p5 stress12_2 = etax2(i,j,2)*shearnw*p5 stress12_3 = etax2(i,j,3)*shearsw*p5 @@ -2264,12 +2285,12 @@ subroutine formDiag_step1 (nx_block, ny_block, & csigpnw = p111*stressp_2 + ssigp1 + p027*stressp_4 csigpsw = p111*stressp_3 + ssigp2 + p027*stressp_1 csigpse = p111*stressp_4 + ssigp1 + p027*stressp_2 - + csigmne = p111*stressm_1 + ssigm2 + p027*stressm_3 csigmnw = p111*stressm_2 + ssigm1 + p027*stressm_4 csigmsw = p111*stressm_3 + ssigm2 + p027*stressm_1 csigmse = p111*stressm_4 + ssigm1 + p027*stressm_2 - + csig12ne = p222*stress12_1 + ssig122 & + p055*stress12_3 csig12nw = p222*stress12_2 + ssig121 & @@ -2287,27 +2308,27 @@ subroutine formDiag_step1 (nx_block, ny_block, & !----------------------------------------------------------------- ! for dF/dx (u momentum) !----------------------------------------------------------------- - + if (cc == 1) then ! T cell i,j - + strp_tmp = p25*dyt(i,j)*(p333*ssigpn + p166*ssigps) strm_tmp = p25*dyt(i,j)*(p333*ssigmn + p166*ssigms) ! northeast (i,j) Drheo(iu,ju,1) = -strp_tmp - strm_tmp - str12ew & + dxhy(i,j)*(-csigpne + csigmne) + dyhx(i,j)*csig12ne - + elseif (cc == 2) then ! T cell i+1,j - + strp_tmp = p25*dyt(i,j)*(p333*ssigpn + p166*ssigps) strm_tmp = p25*dyt(i,j)*(p333*ssigmn + p166*ssigms) - + ! northwest (i+1,j) Drheo(iu,ju,2) = strp_tmp + strm_tmp - str12we & + dxhy(i,j)*(-csigpnw + csigmnw) + dyhx(i,j)*csig12nw elseif (cc == 3) then ! T cell i,j+1 - + strp_tmp = p25*dyt(i,j)*(p333*ssigps + p166*ssigpn) strm_tmp = p25*dyt(i,j)*(p333*ssigms + p166*ssigmn) @@ -2316,10 +2337,10 @@ subroutine formDiag_step1 (nx_block, ny_block, & + dxhy(i,j)*(-csigpse + csigmse) + dyhx(i,j)*csig12se elseif (cc == 4) then ! T cell i+1,j+1 - + strp_tmp = p25*dyt(i,j)*(p333*ssigps + p166*ssigpn) strm_tmp = p25*dyt(i,j)*(p333*ssigms + p166*ssigmn) - + ! southwest (i+1,j+1) Drheo(iu,ju,4) = strp_tmp + strm_tmp + str12we & + dxhy(i,j)*(-csigpsw + csigmsw) + dyhx(i,j)*csig12sw @@ -2327,9 +2348,9 @@ subroutine formDiag_step1 (nx_block, ny_block, & !----------------------------------------------------------------- ! for dF/dy (v momentum) !----------------------------------------------------------------- - + elseif (cc == 5) then ! T cell i,j - + strp_tmp = p25*dxt(i,j)*(p333*ssigpe + p166*ssigpw) strm_tmp = p25*dxt(i,j)*(p333*ssigme + p166*ssigmw) @@ -2338,16 +2359,16 @@ subroutine formDiag_step1 (nx_block, ny_block, & - dyhx(i,j)*(csigpne + csigmne) + dxhy(i,j)*csig12ne elseif (cc == 6) then ! T cell i,j+1 - + strp_tmp = p25*dxt(i,j)*(p333*ssigpe + p166*ssigpw) strm_tmp = p25*dxt(i,j)*(p333*ssigme + p166*ssigmw) - + ! southeast (i,j+1) Drheo(iu,ju,6) = strp_tmp - strm_tmp - str12sn & - dyhx(i,j)*(csigpse + csigmse) + dxhy(i,j)*csig12se elseif (cc == 7) then ! T cell i+1,j - + strp_tmp = p25*dxt(i,j)*(p333*ssigpw + p166*ssigpe) strm_tmp = p25*dxt(i,j)*(p333*ssigmw + p166*ssigme) @@ -2356,14 +2377,14 @@ subroutine formDiag_step1 (nx_block, ny_block, & - dyhx(i,j)*(csigpnw + csigmnw) + dxhy(i,j)*csig12nw elseif (cc == 8) then ! 
T cell i+1,j+1 - + strp_tmp = p25*dxt(i,j)*(p333*ssigpw + p166*ssigpe) strm_tmp = p25*dxt(i,j)*(p333*ssigmw + p166*ssigme) - + ! southwest (i+1,j+1) Drheo(iu,ju,8) = strp_tmp - strm_tmp + str12sn & - dyhx(i,j)*(csigpsw + csigmsw) + dxhy(i,j)*csig12sw - + endif enddo ! ij @@ -2423,7 +2444,7 @@ subroutine formDiag_step2 (nx_block, ny_block, & strintx = c0 strinty = c0 - + ! Be careful: Drheo contains 4 terms for u and 4 terms for v. ! These 8 terms come from the surrounding T cells but are all ! referenced to the i,j (u point) : @@ -2436,13 +2457,13 @@ subroutine formDiag_step2 (nx_block, ny_block, & ! Drheo(i,j,6) corresponds to str(i,j+1,6) ! Drheo(i,j,7) corresponds to str(i+1,j,7) ! Drheo(i,j,8) corresponds to str(i+1,j+1,8)) - + do ij = 1, icellu i = indxui(ij) j = indxuj(ij) ccaimp = umassdti(i,j) + vrel(i,j) * cosw + Cb(i,j) ! kg/m^2 s - + strintx = uarear(i,j)* & (Drheo(i,j,1) + Drheo(i,j,2) + Drheo(i,j,3) + Drheo(i,j,4)) strinty = uarear(i,j)* & @@ -2491,14 +2512,14 @@ subroutine calc_L2norm_squared (nx_block, ny_block, & !----------------------------------------------------------------- L2norm = c0 - + do ij = 1, icellu i = indxui(ij) j = indxuj(ij) - + L2norm = L2norm + tpu(i,j)**2 + tpv(i,j)**2 enddo ! ij - + end subroutine calc_L2norm_squared !======================================================================= @@ -2545,7 +2566,7 @@ subroutine arrays_to_vec (nx_block, ny_block , & outvec(:) = c0 tot = 0 - + do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) @@ -2582,7 +2603,7 @@ subroutine vec_to_arrays (nx_block, ny_block , & integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks), intent(in) :: & indxui , & ! compressed index in i-direction indxuj ! compressed index in j-direction - + real (kind=dbl_kind), dimension (ntot), intent(in) :: & invec ! input 1D vector @@ -2604,7 +2625,7 @@ subroutine vec_to_arrays (nx_block, ny_block , & tpu(:,:,:) = c0 tpv(:,:,:) = c0 tot = 0 - + do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) @@ -2633,9 +2654,9 @@ subroutine qr_delete(Q, R) real (kind=dbl_kind), intent(inout) :: & Q(:,:), & ! Q factor R(:,:) ! R factor - + ! local variables - + integer (kind=int_kind) :: & i, j, k, & ! loop indices m, n ! size of Q matrix @@ -2644,7 +2665,7 @@ subroutine qr_delete(Q, R) temp, c, s character(len=*), parameter :: subname = '(qr_delete)' - + n = size(Q, 1) m = size(Q, 2) do i = 1, m-1 @@ -2667,7 +2688,7 @@ subroutine qr_delete(Q, R) enddo enddo R(:, 1:m-1) = R(:, 2:m) - + end subroutine qr_delete !======================================================================= @@ -2693,9 +2714,9 @@ subroutine fgmres (zetax2 , etax2 , & use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2 ! etax2 = 2*eta (shear viscous coeff) - + + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 ! etax2 = 2*eta (shear viscosity) + real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & vrel , & ! coefficient for tauw Cb , & ! seabed stress coefficient @@ -2781,18 +2802,18 @@ subroutine fgmres (zetax2 , etax2 , & ! Initialize outiter = 0 nbiter = 0 - + norm_squared = c0 precond_type = precond - + ! Cells with no ice should be zero-initialized workspace_x = c0 workspace_y = c0 arnoldi_basis_x = c0 arnoldi_basis_y = c0 - + ! 
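FGMRES outline (after Saad 2003, cited further below; a reading of the
! code, not normative): each restart builds an Arnoldi basis for the
! right-preconditioned operator; the preconditioned vectors are saved
! in orig_basis_x/y because the preconditioner may change between inner
! iterations (the "flexible" part), and Givens rotations keep the
! Hessenberg system triangular so the residual norm is available
! cheaply at every inner step.
!
! 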
Residual of the initial iterate - + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & @@ -2818,7 +2839,7 @@ subroutine fgmres (zetax2 , etax2 , & arnoldi_basis_y (:,:,iblk, 1)) enddo !$OMP END PARALLEL DO - + ! Start outer (restarts) loop do ! Compute norm of initial residual @@ -2834,17 +2855,17 @@ subroutine fgmres (zetax2 , etax2 , & enddo !$OMP END PARALLEL DO norm_residual = sqrt(global_sum(sum(norm_squared), distrb_info)) - + if (my_task == master_task .and. monitor_fgmres) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_fgmres: iter_fgmres= ", nbiter, & " fgmres_L2norm= ", norm_residual endif - + ! Current guess is a good enough solution TODO: reactivate and test this ! if (norm_residual < tolerance) then ! return ! end if - + ! Normalize the first Arnoldi vector inverse_norm = c1 / norm_residual !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) @@ -2858,20 +2879,20 @@ subroutine fgmres (zetax2 , etax2 , & enddo ! ij enddo !$OMP END PARALLEL DO - + if (outiter == 0) then relative_tolerance = tolerance * norm_residual end if - + ! Initialize 1-st term of RHS of Hessenberg system rhs_hess(1) = norm_residual rhs_hess(2:) = c0 - + initer = 0 - + ! Start of inner (Arnoldi) loop do - + nbiter = nbiter + 1 initer = initer + 1 nextit = initer + 1 @@ -2886,9 +2907,9 @@ subroutine fgmres (zetax2 , etax2 , & workspace_x , workspace_y) orig_basis_x(:,:,:,initer) = workspace_x orig_basis_y(:,:,:,initer) = workspace_y - + ! Update workspace with boundary values - call stack_velocity_field(workspace_x, workspace_y, fld2) + call stack_fields(workspace_x, workspace_y, fld2) call ice_timer_start(timer_bound) if (maskhalo_dyn) then call ice_HaloUpdate (fld2, halo_info_mask, & @@ -2898,7 +2919,7 @@ subroutine fgmres (zetax2 , etax2 , & field_loc_NEcorner, field_type_vector) endif call ice_timer_stop(timer_bound) - call unstack_velocity_field(fld2, workspace_x, workspace_y) + call unstack_fields(fld2, workspace_x, workspace_y) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -2919,13 +2940,13 @@ subroutine fgmres (zetax2 , etax2 , & arnoldi_basis_y(:,:,iblk,nextit)) enddo !$OMP END PARALLEL DO - + ! Orthogonalize the new vector call orthogonalize(ortho_type , initer , & nextit , maxinner , & arnoldi_basis_x, arnoldi_basis_y, & hessenberg) - + ! Compute norm of new Arnoldi vector and update Hessenberg matrix !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -2938,7 +2959,7 @@ subroutine fgmres (zetax2 , etax2 , & enddo !$OMP END PARALLEL DO hessenberg(nextit,initer) = sqrt(global_sum(sum(norm_squared), distrb_info)) - + ! Watch out for happy breakdown if (.not. almost_zero( hessenberg(nextit,initer) ) ) then ! Normalize next Arnoldi vector @@ -2948,14 +2969,14 @@ subroutine fgmres (zetax2 , etax2 , & do ij = 1, icellu(iblk) i = indxui(ij, iblk) j = indxuj(ij, iblk) - + arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit)*inverse_norm arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit)*inverse_norm enddo ! ij enddo !$OMP END PARALLEL DO end if - + ! Apply previous Givens rotation to the last column of the Hessenberg matrix if (initer > 1) then do k = 2, initer @@ -2964,33 +2985,33 @@ subroutine fgmres (zetax2 , etax2 , & hessenberg(k, initer) = -rot_sin(k-1)*t + rot_cos(k-1)*hessenberg(k, initer) end do end if - + ! Compute and apply new Givens rotation nu = sqrt(hessenberg(initer,initer)**2 + hessenberg(nextit,initer)**2) if (.not. 
almost_zero(nu)) then rot_cos(initer) = hessenberg(initer,initer) / nu rot_sin(initer) = hessenberg(nextit,initer) / nu - + rhs_hess(nextit) = -rot_sin(initer) * rhs_hess(initer) rhs_hess(initer) = rot_cos(initer) * rhs_hess(initer) - + hessenberg(initer,initer) = rot_cos(initer) * hessenberg(initer,initer) + rot_sin(initer) * hessenberg(nextit,initer) end if - + ! Check for convergence norm_residual = abs(rhs_hess(nextit)) - + if (my_task == master_task .and. monitor_fgmres) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_fgmres: iter_fgmres= ", nbiter, & " fgmres_L2norm= ", norm_residual endif - + if ((initer >= maxinner) .or. (norm_residual <= relative_tolerance)) then exit endif - + end do ! end of inner (Arnoldi) loop - + ! At this point either the maximum number of inner iterations ! was reached or the absolute residual is below the scaled tolerance. @@ -3005,7 +3026,7 @@ subroutine fgmres (zetax2 , etax2 , & end do rhs_hess(k) = t / hessenberg(k,k) end do - + ! Form linear combination to get new solution iterate do it = 1, initer t = rhs_hess(it) @@ -3021,7 +3042,7 @@ subroutine fgmres (zetax2 , etax2 , & enddo !$OMP END PARALLEL DO end do - + ! Increment outer loop counter and check for convergence outiter = outiter + 1 if (norm_residual <= relative_tolerance .or. outiter >= maxouter) then @@ -3029,7 +3050,7 @@ subroutine fgmres (zetax2 , etax2 , & end if ! Solution is not convergent : compute residual vector and continue. - + ! The residual vector is computed here using (see Saad p. 177) : ! \begin{equation} ! r = V_{m+1} * Q_m^T * (\gamma_{m+1} * e_{m+1}) @@ -3040,7 +3061,7 @@ subroutine fgmres (zetax2 , etax2 , & ! $Q_m$ is the product of the Givens rotation : Q_m = G_m G_{m-1} ... G_1 ! $gamma_{m+1}$ is the last element of rhs_hess ! $e_{m+1})$ is the unit vector (0, 0, ..., 1)^T \in \reals^{m+1} - + ! Apply the Givens rotation in reverse order to g := \gamma_{m+1} * e_{m+1}, ! store the result in rhs_hess do it = 1, initer @@ -3048,7 +3069,7 @@ subroutine fgmres (zetax2 , etax2 , & rhs_hess(jj-1) = -rot_sin(jj-1) * rhs_hess(jj) ! + rot_cos(jj-1) * g(jj-1) (== 0) rhs_hess(jj) = rot_cos(jj-1) * rhs_hess(jj) ! + rot_sin(jj-1) * g(jj-1) (== 0) end do - + ! Compute the residual by multiplying V_{m+1} and rhs_hess workspace_x = c0 workspace_y = c0 @@ -3068,7 +3089,7 @@ subroutine fgmres (zetax2 , etax2 , & arnoldi_basis_y(:,:,:,1) = workspace_y end do end do ! end of outer (restarts) loop - + end subroutine fgmres !======================================================================= @@ -3089,9 +3110,9 @@ subroutine pgmres (zetax2 , etax2 , & nbiter) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2 ! etax2 = 2*eta (shear viscous coeff) - + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 ! etax2 = 2*eta (shear viscosity) + real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & vrel , & ! coefficient for tauw Cb , & ! seabed stress coefficient @@ -3167,25 +3188,25 @@ subroutine pgmres (zetax2 , etax2 , & relative_tolerance ! relative_tolerance, i.e. tolerance*norm(initial residual) character(len=*), parameter :: subname = '(pgmres)' - + ! Here we go ! ! Initialize outiter = 0 nbiter = 0 - + norm_squared = c0 precond_type = 'diag' ! Jacobi preconditioner ortho_type = 'cgs' ! classical gram-schmidt TODO: try with MGS - + ! Cells with no ice should be zero-initialized workspace_x = c0 workspace_y = c0 arnoldi_basis_x = c0 arnoldi_basis_y = c0 - + ! 
Residual of the initial iterate - + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & @@ -3211,7 +3232,7 @@ subroutine pgmres (zetax2 , etax2 , & arnoldi_basis_y (:,:,iblk, 1)) enddo !$OMP END PARALLEL DO - + ! Start outer (restarts) loop do ! Compute norm of initial residual @@ -3227,17 +3248,17 @@ subroutine pgmres (zetax2 , etax2 , & enddo !$OMP END PARALLEL DO norm_residual = sqrt(global_sum(sum(norm_squared), distrb_info)) - + if (my_task == master_task .and. monitor_pgmres) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_pgmres: iter_pgmres= ", nbiter, & " pgmres_L2norm= ", norm_residual endif - + ! Current guess is a good enough solution ! if (norm_residual < tolerance) then ! return ! end if - + ! Normalize the first Arnoldi vector inverse_norm = c1 / norm_residual !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) @@ -3251,24 +3272,24 @@ subroutine pgmres (zetax2 , etax2 , & enddo ! ij enddo !$OMP END PARALLEL DO - + if (outiter == 0) then relative_tolerance = tolerance * norm_residual end if - + ! Initialize 1-st term of RHS of Hessenberg system rhs_hess(1) = norm_residual rhs_hess(2:) = c0 - + initer = 0 - + ! Start of inner (Arnoldi) loop do - + nbiter = nbiter + 1 initer = initer + 1 nextit = initer + 1 - + ! precondition the current Arnoldi vector call precondition(zetax2 , etax2 , & Cb , vrel , & @@ -3278,10 +3299,10 @@ subroutine pgmres (zetax2 , etax2 , & diagx , diagy , & precond_type, & workspace_x , workspace_y) - + ! NOTE: halo updates for (workspace_x, workspace_y) ! are skipped here for efficiency since this is just a preconditioner - + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & @@ -3301,13 +3322,13 @@ subroutine pgmres (zetax2 , etax2 , & arnoldi_basis_y(:,:,iblk,nextit)) enddo !$OMP END PARALLEL DO - + ! Orthogonalize the new vector call orthogonalize(ortho_type , initer , & nextit , maxinner , & arnoldi_basis_x, arnoldi_basis_y, & hessenberg) - + ! Compute norm of new Arnoldi vector and update Hessenberg matrix !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -3320,7 +3341,7 @@ subroutine pgmres (zetax2 , etax2 , & enddo !$OMP END PARALLEL DO hessenberg(nextit,initer) = sqrt(global_sum(sum(norm_squared), distrb_info)) - + ! Watch out for happy breakdown if (.not. almost_zero( hessenberg(nextit,initer) ) ) then ! Normalize next Arnoldi vector @@ -3330,14 +3351,14 @@ subroutine pgmres (zetax2 , etax2 , & do ij = 1, icellu(iblk) i = indxui(ij, iblk) j = indxuj(ij, iblk) - + arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit)*inverse_norm arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit)*inverse_norm enddo ! ij enddo !$OMP END PARALLEL DO end if - + ! Apply previous Givens rotation to the last column of the Hessenberg matrix if (initer > 1) then do k = 2, initer @@ -3346,33 +3367,33 @@ subroutine pgmres (zetax2 , etax2 , & hessenberg(k, initer) = -rot_sin(k-1)*t + rot_cos(k-1)*hessenberg(k, initer) end do end if - + ! Compute and apply new Givens rotation nu = sqrt(hessenberg(initer,initer)**2 + hessenberg(nextit,initer)**2) if (.not. almost_zero(nu)) then rot_cos(initer) = hessenberg(initer,initer) / nu rot_sin(initer) = hessenberg(nextit,initer) / nu - + rhs_hess(nextit) = -rot_sin(initer) * rhs_hess(initer) rhs_hess(initer) = rot_cos(initer) * rhs_hess(initer) - + hessenberg(initer,initer) = rot_cos(initer) * hessenberg(initer,initer) + rot_sin(initer) * hessenberg(nextit,initer) end if - + ! 
Check for convergence norm_residual = abs(rhs_hess(nextit)) - + if (my_task == master_task .and. monitor_pgmres) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_pgmres: iter_pgmres= ", nbiter, & " pgmres_L2norm= ", norm_residual endif - + if ((initer >= maxinner) .or. (norm_residual <= relative_tolerance)) then exit endif - + end do ! end of inner (Arnoldi) loop - + ! At this point either the maximum number of inner iterations ! was reached or the absolute residual is below the scaled tolerance. @@ -3387,7 +3408,7 @@ subroutine pgmres (zetax2 , etax2 , & end do rhs_hess(k) = t / hessenberg(k,k) end do - + ! Form linear combination to get new solution iterate workspace_x = c0 workspace_y = c0 @@ -3405,7 +3426,7 @@ subroutine pgmres (zetax2 , etax2 , & enddo !$OMP END PARALLEL DO end do - + ! Call preconditioner call precondition(zetax2 , etax2 , & Cb , vrel , & @@ -3414,10 +3435,10 @@ subroutine pgmres (zetax2 , etax2 , & diagx , diagy , & precond_type, & workspace_x , workspace_y) - + solx = solx + workspace_x soly = soly + workspace_y - + ! Increment outer loop counter and check for convergence outiter = outiter + 1 if (norm_residual <= relative_tolerance .or. outiter >= maxouter) then @@ -3425,7 +3446,7 @@ subroutine pgmres (zetax2 , etax2 , & end if ! Solution is not convergent : compute residual vector and continue. - + ! The residual vector is computed here using (see Saad p. 177) : ! \begin{equation} ! r = V_{m+1} * Q_m^T * (\gamma_{m+1} * e_{m+1}) @@ -3436,7 +3457,7 @@ subroutine pgmres (zetax2 , etax2 , & ! $Q_m$ is the product of the Givens rotation : Q_m = G_m G_{m-1} ... G_1 ! $gamma_{m+1}$ is the last element of rhs_hess ! $e_{m+1})$ is the unit vector (0, 0, ..., 1)^T \in \reals^{m+1} - + ! Apply the Givens rotation in reverse order to g := \gamma_{m+1} * e_{m+1}, ! store the result in rhs_hess do it = 1, initer @@ -3444,7 +3465,7 @@ subroutine pgmres (zetax2 , etax2 , & rhs_hess(jj-1) = -rot_sin(jj-1) * rhs_hess(jj) ! + rot_cos(jj-1) * g(jj-1) (== 0) rhs_hess(jj) = rot_cos(jj-1) * rhs_hess(jj) ! + rot_sin(jj-1) * g(jj-1) (== 0) end do - + ! Compute the residual by multiplying V_{m+1} and rhs_hess workspace_x = c0 workspace_y = c0 @@ -3464,7 +3485,7 @@ subroutine pgmres (zetax2 , etax2 , & arnoldi_basis_y(:,:,:,1) = workspace_y end do end do ! end of outer (restarts) loop - + end subroutine pgmres !======================================================================= @@ -3482,9 +3503,9 @@ subroutine precondition(zetax2 , etax2, & wx , wy) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2 ! etax2 = 2*eta (shear viscous coeff) - + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 ! etax2 = 2*eta (shear viscosity) + real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & vrel , & ! coefficient for tauw Cb , & ! seabed stress coefficient @@ -3612,20 +3633,20 @@ subroutine orthogonalize(ortho_type , initer , & dotprod_local = c0 do it = 1, initer local_dot = c0 - + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) j = indxuj(ij, iblk) - + local_dot(iblk) = local_dot(iblk) + & (arnoldi_basis_x(i, j, iblk, it) * arnoldi_basis_x(i, j, iblk, nextit)) + & (arnoldi_basis_y(i, j, iblk, it) * arnoldi_basis_y(i, j, iblk, nextit)) enddo ! 
ij enddo !$OMP END PARALLEL DO - + dotprod_local(it) = sum(local_dot) end do @@ -3638,7 +3659,7 @@ subroutine orthogonalize(ortho_type , initer , & do ij = 1, icellu(iblk) i = indxui(ij, iblk) j = indxuj(ij, iblk) - + arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit) & - hessenberg(it,initer) * arnoldi_basis_x(i, j, iblk, it) arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit) & @@ -3651,28 +3672,28 @@ subroutine orthogonalize(ortho_type , initer , & ! Modified Gram-Schmidt orthogonalisation process do it = 1, initer local_dot = c0 - + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) j = indxuj(ij, iblk) - + local_dot(iblk) = local_dot(iblk) + & (arnoldi_basis_x(i, j, iblk, it) * arnoldi_basis_x(i, j, iblk, nextit)) + & (arnoldi_basis_y(i, j, iblk, it) * arnoldi_basis_y(i, j, iblk, nextit)) enddo ! ij enddo !$OMP END PARALLEL DO - + hessenberg(it,initer) = global_sum(sum(local_dot), distrb_info) - + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) j = indxuj(ij, iblk) - + arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit) & - hessenberg(it,initer) * arnoldi_basis_x(i, j, iblk, it) arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit) & @@ -3685,7 +3706,7 @@ subroutine orthogonalize(ortho_type , initer , & call abort_ice(error_message='wrong orthonalization in ' // subname, & file=__FILE__, line=__LINE__) endif - + end subroutine orthogonalize !======================================================================= @@ -3716,7 +3737,7 @@ logical function almost_zero(A) result(retval) end if ! lexicographic order test with a tolerance of 1 adjacent float retval = (abs(aBit) <= 1) - + end function almost_zero !======================================================================= diff --git a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 index ac88d8ff4..9c9fa25d4 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 @@ -2,12 +2,12 @@ ! ! Drivers for remapping and upwind ice transport ! -! authors: Elizabeth C. Hunke and William H. Lipscomb, LANL +! authors: Elizabeth C. Hunke and William H. Lipscomb, LANL ! ! 2004: Revised by William Lipscomb from ice_transport_mpdata. ! Stripped out mpdata, retained upwind, and added block structure. ! 2006: Incorporated remap transport driver and renamed from -! ice_transport_upwind. +! ice_transport_upwind. ! 2011: ECH moved edgearea arrays into ice_transport_remap.F90 module ice_transport_driver @@ -36,17 +36,17 @@ module ice_transport_driver ! 'upwind' => 1st order donor cell scheme ! 'remap' => remapping scheme - logical, parameter :: & ! if true, prescribe area flux across each edge - l_fixed_area = .false. + logical, parameter :: & + l_fixed_area = .false. ! if true, prescribe area flux across each edge ! NOTE: For remapping, hice and hsno are considered tracers. ! ntrace is not equal to ntrcr! integer (kind=int_kind) :: & ntrace ! number of tracers in use - + integer (kind=int_kind), dimension(:), allocatable, public :: & - tracer_type ,&! = 1, 2, or 3 (depends on 0, 1 or 2 other tracers) + tracer_type , & ! = 1, 2, or 3 (depends on 0, 1 or 2 other tracers) depend ! tracer dependencies (see below) logical (kind=log_kind), dimension (:), allocatable, public :: & @@ -56,13 +56,13 @@ module ice_transport_driver conserv_check ! 
if true, check conservation integer (kind=int_kind), parameter :: & - integral_order = 3 ! polynomial order of quadrature integrals - ! linear=1, quadratic=2, cubic=3 + integral_order = 3 ! polynomial order of quadrature integrals + ! linear=1, quadratic=2, cubic=3 logical (kind=log_kind), parameter :: & - l_dp_midpt = .true. ! if true, find departure points using - ! corrected midpoint velocity - + l_dp_midpt = .true. ! if true, find departure points using + ! corrected midpoint velocity + !======================================================================= contains @@ -84,158 +84,160 @@ subroutine init_transport integer (kind=int_kind) :: & k, nt, nt1 ! tracer indices - integer (kind=int_kind) :: ntrcr, nt_Tsfc, nt_qice, nt_qsno, & - nt_sice, nt_fbri, nt_iage, nt_FY, nt_alvl, nt_vlvl, & - nt_apnd, nt_hpnd, nt_ipnd, nt_fsd, & - nt_smice, nt_smliq, nt_rhos, nt_rsnw, & - nt_isosno, nt_isoice, nt_bgc_Nit, nt_bgc_S + integer (kind=int_kind) :: & + ntrcr , nt_Tsfc , nt_qice , nt_qsno , & + nt_sice , nt_fbri , nt_iage , nt_FY , & + nt_alvl , nt_vlvl , & + nt_apnd , nt_hpnd , nt_ipnd , nt_fsd , & + nt_smice , nt_smliq , nt_rhos , nt_rsnw , & + nt_isosno, nt_isoice, nt_bgc_Nit, nt_bgc_S character(len=*), parameter :: subname = '(init_transport)' - call ice_timer_start(timer_advect) ! advection + call ice_timer_start(timer_advect) ! advection call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc, nt_qice_out=nt_qice, & - nt_qsno_out=nt_qsno, nt_sice_out=nt_sice, nt_fbri_out=nt_fbri, & - nt_iage_out=nt_iage, nt_FY_out=nt_FY, nt_fsd_out=nt_fsd, & - nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & - nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd, nt_ipnd_out=nt_ipnd, & - nt_smice_out=nt_smice, nt_smliq_out=nt_smliq, nt_rhos_out=nt_rhos, & - nt_rsnw_out=nt_rsnw, & - nt_bgc_Nit_out=nt_bgc_Nit, nt_bgc_S_out=nt_bgc_S, & - nt_isosno_out=nt_isosno, nt_isoice_out=nt_isoice) + nt_qsno_out=nt_qsno, nt_sice_out=nt_sice, nt_fbri_out=nt_fbri, & + nt_iage_out=nt_iage, nt_FY_out=nt_FY, nt_fsd_out=nt_fsd, & + nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & + nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd, nt_ipnd_out=nt_ipnd, & + nt_smice_out=nt_smice, nt_smliq_out=nt_smliq, nt_rhos_out=nt_rhos, & + nt_rsnw_out=nt_rsnw, & + nt_bgc_Nit_out=nt_bgc_Nit, nt_bgc_S_out=nt_bgc_S, & + nt_isosno_out=nt_isosno, nt_isoice_out=nt_isoice) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) ntrace = 2 + ntrcr ! hice,hsno,trcr - if (allocated(tracer_type)) deallocate(tracer_type) - if (allocated(depend)) deallocate(depend) + if (allocated(tracer_type)) deallocate(tracer_type) + if (allocated(depend)) deallocate(depend) if (allocated(has_dependents)) deallocate(has_dependents) allocate (tracer_type (ntrace), & depend (ntrace), & has_dependents(ntrace)) - ! define tracer dependency arrays - ! see comments in remapping routine - - depend(1:2) = 0 ! hice, hsno - tracer_type(1:2) = 1 ! no dependency - - k = 2 - - do nt = 1, ntrcr - depend(k+nt) = trcr_depend(nt) ! 0 for ice area tracers - ! 1 for ice volume tracers - ! 2 for snow volume tracers - tracer_type(k+nt) = 2 ! depends on 1 other tracer - if (trcr_depend(nt) == 0) then - tracer_type(k+nt) = 1 ! depends on no other tracers - elseif (trcr_depend(nt) > 2) then - if (trcr_depend(trcr_depend(nt)-2) > 0) then - tracer_type(k+nt) = 3 ! depends on 2 other tracers - endif - endif - enddo - - has_dependents = .false. 
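!     The loop above classifies each transported field by its
!     dependency: type 1 fields (like hice and hsno) depend on no other
!     tracer, type 2 fields ride on one other field, and type 3 fields
!     depend on a tracer that itself rides on another.  A minimal
!     standalone sketch of that classification, using a made-up
!     trcr_depend vector (values are illustrative, not the model's
!     real tracer set):

      program tracer_type_demo
         implicit none
         integer, parameter :: k = 2          ! offset for hice, hsno
         integer, parameter :: ntrcr = 4
         integer :: trcr_depend(ntrcr), tracer_type(k+ntrcr)
         integer :: nt
         trcr_depend = (/ 0, 1, 2, k+2 /)     ! hypothetical dependencies
         tracer_type(1:k) = 1                 ! hice, hsno: no dependency
         do nt = 1, ntrcr
            tracer_type(k+nt) = 2             ! default: one dependency
            if (trcr_depend(nt) == 0) then
               tracer_type(k+nt) = 1          ! area tracer: no dependency
            elseif (trcr_depend(nt) > 2) then
               if (trcr_depend(trcr_depend(nt)-2) > 0) &
                  tracer_type(k+nt) = 3       ! two-level dependency
            endif
            print *, 'tracer', nt, ' type', tracer_type(k+nt)
         enddo
      end program tracer_type_demo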
- do nt = 1, ntrace - if (depend(nt) > 0) then - nt1 = depend(nt) - has_dependents(nt1) = .true. - if (nt1 > nt) then - write(nu_diag,*) & - 'Tracer nt2 =',nt,' depends on tracer nt1 =',nt1 - call abort_ice(subname// & - 'ERROR: remap transport: Must have nt2 > nt1') - endif - endif - enddo ! ntrace - - ! diagnostic output - if (my_task == master_task) then - write (nu_diag, *) 'tracer index depend type has_dependents' - nt = 1 - write(nu_diag,1000) 'hi ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - nt = 2 - write(nu_diag,1000) 'hs ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - k=2 - do nt = k+1, k+ntrcr - if (nt-k==nt_Tsfc) & - write(nu_diag,1000) 'nt_Tsfc ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_qice) & - write(nu_diag,1000) 'nt_qice ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_qsno) & - write(nu_diag,1000) 'nt_qsno ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_sice) & - write(nu_diag,1000) 'nt_sice ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_fbri) & - write(nu_diag,1000) 'nt_fbri ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_iage) & - write(nu_diag,1000) 'nt_iage ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_FY) & - write(nu_diag,1000) 'nt_FY ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_alvl) & - write(nu_diag,1000) 'nt_alvl ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_vlvl) & - write(nu_diag,1000) 'nt_vlvl ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_apnd) & - write(nu_diag,1000) 'nt_apnd ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_hpnd) & - write(nu_diag,1000) 'nt_hpnd ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_ipnd) & - write(nu_diag,1000) 'nt_ipnd ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_smice) & - write(nu_diag,1000) 'nt_smice ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_smliq) & - write(nu_diag,1000) 'nt_smliq ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_rhos) & - write(nu_diag,1000) 'nt_rhos ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_rsnw) & - write(nu_diag,1000) 'nt_rsnw ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_fsd) & - write(nu_diag,1000) 'nt_fsd ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_isosno) & - write(nu_diag,1000) 'nt_isosno ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_isoice) & - write(nu_diag,1000) 'nt_isoice ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_bgc_Nit) & - write(nu_diag,1000) 'nt_bgc_Nit ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_bgc_S) & - write(nu_diag,1000) 'nt_bgc_S ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - enddo - write(nu_diag,*) ' ' - endif ! master_task - 1000 format (1x,a,2x,i6,2x,i6,2x,i4,4x,l4) - - if (trim(advection)=='remap') call init_remap ! grid quantities - - call ice_timer_stop(timer_advect) ! advection + ! define tracer dependency arrays + ! see comments in remapping routine + + depend(1:2) = 0 ! hice, hsno + tracer_type(1:2) = 1 ! no dependency + + k = 2 + + do nt = 1, ntrcr + depend(k+nt) = trcr_depend(nt) ! 0 for ice area tracers + ! 1 for ice volume tracers + ! 2 for snow volume tracers + tracer_type(k+nt) = 2 ! 
depends on 1 other tracer + if (trcr_depend(nt) == 0) then + tracer_type(k+nt) = 1 ! depends on no other tracers + elseif (trcr_depend(nt) > 2) then + if (trcr_depend(trcr_depend(nt)-2) > 0) then + tracer_type(k+nt) = 3 ! depends on 2 other tracers + endif + endif + enddo + + has_dependents = .false. + do nt = 1, ntrace + if (depend(nt) > 0) then + nt1 = depend(nt) + has_dependents(nt1) = .true. + if (nt1 > nt) then + write(nu_diag,*) & + 'Tracer nt2 =',nt,' depends on tracer nt1 =',nt1 + call abort_ice(subname// & + 'ERROR: remap transport: Must have nt2 > nt1') + endif + endif + enddo ! ntrace + + ! diagnostic output + if (my_task == master_task) then + write (nu_diag, *) 'tracer index depend type has_dependents' + nt = 1 + write(nu_diag,1000) 'hi ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + nt = 2 + write(nu_diag,1000) 'hs ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + k=2 + do nt = k+1, k+ntrcr + if (nt-k==nt_Tsfc) & + write(nu_diag,1000) 'nt_Tsfc ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_qice) & + write(nu_diag,1000) 'nt_qice ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_qsno) & + write(nu_diag,1000) 'nt_qsno ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_sice) & + write(nu_diag,1000) 'nt_sice ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_fbri) & + write(nu_diag,1000) 'nt_fbri ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_iage) & + write(nu_diag,1000) 'nt_iage ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_FY) & + write(nu_diag,1000) 'nt_FY ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_alvl) & + write(nu_diag,1000) 'nt_alvl ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_vlvl) & + write(nu_diag,1000) 'nt_vlvl ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_apnd) & + write(nu_diag,1000) 'nt_apnd ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_hpnd) & + write(nu_diag,1000) 'nt_hpnd ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_ipnd) & + write(nu_diag,1000) 'nt_ipnd ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_smice) & + write(nu_diag,1000) 'nt_smice ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_smliq) & + write(nu_diag,1000) 'nt_smliq ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_rhos) & + write(nu_diag,1000) 'nt_rhos ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_rsnw) & + write(nu_diag,1000) 'nt_rsnw ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_fsd) & + write(nu_diag,1000) 'nt_fsd ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_isosno) & + write(nu_diag,1000) 'nt_isosno ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_isoice) & + write(nu_diag,1000) 'nt_isoice ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_bgc_Nit) & + write(nu_diag,1000) 'nt_bgc_Nit ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_bgc_S) & + write(nu_diag,1000) 'nt_bgc_S ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + enddo + write(nu_diag,*) ' ' + endif ! master_task + 1000 format (1x,a,2x,i6,2x,i6,2x,i4,4x,l4) + + if (trim(advection)=='remap') call init_remap ! grid quantities + + call ice_timer_stop(timer_advect) ! advection end subroutine init_transport @@ -248,7 +250,7 @@ end subroutine init_transport ! ! 
This scheme preserves monotonicity of ice area and tracers. That is, ! it does not produce new extrema. It is second-order accurate in space, -! except where gradients are limited to preserve monotonicity. +! except where gradients are limited to preserve monotonicity. ! ! authors William H. Lipscomb, LANL @@ -261,8 +263,8 @@ subroutine transport_remap (dt) use ice_domain_size, only: ncat, max_blocks use ice_blocks, only: nx_block, ny_block, block, get_block, nghost use ice_state, only: aice0, aicen, vicen, vsnon, trcrn, & - uvel, vvel, bound_state - use ice_grid, only: tarea + uvel, vvel, bound_state, uvelE, vvelN + use ice_grid, only: tarea, grid_ice use ice_calendar, only: istep1 use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_advect, timer_bound @@ -273,88 +275,87 @@ subroutine transport_remap (dt) ! local variables - integer (kind=int_kind) :: & - iblk ,&! block index - ilo,ihi,jlo,jhi,&! beginning and end of physical domain - n ,&! ice category index - nt, nt1, nt2 ! tracer indices - - real (kind=dbl_kind), & - dimension (nx_block,ny_block,0:ncat,max_blocks) :: & - aim ,&! mean ice category areas in each grid cell + integer (kind=int_kind) :: & + iblk , & ! block index + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + n , & ! ice category index + nt, nt1, nt2 ! tracer indices + + real (kind=dbl_kind), dimension (nx_block,ny_block,0:ncat,max_blocks) :: & + aim , & ! mean ice category areas in each grid cell aimask ! = 1. if ice is present, = 0. otherwise - real (kind=dbl_kind), & - dimension (nx_block,ny_block,ntrace,ncat,max_blocks) :: & - trm ,&! mean tracer values in each grid cell + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace,ncat,max_blocks) :: & + trm , & ! mean tracer values in each grid cell trmask ! = 1. if tracer is present, = 0. otherwise - logical (kind=log_kind) :: & + logical (kind=log_kind) :: & ckflag ! if true, abort the model - integer (kind=int_kind) :: & - istop, jstop ! indices of grid cell where model aborts + integer (kind=int_kind) :: & + istop, jstop ! indices of grid cell where model aborts - integer (kind=int_kind), dimension(0:ncat,max_blocks) :: & + integer (kind=int_kind), dimension(0:ncat,max_blocks) :: & icellsnc ! number of cells with ice - integer (kind=int_kind), & - dimension(nx_block*ny_block,0:ncat,max_blocks) :: & - indxinc, indxjnc ! compressed i/j indices + integer (kind=int_kind), dimension(nx_block*ny_block,0:ncat,max_blocks) :: & + indxinc, indxjnc ! compressed i/j indices integer (kind=int_kind) :: & - ntrcr + ntrcr ! type (block) :: & - this_block ! block information for current block - + this_block ! block information for current block + ! variables related to optional bug checks - logical (kind=log_kind), parameter :: & + logical (kind=log_kind), parameter :: & l_monotonicity_check = .false. ! if true, check monotonicity - real (kind=dbl_kind), dimension(0:ncat) :: & - asum_init ,&! initial global ice area + real (kind=dbl_kind), dimension(0:ncat) :: & + asum_init , & ! initial global ice area asum_final ! final global ice area - real (kind=dbl_kind), dimension(ntrace,ncat) :: & - atsum_init ,&! initial global ice area*tracer + real (kind=dbl_kind), dimension(ntrace,ncat) :: & + atsum_init , & ! initial global ice area*tracer atsum_final ! final global ice area*tracer - real (kind=dbl_kind), dimension (:,:,:,:,:), allocatable :: & - tmin ,&! local min tracer - tmax ! local max tracer + real (kind=dbl_kind), dimension (:,:,:,:,:), allocatable :: & + tmin , & ! local min tracer + tmax ! 
local max tracer - integer (kind=int_kind) :: alloc_error + integer (kind=int_kind) :: & + alloc_error real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & work1 - character(len=char_len_long) :: fieldid + character(len=char_len_long) :: & + fieldid character(len=*), parameter :: subname = '(transport_remap)' - call ice_timer_start(timer_advect) ! advection + call ice_timer_start(timer_advect) ! advection call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) -!---!------------------------------------------------------------------- -!---! Prepare for remapping. -!---! Initialize, update ghost cells, fill tracer arrays. -!---!------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Prepare for remapping. + ! Initialize, update ghost cells, fill tracer arrays. + !------------------------------------------------------------------- ckflag = .false. istop = 0 jstop = 0 - !------------------------------------------------------------------- - ! Compute open water area in each grid cell. - ! Note: An aggregate_area call is needed only if the open - ! water area has changed since the previous call. - ! Here we assume that aice0 is up to date. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute open water area in each grid cell. + ! Note: An aggregate_area call is needed only if the open + ! water area has changed since the previous call. + ! Here we assume that aice0 is up to date. + !------------------------------------------------------------------- ! !$OMP PARALLEL DO PRIVATE(i,j,iblk) SCHEDULE(runtime) ! do iblk = 1, nblocks @@ -363,16 +364,16 @@ subroutine transport_remap (dt) ! call aggregate_area (ncat, ! aicen(i,j,:,iblk), & ! aice (i,j, iblk), & -! aice0(i,j, iblk)) +! aice0(i,j, iblk)) ! enddo ! enddo ! enddo ! !$OMP END PARALLEL DO - !------------------------------------------------------------------- - ! Ghost cell updates for state variables. - ! Commented out because ghost cells are updated after cleanup_itd. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Ghost cell updates for state variables. + ! Commented out because ghost cells are updated after cleanup_itd. + !------------------------------------------------------------------- ! call ice_timer_start(timer_bound) ! call ice_HaloUpdate (aice0, halo_info, & @@ -384,11 +385,11 @@ subroutine transport_remap (dt) ! call ice_timer_stop(timer_bound) - !------------------------------------------------------------------- - ! Ghost cell updates for ice velocity. - ! Commented out because ghost cell velocities are computed - ! in ice_dyn_evp. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Ghost cell updates for ice velocity. + ! Commented out because ghost cell velocities are computed + ! in ice_dyn_evp. + !------------------------------------------------------------------- ! call ice_timer_start(timer_bound) ! 
call ice_HaloUpdate (uvel, halo_info, & @@ -401,29 +402,29 @@ subroutine transport_remap (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - !------------------------------------------------------------------- - ! Fill arrays with fields to be remapped. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Fill arrays with fields to be remapped. + !------------------------------------------------------------------- - call state_to_tracers(nx_block, ny_block, & - ntrcr, ntrace, & - aice0(:,:, iblk), aicen(:,:,:,iblk), & - trcrn(:,:,:,:,iblk), & - vicen(:,:,:,iblk), vsnon(:,:, :,iblk), & - aim (:,:,:,iblk), trm (:,:,:,:,iblk)) + call state_to_tracers(nx_block, ny_block, & + ntrcr, ntrace, & + aice0(:,:, iblk), aicen(:,:,:, iblk), & + trcrn(:,:,:,:,iblk), & + vicen(:,:,:, iblk), vsnon(:,:,:, iblk), & + aim (:,:,:, iblk), trm (:,:,:,:,iblk)) enddo !$OMP END PARALLEL DO -!---!------------------------------------------------------------------- -!---! Optional conservation and monotonicity checks. -!---!------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Optional conservation and monotonicity checks. + !------------------------------------------------------------------- if (conserv_check) then - !------------------------------------------------------------------- - ! Compute initial values of globally conserved quantities. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute initial values of globally conserved quantities. + !------------------------------------------------------------------- do n = 0, ncat asum_init(n) = global_sum(aim(:,:,n,:), distrb_info, & @@ -458,7 +459,7 @@ subroutine transport_remap (dt) enddo ! n endif ! conserv_check - + if (l_monotonicity_check) then allocate(tmin(nx_block,ny_block,ntrace,ncat,max_blocks), & @@ -473,33 +474,33 @@ subroutine transport_remap (dt) !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n) SCHEDULE(runtime) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - !------------------------------------------------------------------- - ! Compute masks. - ! Masks are used to prevent tracer values in cells without ice - ! from being used in the monotonicity check. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute masks. + ! Masks are used to prevent tracer values in cells without ice + ! from being used in the monotonicity check. + !------------------------------------------------------------------- call make_masks (nx_block, ny_block, & ilo, ihi, jlo, jhi, & nghost, ntrace, & has_dependents, & - icellsnc(:,iblk), & - indxinc(:,:,iblk), indxjnc(:,:,iblk), & - aim(:,:,:,iblk), aimask(:,:,:,iblk), & + icellsnc (:,iblk), & + indxinc(:,:,iblk), indxjnc(:,:, iblk), & + aim(:,:,:, iblk), aimask(:,:,:, iblk), & trm(:,:,:,:,iblk), trmask(:,:,:,:,iblk)) - !------------------------------------------------------------------- - ! Compute local max and min of tracer fields. 
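!     On a single task the conservation test above reduces to comparing
!     area-weighted sums taken before and after the remap against a
!     round-off tolerance; the model's version uses global_sum over the
!     distributed domain.  A minimal serial sketch (the field values
!     and the permutation standing in for transport are made up):

      program conserv_demo
         implicit none
         integer, parameter :: dbl = selected_real_kind(13)
         real (kind=dbl), parameter :: puny = 1.0e-11_dbl
         real (kind=dbl) :: a(4), b(4), asum_init, asum_final, diff
         a = (/ 0.1_dbl, 0.4_dbl, 0.2_dbl, 0.3_dbl /)
         asum_init = sum(a)
         b = a((/ 2, 1, 4, 3 /))   ! a "transport" that only moves area
         asum_final = sum(b)
         diff = asum_final - asum_init
         if (abs(asum_init) > puny .and. abs(diff/asum_init) > puny) then
            print *, 'area conservation error:', diff
         else
            print *, 'area conserved to round-off'
         endif
      end program conserv_demo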
- !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute local max and min of tracer fields. + !------------------------------------------------------------------- do n = 1, ncat - call local_max_min & + call local_max_min & (nx_block, ny_block, & ilo, ihi, jlo, jhi, & trm (:,:,:,n,iblk), & @@ -518,16 +519,16 @@ subroutine transport_remap (dt) !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n) SCHEDULE(runtime) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi do n = 1, ncat - call quasilocal_max_min (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - tmin(:,:,:,n,iblk), & + call quasilocal_max_min (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + tmin(:,:,:,n,iblk), & tmax(:,:,:,n,iblk)) enddo enddo @@ -535,38 +536,49 @@ subroutine transport_remap (dt) endif ! l_monotonicity_check - !------------------------------------------------------------------- - ! Main remapping routine: Step ice area and tracers forward in time. - !------------------------------------------------------------------- - - call horizontal_remap (dt, ntrace, & - uvel (:,:,:), vvel (:,:,:), & - aim (:,:,:,:), trm (:,:,:,:,:), & - l_fixed_area, & - tracer_type, depend, & - has_dependents, integral_order, & - l_dp_midpt) - - !------------------------------------------------------------------- - ! Given new fields, recompute state variables. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Main remapping routine: Step ice area and tracers forward in time. + !------------------------------------------------------------------- + + if (grid_ice == 'CD' .or. grid_ice == 'C') then + call horizontal_remap (dt, ntrace, & + uvel (:,:,:), vvel (:,:,:), & + aim (:,:,:,:), trm(:,:,:,:,:), & + l_fixed_area, & + tracer_type, depend, & + has_dependents, integral_order, & + l_dp_midpt, grid_ice, & + uvelE (:,:,:), vvelN (:,:,:)) + else + call horizontal_remap (dt, ntrace, & + uvel (:,:,:), vvel (:,:,:), & + aim (:,:,:,:), trm(:,:,:,:,:), & + l_fixed_area, & + tracer_type, depend, & + has_dependents, integral_order, & + l_dp_midpt, grid_ice) + endif + + !------------------------------------------------------------------- + ! Given new fields, recompute state variables. + !------------------------------------------------------------------- !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - call tracers_to_state (nx_block, ny_block, & - ntrcr, ntrace, & - aim (:,:,:,iblk), trm (:,:,:,:,iblk), & - aice0(:,:, iblk), aicen(:,:,:,iblk), & - trcrn(:,:,:,:,iblk), & - vicen(:,:,:,iblk), vsnon(:,:, :,iblk)) + call tracers_to_state (nx_block, ny_block, & + ntrcr, ntrace, & + aim (:,:,:, iblk), trm (:,:,:,:,iblk), & + aice0(:,:, iblk), aicen(:,:,:, iblk), & + trcrn(:,:,:,:,iblk), & + vicen(:,:,:, iblk), vsnon(:,:, :,iblk)) enddo ! iblk !$OMP END PARALLEL DO - !------------------------------------------------------------------- - ! Ghost cell updates for state variables. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Ghost cell updates for state variables. 
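!     The grid_ice branch above exists because on the B grid the remap
!     is driven by the corner velocities alone, while on the C and CD
!     grids the face velocities uvelE and vvelN are passed through as
!     extra arguments.  A minimal sketch of that dispatch pattern
!     (demo_remap is hypothetical, standing in for horizontal_remap):

      program grid_dispatch_demo
         implicit none
         character(len=2) :: grid_ice
         real :: uvel(2,2), vvel(2,2), uvelE(2,2), vvelN(2,2)
         uvel = 1.0 ; vvel = 2.0 ; uvelE = 3.0 ; vvelN = 4.0
         grid_ice = 'C'
         if (grid_ice == 'CD' .or. grid_ice == 'C') then
            call demo_remap (uvel, vvel, uvelE, vvelN)
         else
            call demo_remap (uvel, vvel)
         endif
      contains
         subroutine demo_remap (u, v, uE, vN)
            real, intent(in) :: u(:,:), v(:,:)
            real, intent(in), optional :: uE(:,:), vN(:,:)
            if (present(uE) .and. present(vN)) then
               print *, 'C/CD grid: face velocities', uE(1,1), vN(1,1)
            else
               print *, 'B grid: corner velocities', u(1,1), v(1,1)
            endif
         end subroutine demo_remap
      end program grid_dispatch_demo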
+ !------------------------------------------------------------------- call ice_timer_start(timer_bound) @@ -576,14 +588,14 @@ subroutine transport_remap (dt) call ice_timer_stop(timer_bound) -!---!------------------------------------------------------------------- -!---! Optional conservation and monotonicity checks -!---!------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Optional conservation and monotonicity checks + !------------------------------------------------------------------- - !------------------------------------------------------------------- - ! Compute final values of globally conserved quantities. - ! Check global conservation of area and area*tracers. (Optional) - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute final values of globally conserved quantities. + ! Check global conservation of area and area*tracers. (Optional) + !------------------------------------------------------------------- if (conserv_check) then @@ -650,14 +662,14 @@ subroutine transport_remap (dt) endif ! conserv_check - !------------------------------------------------------------------- - ! Check tracer monotonicity. (Optional) - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Check tracer monotonicity. (Optional) + !------------------------------------------------------------------- if (l_monotonicity_check) then !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n,ckflag,istop,jstop) SCHEDULE(runtime) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -668,13 +680,12 @@ subroutine transport_remap (dt) jstop = 0 do n = 1, ncat - call check_monotonicity & - (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - tmin(:,:,:,n,iblk), tmax(:,:,:,n,iblk), & - aim (:,:, n,iblk), trm (:,:,:,n,iblk), & - ckflag, & - istop, jstop) + call check_monotonicity (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + tmin(:,:,:,n,iblk), tmax(:,:,:,n,iblk), & + aim (:,:, n,iblk), trm (:,:,:,n,iblk), & + ckflag, & + istop, jstop) if (ckflag) then write (nu_diag,*) 'istep1, my_task, iblk, cat =', & @@ -691,8 +702,8 @@ subroutine transport_remap (dt) endif ! l_monotonicity_check - call ice_timer_stop(timer_advect) ! advection - + call ice_timer_stop(timer_advect) ! advection + end subroutine transport_remap !======================================================================= @@ -708,37 +719,36 @@ subroutine transport_upwind (dt) use ice_domain_size, only: ncat, max_blocks use ice_state, only: aice0, aicen, vicen, vsnon, trcrn, & uvel, vvel, trcr_depend, bound_state, trcr_base, & - n_trcr_strata, nt_strata - use ice_grid, only: HTE, HTN, tarea, tmask + n_trcr_strata, nt_strata, uvelE, vvelN + use ice_grid, only: HTE, HTN, tarea, tmask, grid_ice use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_bound, timer_advect - real (kind=dbl_kind), intent(in) :: & + real (kind=dbl_kind), intent(in) :: & dt ! time step ! local variables - integer (kind=int_kind) :: & - ntrcr, & ! + integer (kind=int_kind) :: & + ntrcr , & ! narr ! max number of state variable arrays - integer (kind=int_kind) :: & - i, j, iblk ,&! horizontal indices + integer (kind=int_kind) :: & + i, j, iblk , & ! 
horizontal indices ilo,ihi,jlo,jhi ! beginning and end of physical domain - real (kind=dbl_kind), dimension (nx_block,ny_block,nblocks) :: & + real (kind=dbl_kind), dimension (nx_block,ny_block,nblocks) :: & uee, vnn ! cell edge velocities - real (kind=dbl_kind), & - dimension (:,:,:,:), allocatable :: & + real (kind=dbl_kind), dimension (:,:,:,:), allocatable :: & works ! work array type (block) :: & - this_block ! block information for current block + this_block ! block information for current block character(len=*), parameter :: subname = '(transport_upwind)' - call ice_timer_start(timer_advect) ! advection + call ice_timer_start(timer_advect) ! advection call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_warnings_flush(nu_diag) @@ -749,61 +759,64 @@ subroutine transport_upwind (dt) allocate (works(nx_block,ny_block,narr,max_blocks)) - !------------------------------------------------------------------- - ! Get ghost cell values of state variables. - ! (Assume velocities are already known for ghost cells, also.) - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Get ghost cell values of state variables. + ! (Assume velocities are already known for ghost cells, also.) + !------------------------------------------------------------------- ! call bound_state (aicen, & ! vicen, vsnon, & ! ntrcr, trcrn) ! call ice_timer_start(timer_bound) -! call ice_HaloUpdate (uvel, halo_info, & +! call ice_HaloUpdate (uvel, halo_info, & ! field_loc_NEcorner, field_type_vector) -! call ice_HaloUpdate (vvel, halo_info, & +! call ice_HaloUpdate (vvel, halo_info, & ! field_loc_NEcorner, field_type_vector) ! call ice_timer_stop(timer_bound) - !------------------------------------------------------------------- - ! Average corner velocities to edges. - !------------------------------------------------------------------- - - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) SCHEDULE(runtime) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi + !------------------------------------------------------------------- + ! Average corner velocities to edges. + !------------------------------------------------------------------- + if (grid_ice == 'CD' .or. 
grid_ice == 'C') then + uee(:,:,:)=uvelE(:,:,:) + vnn(:,:,:)=vvelN(:,:,:) + else + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) SCHEDULE(runtime) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - uee(i,j,iblk) = p5*(uvel(i,j,iblk) + uvel(i,j-1,iblk)) - vnn(i,j,iblk) = p5*(vvel(i,j,iblk) + vvel(i-1,j,iblk)) - enddo + do j = jlo, jhi + do i = ilo, ihi + uee(i,j,iblk) = p5*(uvel(i,j,iblk) + uvel(i ,j-1,iblk)) + vnn(i,j,iblk) = p5*(vvel(i,j,iblk) + vvel(i-1,j ,iblk)) + enddo + enddo enddo - enddo - !$OMP END PARALLEL DO + !$OMP END PARALLEL DO - call ice_timer_start(timer_bound) - call ice_HaloUpdate (uee, halo_info, & - field_loc_Eface, field_type_vector) - call ice_HaloUpdate (vnn, halo_info, & - field_loc_Nface, field_type_vector) - call ice_timer_stop(timer_bound) + call ice_timer_start(timer_bound) + call ice_HaloUpdate (uee, halo_info, & + field_loc_Eface, field_type_vector) + call ice_HaloUpdate (vnn, halo_info, & + field_loc_Nface, field_type_vector) + call ice_timer_stop(timer_bound) + endif !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) SCHEDULE(runtime) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - - !----------------------------------------------------------------- - ! fill work arrays with fields to be advected - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! fill work arrays with fields to be advected + !----------------------------------------------------------------- call state_to_work (nx_block, ny_block, & ntrcr, & @@ -812,21 +825,21 @@ subroutine transport_upwind (dt) vicen (:,:, :,iblk), vsnon (:,:, :,iblk), & aice0 (:,:, iblk), works (:,:, :,iblk)) - !----------------------------------------------------------------- - ! advect - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! advect + !----------------------------------------------------------------- call upwind_field (nx_block, ny_block, & ilo, ihi, jlo, jhi, & dt, & narr, works(:,:,:,iblk), & - uee(:,:,iblk), vnn (:,:,iblk), & - HTE(:,:,iblk), HTN (:,:,iblk), & + uee (:,:,iblk), vnn (:,:,iblk), & + HTE (:,:,iblk), HTN (:,:,iblk), & tarea(:,:,iblk)) - !----------------------------------------------------------------- - ! convert work arrays back to state variables - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! convert work arrays back to state variables + !----------------------------------------------------------------- call work_to_state (nx_block, ny_block, & ntrcr, narr, & @@ -835,16 +848,16 @@ subroutine transport_upwind (dt) tmask(:,:, iblk), & aicen(:,:, :,iblk), trcrn (:,:,:,:,iblk), & vicen(:,:, :,iblk), vsnon (:,:, :,iblk), & - aice0(:,:, iblk), works (:,:, :,iblk)) + aice0(:,:, iblk), works (:,:, :,iblk)) enddo ! iblk !$OMP END PARALLEL DO - + deallocate (works) - !------------------------------------------------------------------- - ! Ghost cell updates for state variables. 
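!     On the B grid the east-face and north-face velocities needed by
!     the upwind scheme are built by averaging the two adjacent NE
!     corner values, as in the loop above; on the C and CD grids uvelE
!     and vvelN already live on the faces and are copied through.  A
!     minimal single-block sketch of the averaging (array sizes and the
!     random fields are illustrative; the model fills the halo
!     afterwards):

      program edge_velocity_demo
         implicit none
         integer, parameter :: dbl = selected_real_kind(13)
         real (kind=dbl), parameter :: p5 = 0.5_dbl
         integer, parameter :: nx = 4, ny = 4
         real (kind=dbl) :: uvel(nx,ny), vvel(nx,ny), uee(nx,ny), vnn(nx,ny)
         integer :: i, j
         call random_number(uvel)
         call random_number(vvel)
         uee = 0.0_dbl ; vnn = 0.0_dbl
         do j = 2, ny               ! interior points only
            do i = 2, nx
               uee(i,j) = p5*(uvel(i,j) + uvel(i ,j-1))  ! east face
               vnn(i,j) = p5*(vvel(i,j) + vvel(i-1,j ))  ! north face
            enddo
         enddo
         print *, 'uee(2,2) =', uee(2,2), ' vnn(2,2) =', vnn(2,2)
      end program edge_velocity_demo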
- !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Ghost cell updates for state variables. + !------------------------------------------------------------------- call ice_timer_start(timer_bound) @@ -854,7 +867,7 @@ subroutine transport_upwind (dt) call ice_timer_stop(timer_bound) - call ice_timer_stop(timer_advect) ! advection + call ice_timer_stop(timer_advect) ! advection end subroutine transport_upwind @@ -864,7 +877,7 @@ end subroutine transport_upwind !======================================================================= ! ! Fill ice area and tracer arrays. -! Assume that the advected tracers are hicen, hsnon, trcrn, +! Assume that the advected tracers are hicen, hsnon, trcrn, ! qicen(1:nilyr), and qsnon(1:nslyr). ! This subroutine must be modified if a different set of tracers ! is to be transported. The rule for ordering tracers @@ -883,47 +896,47 @@ subroutine state_to_tracers (nx_block, ny_block, & use ice_domain_size, only: ncat, nslyr integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - ntrcr , & ! number of tracers in use - ntrace ! number of tracers in use incl. hi, hs + nx_block, ny_block, & ! block dimensions + ntrcr , & ! number of tracers in use + ntrace ! number of tracers in use incl. hi, hs real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - aice0 ! fractional open water area + aice0 ! fractional open water area real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), intent(in) :: & - aicen ,&! fractional ice area - vicen ,&! volume per unit area of ice (m) - vsnon ! volume per unit area of snow (m) + aicen , & ! fractional ice area + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), intent(in) :: & - trcrn ! ice area tracers + trcrn ! ice area tracers real (kind=dbl_kind), dimension (nx_block,ny_block,0:ncat), intent(out) :: & - aim ! mean ice area in each grid cell + aim ! mean ice area in each grid cell real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace,ncat), intent(out) :: & - trm ! mean tracer values in each grid cell + trm ! mean tracer values in each grid cell ! local variables integer (kind=int_kind) :: & - nt_qsno ,&! - i, j, n ,&! standard indices - it, kt ,&! tracer indices - ij ! combined i/j index + nt_qsno , & ! + i, j, n , & ! standard indices + it, kt , & ! tracer indices + ij ! combined i/j index real (kind=dbl_kind) :: & - puny ,&! - rhos ,&! - Lfresh ,&! - w1 ! work variable + puny , & ! + rhos , & ! + Lfresh , & ! + w1 ! work variable integer (kind=int_kind), dimension(nx_block*ny_block,0:ncat) :: & - indxi ,&! compressed i/j indices - indxj + indxi , & ! compressed i/j indices + indxj integer (kind=int_kind), dimension(0:ncat) :: & - icells ! number of cells with ice + icells ! number of cells with ice character(len=*), parameter :: subname = '(state_to_tracers)' @@ -940,9 +953,9 @@ subroutine state_to_tracers (nx_block, ny_block, & trm(:,:,:,n) = c0 - !------------------------------------------------------------------- - ! Find grid cells where ice is present and fill area array. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Find grid cells where ice is present and fill area array. 
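!     The loop that follows builds the standard CICE gather: scan the
!     block once, record the (i,j) of every cell holding ice, then do
!     all further work over just those cells.  A minimal sketch of the
!     pattern on a toy 3x3 block (the areas are made up):

      program gather_demo
         implicit none
         integer, parameter :: dbl = selected_real_kind(13)
         real (kind=dbl), parameter :: puny = 1.0e-11_dbl
         integer, parameter :: nx = 3, ny = 3
         real (kind=dbl) :: aim(nx,ny)
         integer :: indxi(nx*ny), indxj(nx*ny)
         integer :: i, j, ij, icells
         aim = 0.0_dbl
         aim(2,2) = 0.7_dbl
         aim(3,1) = 0.2_dbl
         icells = 0
         do j = 1, ny
            do i = 1, nx
               if (aim(i,j) > puny) then
                  icells = icells + 1
                  indxi(icells) = i
                  indxj(icells) = j
               endif
            enddo
         enddo
         do ij = 1, icells          ! work only on ice-covered cells
            i = indxi(ij)
            j = indxj(ij)
            print *, 'ice cell:', i, j, ' area:', aim(i,j)
         enddo
      end program gather_demo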
+ !------------------------------------------------------------------- icells(n) = 0 do j = 1, ny_block @@ -956,13 +969,13 @@ subroutine state_to_tracers (nx_block, ny_block, & endif ! aim > puny enddo enddo - - !------------------------------------------------------------------- - ! Fill tracer array - ! Note: If aice > 0, then hice > 0, but we can have hsno = 0. - ! Alse note: We transport qice*nilyr rather than qice, so as to - ! avoid extra operations here and in tracers_to_state. - !------------------------------------------------------------------- + + !------------------------------------------------------------------- + ! Fill tracer array + ! Note: If aice > 0, then hice > 0, but we can have hsno = 0. + ! Alse note: We transport qice*nilyr rather than qice, so as to + ! avoid extra operations here and in tracers_to_state. + !------------------------------------------------------------------- do ij = 1, icells(n) i = indxi(ij,n) @@ -989,7 +1002,7 @@ subroutine state_to_tracers (nx_block, ny_block, & endif enddo enddo ! ncat - + end subroutine state_to_tracers !======================================================================= @@ -1008,42 +1021,42 @@ subroutine tracers_to_state (nx_block, ny_block, & use ice_domain_size, only: ncat, nslyr integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - ntrcr , & ! number of tracers in use - ntrace ! number of tracers in use incl. hi, hs + nx_block, ny_block, & ! block dimensions + ntrcr , & ! number of tracers in use + ntrace ! number of tracers in use incl. hi, hs real (kind=dbl_kind), dimension (nx_block,ny_block,0:ncat), intent(in) :: & - aim ! fractional ice area + aim ! fractional ice area real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace,ncat), intent(in) :: & - trm ! mean tracer values in each grid cell + trm ! mean tracer values in each grid cell real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - aice0 ! fractional ice area + aice0 ! fractional ice area real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), intent(inout) :: & - aicen ,&! fractional ice area - vicen ,&! volume per unit area of ice (m) - vsnon ! volume per unit area of snow (m) + aicen , & ! fractional ice area + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), intent(inout) :: & - trcrn ! tracers + trcrn ! tracers ! local variables integer (kind=int_kind) :: & - nt_qsno ,&! - i, j, n ,&! standard indices - it, kt ,&! tracer indices - icells ,&! number of cells with ice - ij + nt_qsno , & ! + i, j, n , & ! standard indices + it, kt , & ! tracer indices + icells , & ! number of cells with ice + ij real (kind=dbl_kind) :: & - rhos, & - Lfresh + rhos , & ! + Lfresh ! integer (kind=int_kind), dimension (nx_block*ny_block) :: & - indxi, indxj ! compressed indices + indxi, indxj ! compressed indices character(len=*), parameter :: subname = '(tracers_to_state)' @@ -1057,20 +1070,20 @@ subroutine tracers_to_state (nx_block, ny_block, & do n = 1, ncat - icells = 0 - do j = 1, ny_block - do i = 1, nx_block - if (aim(i,j,n) > c0) then - icells = icells + 1 - indxi(icells) = i - indxj(icells) = j - endif - enddo - enddo + icells = 0 + do j = 1, ny_block + do i = 1, nx_block + if (aim(i,j,n) > c0) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo + enddo - !------------------------------------------------------------------- - ! Compute state variables. 
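!     The code that follows inverts state_to_tracers: area-weighted
!     mean thicknesses become volumes again, and the rhos*Lfresh offset
!     added to the snow enthalpy for transport is subtracted back out.
!     A one-cell sketch of that inverse mapping with nominal constants
!     (illustrative values, not queried from icepack):

      program state_demo
         implicit none
         integer, parameter :: dbl = selected_real_kind(13)
         real (kind=dbl), parameter :: rhos = 330.0_dbl     ! snow density
         real (kind=dbl), parameter :: Lfresh = 3.34e5_dbl  ! latent heat
         real (kind=dbl) :: aicen, hice, hsno, qsno_trans
         real (kind=dbl) :: vicen, vsnon, qsno
         aicen = 0.8_dbl                   ! remapped ice area
         hice  = 1.5_dbl                   ! tracer 1: mean ice thickness
         hsno  = 0.2_dbl                   ! tracer 2: mean snow thickness
         qsno_trans = 2.0e6_dbl            ! transported (offset) enthalpy
         vicen = aicen*hice                ! ice volume per unit area
         vsnon = aicen*hsno                ! snow volume per unit area
         qsno  = qsno_trans - rhos*Lfresh  ! remove transport offset
         print *, 'vicen =', vicen, ' vsnon =', vsnon, ' qsno =', qsno
      end program state_demo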
- !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute state variables. + !------------------------------------------------------------------- do ij = 1, icells i = indxi(ij) @@ -1088,7 +1101,7 @@ subroutine tracers_to_state (nx_block, ny_block, & j = indxj(ij) trcrn(i,j,it,n) = trm(i,j,kt+it,n) - rhos*Lfresh ! snow enthalpy enddo - else + else do ij = 1, icells i = indxi(ij) j = indxj(ij) @@ -1115,24 +1128,24 @@ subroutine global_conservation (ckflag, fieldid, & fieldid ! field information string real (kind=dbl_kind), intent(in) :: & - asum_init ,&! initial global ice area + asum_init , & ! initial global ice area asum_final ! final global ice area real (kind=dbl_kind), dimension(ntrace), intent(in), optional :: & - atsum_init ,&! initial global ice area*tracer + atsum_init, & ! initial global ice area*tracer atsum_final ! final global ice area*tracer logical (kind=log_kind), intent(inout) :: & - ckflag ! if true, abort on return + ckflag ! if true, abort on return ! local variables integer (kind=int_kind) :: & - nt ! tracer index + nt ! tracer index real (kind=dbl_kind) :: & - puny ,&! - diff ! difference between initial and final values + puny , & ! + diff ! difference between initial and final values character(len=*), parameter :: subname = '(global_conservation)' @@ -1155,21 +1168,21 @@ subroutine global_conservation (ckflag, fieldid, & endif if (present(atsum_init)) then - do nt = 1, ntrace - if (abs(atsum_init(nt)) > puny) then - diff = atsum_final(nt) - atsum_init(nt) - if (abs(diff/atsum_init(nt)) > puny) then - ckflag = .true. - write (nu_diag,*) - write (nu_diag,*) subname,'Ice area*tracer conserv error ', trim(fieldid),nt - write (nu_diag,*) subname,' Tracer index =', nt - write (nu_diag,*) subname,' Initial global area*tracer =', atsum_init(nt) - write (nu_diag,*) subname,' Final global area*tracer =', atsum_final(nt) - write (nu_diag,*) subname,' Fractional error =', abs(diff)/atsum_init(nt) - write (nu_diag,*) subname,' atsum_final-atsum_init =', diff + do nt = 1, ntrace + if (abs(atsum_init(nt)) > puny) then + diff = atsum_final(nt) - atsum_init(nt) + if (abs(diff/atsum_init(nt)) > puny) then + ckflag = .true. + write (nu_diag,*) + write (nu_diag,*) subname,'Ice area*tracer conserv error ', trim(fieldid),nt + write (nu_diag,*) subname,' Tracer index =', nt + write (nu_diag,*) subname,' Initial global area*tracer =', atsum_init(nt) + write (nu_diag,*) subname,' Final global area*tracer =', atsum_final(nt) + write (nu_diag,*) subname,' Fractional error =', abs(diff)/atsum_init(nt) + write (nu_diag,*) subname,' atsum_final-atsum_init =', diff + endif endif - endif - enddo + enddo endif ! present(atsum_init) end subroutine global_conservation @@ -1179,7 +1192,7 @@ end subroutine global_conservation ! At each grid point, compute the local max and min of a scalar ! field phi: i.e., the max and min values in the nine-cell region ! consisting of the home cell and its eight neighbors. -! +! ! To extend to the neighbors of the neighbors (25 cells in all), ! follow this call with a call to quasilocal_max_min. ! @@ -1192,33 +1205,33 @@ subroutine local_max_min (nx_block, ny_block, & aimask, trmask) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block,&! block dimensions - ilo,ihi,jlo,jhi ! beginning and end of physical domain + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi ! 
beginning and end of physical domain real (kind=dbl_kind), intent(in), dimension(nx_block,ny_block) :: & - aimask ! ice area mask + aimask ! ice area mask real (kind=dbl_kind), intent(in), dimension (nx_block,ny_block,ntrace) :: & - trm ,&! tracer fields - trmask ! tracer mask + trm , & ! tracer fields + trmask ! tracer mask real (kind=dbl_kind), intent(out), dimension (nx_block,ny_block,ntrace) :: & - tmin ,&! local min tracer - tmax ! local max tracer + tmin , & ! local min tracer + tmax ! local max tracer ! local variables integer (kind=int_kind) :: & - i, j ,&! horizontal indices - nt, nt1 ! tracer indices + i, j , & ! horizontal indices + nt, nt1 ! tracer indices real (kind=dbl_kind), dimension(nx_block,ny_block) :: & - phimask ! aimask or trmask, as appropriate + phimask ! aimask or trmask, as appropriate real (kind=dbl_kind) :: & - phi_nw, phi_n, phi_ne ,&! field values in 8 neighbor cells - phi_w, phi_e ,& - phi_sw, phi_s, phi_se + phi_nw, phi_n, phi_ne , & ! field values in 8 neighbor cells + phi_w , phi_e , & + phi_sw, phi_s, phi_se character(len=*), parameter :: subname = '(local_max_min)' @@ -1243,46 +1256,46 @@ subroutine local_max_min (nx_block, ny_block, & endif -!----------------------------------------------------------------------- -! Store values of trm in the 8 neighbor cells. -! If aimask = 1, use the true value; otherwise use the home cell value -! so that non-physical values of phi do not contribute to the gradient. -!----------------------------------------------------------------------- + !----------------------------------------------------------------------- + ! Store values of trm in the 8 neighbor cells. + ! If aimask = 1, use the true value; otherwise use the home cell value + ! so that non-physical values of phi do not contribute to the gradient. + !----------------------------------------------------------------------- do j = jlo, jhi - do i = ilo, ihi - - phi_nw = phimask(i-1,j+1) * trm(i-1,j+1,nt) & - + (c1-phimask(i-1,j+1))* trm(i, j, nt) - phi_n = phimask(i, j+1) * trm(i, j+1,nt) & - + (c1-phimask(i, j+1))* trm(i, j, nt) - phi_ne = phimask(i+1,j+1) * trm(i+1,j+1,nt) & - + (c1-phimask(i+1,j+1))* trm(i, j, nt) - phi_w = phimask(i-1,j) * trm(i-1,j, nt) & - + (c1-phimask(i-1,j)) * trm(i, j, nt) - phi_e = phimask(i+1,j) * trm(i+1,j, nt) & - + (c1-phimask(i+1,j)) * trm(i, j, nt) - phi_sw = phimask(i-1,j-1) * trm(i-1,j-1,nt) & - + (c1-phimask(i-1,j-1))* trm(i, j, nt) - phi_s = phimask(i, j-1) * trm(i, j-1,nt) & - + (c1-phimask(i, j-1))* trm(i, j, nt) - phi_se = phimask(i+1,j-1) * trm(i+1,j-1,nt) & - + (c1-phimask(i+1,j-1))* trm(i, j, nt) - -!----------------------------------------------------------------------- -! Compute the minimum and maximum among the nine local cells. -!----------------------------------------------------------------------- - - tmax(i,j,nt) = max (phi_nw, phi_n, phi_ne, phi_w, & - trm(i,j,nt), phi_e, phi_sw, phi_s, phi_se) - - tmin(i,j,nt) = min (phi_nw, phi_n, phi_ne, phi_w, & - trm(i,j,nt), phi_e, phi_sw, phi_s, phi_se) - - enddo ! i - enddo ! j + do i = ilo, ihi - enddo ! 
nt + phi_nw = phimask(i-1,j+1) * trm(i-1,j+1,nt) & + + (c1-phimask(i-1,j+1))* trm(i, j, nt) + phi_n = phimask(i, j+1) * trm(i, j+1,nt) & + + (c1-phimask(i, j+1))* trm(i, j, nt) + phi_ne = phimask(i+1,j+1) * trm(i+1,j+1,nt) & + + (c1-phimask(i+1,j+1))* trm(i, j, nt) + phi_w = phimask(i-1,j) * trm(i-1,j, nt) & + + (c1-phimask(i-1,j)) * trm(i, j, nt) + phi_e = phimask(i+1,j) * trm(i+1,j, nt) & + + (c1-phimask(i+1,j)) * trm(i, j, nt) + phi_sw = phimask(i-1,j-1) * trm(i-1,j-1,nt) & + + (c1-phimask(i-1,j-1))* trm(i, j, nt) + phi_s = phimask(i, j-1) * trm(i, j-1,nt) & + + (c1-phimask(i, j-1))* trm(i, j, nt) + phi_se = phimask(i+1,j-1) * trm(i+1,j-1,nt) & + + (c1-phimask(i+1,j-1))* trm(i, j, nt) + + !----------------------------------------------------------------------- + ! Compute the minimum and maximum among the nine local cells. + !----------------------------------------------------------------------- + + tmax(i,j,nt) = max (phi_nw, phi_n, phi_ne, phi_w, & + trm(i,j,nt), phi_e, phi_sw, phi_s, phi_se) + + tmin(i,j,nt) = min (phi_nw, phi_n, phi_ne, phi_w, & + trm(i,j,nt), phi_e, phi_sw, phi_s, phi_se) + + enddo ! i + enddo ! j + + enddo ! nt end subroutine local_max_min @@ -1299,18 +1312,18 @@ subroutine quasilocal_max_min (nx_block, ny_block, & tmin, tmax) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block,&! block dimensions - ilo,ihi,jlo,jhi ! beginning and end of physical domain + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain real (kind=dbl_kind), intent(inout), dimension (nx_block,ny_block,ntrace) :: & - tmin ,&! local min tracer - tmax ! local max tracer + tmin , & ! local min tracer + tmax ! local max tracer ! local variables integer (kind=int_kind) :: & - i, j ,&! horizontal indices - nt ! tracer index + i, j , & ! horizontal indices + nt ! tracer index character(len=*), parameter :: subname = '(quasilocal_max_min)' @@ -1351,37 +1364,37 @@ subroutine check_monotonicity (nx_block, ny_block, & istop, jstop) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block,&! block dimensions - ilo,ihi,jlo,jhi ! beginning and end of physical domain + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain real (kind=dbl_kind), intent(in), dimension (nx_block,ny_block) :: & - aim ! new ice area + aim ! new ice area real (kind=dbl_kind), intent(in), dimension (nx_block,ny_block,ntrace) :: & - trm ! new tracers + trm ! new tracers real (kind=dbl_kind), intent(in), dimension (nx_block,ny_block,ntrace) :: & - tmin ,&! local min tracer - tmax ! local max tracer + tmin , & ! local min tracer + tmax ! local max tracer logical (kind=log_kind), intent(inout) :: & - ckflag ! if true, abort on return + ckflag ! if true, abort on return integer (kind=int_kind), intent(inout) :: & - istop, jstop ! indices of grid cell where model aborts + istop, jstop ! indices of grid cell where model aborts ! local variables integer (kind=int_kind) :: & - i, j ,&! horizontal indices - nt, nt1, nt2 ! tracer indices + i, j , & ! horizontal indices + nt, nt1, nt2 ! tracer indices real (kind=dbl_kind) :: & - puny ,&! - w1, w2 ! work variables + puny , & ! + w1, w2 ! work variables logical (kind=log_kind), dimension (nx_block, ny_block) :: & - l_check ! if true, check monotonicity + l_check ! 
if true, check monotonicity character(len=*), parameter :: subname = '(check_monotonicity)' @@ -1392,15 +1405,15 @@ subroutine check_monotonicity (nx_block, ny_block, & do nt = 1, ntrace - !------------------------------------------------------------------- - ! Load logical array to identify tracers that need checking. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Load logical array to identify tracers that need checking. + !------------------------------------------------------------------- if (tracer_type(nt)==1) then ! does not depend on another tracer do j = jlo, jhi do i = ilo, ihi - if (aim(i,j) > puny) then + if (aim(i,j) > puny) then l_check(i,j) = .true. else l_check(i,j) = .false. @@ -1437,9 +1450,9 @@ subroutine check_monotonicity (nx_block, ny_block, & enddo endif - !------------------------------------------------------------------- - ! Make sure new values lie between tmin and tmax - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Make sure new values lie between tmin and tmax + !------------------------------------------------------------------- do j = jlo, jhi do i = ilo, ihi @@ -1496,24 +1509,24 @@ subroutine state_to_work (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions ntrcr , & ! number of tracers in use - narr ! number of 2D state variable arrays in works array + narr ! number of 2D state variable arrays in works array integer (kind=int_kind), dimension (ntrcr), intent(in) :: & trcr_depend ! = 0 for aicen tracers, 1 for vicen, 2 for vsnon real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), intent(in) :: & - aicen ,&! concentration of ice - vicen ,&! volume per unit area of ice (m) - vsnon ! volume per unit area of snow (m) + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), intent(in) :: & - trcrn ! ice tracers + trcrn ! ice tracers real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - aice0 ! concentration of open water + aice0 ! concentration of open water real (kind=dbl_kind), dimension(nx_block,ny_block,narr), intent (out) :: & - works ! work array + works ! work array ! local variables @@ -1524,8 +1537,8 @@ subroutine state_to_work (nx_block, ny_block, & tr_pond_cesm, tr_pond_lvl, tr_pond_topo integer (kind=int_kind) :: & - i, j, n, it ,&! counting indices - narrays ! counter for number of state variable arrays + i, j, n, it, & ! counting indices + narrays ! counter for number of state variable arrays character(len=*), parameter :: subname = '(state_to_work)' @@ -1584,36 +1597,36 @@ subroutine state_to_work (nx_block, ny_block, & elseif (trcr_depend(it) == 2+nt_alvl) then do j = 1, ny_block do i = 1, nx_block - works(i,j,narrays+it) = aicen(i,j,n) & + works(i,j,narrays+it) = aicen(i,j ,n) & * trcrn(i,j,nt_alvl,n) & - * trcrn(i,j,it,n) + * trcrn(i,j,it ,n) enddo enddo elseif (trcr_depend(it) == 2+nt_apnd .and. & tr_pond_cesm .or. tr_pond_topo) then do j = 1, ny_block do i = 1, nx_block - works(i,j,narrays+it) = aicen(i,j,n) & + works(i,j,narrays+it) = aicen(i,j ,n) & * trcrn(i,j,nt_apnd,n) & - * trcrn(i,j,it,n) + * trcrn(i,j,it ,n) enddo enddo elseif (trcr_depend(it) == 2+nt_apnd .and. 
& tr_pond_lvl) then do j = 1, ny_block do i = 1, nx_block - works(i,j,narrays+it) = aicen(i,j,n) & + works(i,j,narrays+it) = aicen(i,j ,n) & * trcrn(i,j,nt_alvl,n) & * trcrn(i,j,nt_apnd,n) & - * trcrn(i,j,it,n) + * trcrn(i,j,it ,n) enddo enddo elseif (trcr_depend(it) == 2+nt_fbri) then do j = 1, ny_block do i = 1, nx_block - works(i,j,narrays+it) = vicen(i,j,n) & + works(i,j,narrays+it) = vicen(i,j ,n) & * trcrn(i,j,nt_fbri,n) & - * trcrn(i,j,it,n) + * trcrn(i,j,it ,n) enddo enddo endif @@ -1631,23 +1644,23 @@ end subroutine state_to_work ! ! Convert work array back to state variables - subroutine work_to_state (nx_block, ny_block, & - ntrcr, narr, & - trcr_depend, & - trcr_base, & - n_trcr_strata, & - nt_strata, & - tmask, & - aicen, trcrn, & - vicen, vsnon, & - aice0, works) + subroutine work_to_state (nx_block, ny_block, & + ntrcr, narr, & + trcr_depend, & + trcr_base, & + n_trcr_strata, & + nt_strata, & + tmask, & + aicen, trcrn, & + vicen, vsnon, & + aice0, works) use ice_domain_size, only: ncat - integer (kind=int_kind), intent (in) :: & + integer (kind=int_kind), intent (in) :: & nx_block, ny_block, & ! block dimensions ntrcr , & ! number of tracers in use - narr ! number of 2D state variable arrays in works array + narr ! number of 2D state variable arrays in works array integer (kind=int_kind), dimension (ntrcr), intent(in) :: & trcr_depend, & ! = 0 for aicen tracers, 1 for vicen, 2 for vsnon @@ -1660,36 +1673,36 @@ subroutine work_to_state (nx_block, ny_block, & integer (kind=int_kind), dimension (ntrcr,2), intent(in) :: & nt_strata ! indices of underlying tracer layers - logical (kind=log_kind), intent (in) :: & + logical (kind=log_kind), intent (in) :: & tmask (nx_block,ny_block) - real (kind=dbl_kind), intent (in) :: & + real (kind=dbl_kind), intent (in) :: & works (nx_block,ny_block,narr) real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), intent(out) :: & - aicen ,&! concentration of ice - vicen ,&! volume per unit area of ice (m) - vsnon ! volume per unit area of snow (m) + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat),intent(out) :: & - trcrn ! ice tracers + trcrn ! ice tracers real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - aice0 ! concentration of open water + aice0 ! concentration of open water ! local variables - integer (kind=int_kind) :: & - i, j, ij, n ,&! counting indices - narrays ,&! counter for number of state variable arrays - nt_Tsfc ,&! Tsfc tracer number - icells ! number of ocean/ice cells + integer (kind=int_kind) :: & + i, j, ij, n, & ! counting indices + narrays , & ! counter for number of state variable arrays + nt_Tsfc , & ! Tsfc tracer number + icells ! 
number of ocean/ice cells
- integer (kind=int_kind), dimension (nx_block*ny_block) :: &
+ integer (kind=int_kind), dimension (nx_block*ny_block) :: &
 indxi, indxj
- real (kind=dbl_kind), dimension (nx_block*ny_block,narr) :: &
- work
+ real (kind=dbl_kind), dimension (nx_block*ny_block,narr) :: &
+ work
 character(len=*), parameter :: subname = '(work_to_state)'
@@ -1731,15 +1744,16 @@ subroutine work_to_state (nx_block, ny_block, &
 i = indxi(ij)
 j = indxj(ij)
- call icepack_compute_tracers(ntrcr=ntrcr, trcr_depend=trcr_depend(:), &
- atrcrn = work (ij,narrays+1:narrays+ntrcr), &
- aicen = aicen(i,j,n), &
- vicen = vicen(i,j,n), &
- vsnon = vsnon(i,j,n), &
+ call icepack_compute_tracers(ntrcr = ntrcr, &
+ trcr_depend = trcr_depend(:), &
+ atrcrn = work (ij,narrays+1:narrays+ntrcr), &
+ aicen = aicen(i,j,n), &
+ vicen = vicen(i,j,n), &
+ vsnon = vsnon(i,j,n), &
 trcr_base = trcr_base(:,:), &
 n_trcr_strata = n_trcr_strata(:), &
 nt_strata = nt_strata(:,:), &
- trcrn = trcrn(i,j,:,n))
+ trcrn = trcrn(i,j,:,n))
 ! tcraig, don't let land points get non-zero Tsfc
 if (.not.tmask(i,j)) then
@@ -1771,53 +1785,53 @@ subroutine upwind_field (nx_block, ny_block, &
 tarea)
 integer (kind=int_kind), intent (in) :: &
- nx_block, ny_block ,&! block dimensions
- ilo,ihi,jlo,jhi ,&! beginning and end of physical domain
- narrays ! number of 2D arrays to be transported
+ nx_block, ny_block, & ! block dimensions
+ ilo,ihi,jlo,jhi , & ! beginning and end of physical domain
+ narrays ! number of 2D arrays to be transported
 real (kind=dbl_kind), intent(in) :: &
- dt ! time step
+ dt ! time step
 real (kind=dbl_kind), dimension(nx_block,ny_block,narrays), intent(inout) :: &
- phi ! scalar field
+ phi ! scalar field
 real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: &
- uee, vnn ! cell edge velocities
+ uee, vnn ! cell edge velocities
 real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: &
- HTE ,&! length of east cell edge
- HTN ,&! length of north cell edge
- tarea ! grid cell area
+ HTE , & ! length of east cell edge
+ HTN , & ! length of north cell edge
+ tarea ! grid cell area
 ! local variables
 integer (kind=int_kind) :: &
- i, j, n ! standard indices
+ i, j, n ! standard indices
 real (kind=dbl_kind), dimension (nx_block,ny_block) :: &
 worka, workb
 character(len=*), parameter :: subname = '(upwind_field)'
- !-------------------------------------------------------------------
- ! upwind transport
- !-------------------------------------------------------------------
+ !-------------------------------------------------------------------
+ ! upwind transport
+ !-------------------------------------------------------------------
 do n = 1, narrays
 do j = 1, jhi
 do i = 1, ihi
 worka(i,j)= &
- upwind(phi(i,j,n),phi(i+1,j,n),uee(i,j),HTE(i,j),dt)
+ upwind(phi(i,j,n),phi(i+1,j ,n),uee(i,j),HTE(i,j),dt)
 workb(i,j)= &
- upwind(phi(i,j,n),phi(i,j+1,n),vnn(i,j),HTN(i,j),dt)
+ upwind(phi(i,j,n),phi(i ,j+1,n),vnn(i,j),HTN(i,j),dt)
 enddo
 enddo
 do j = jlo, jhi
 do i = ilo, ihi
- phi(i,j,n) = phi(i,j,n) - ( worka(i,j)-worka(i-1,j) &
- + workb(i,j)-workb(i,j-1) ) &
+ phi(i,j,n) = phi(i,j,n) - ( worka(i,j)-worka(i-1,j ) &
+ + workb(i,j)-workb(i ,j-1) ) &
 / tarea(i,j)
 enddo
 enddo
@@ -1827,10 +1841,9 @@ subroutine upwind_field (nx_block, ny_block, &
 end subroutine upwind_field
 !=======================================================================
-
- !-------------------------------------------------------------------
- ! Define upwind function
- !-------------------------------------------------------------------
+!
+! Define upwind function
+!
 real(kind=dbl_kind) function upwind(y1,y2,a,h,dt)
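! The body of this function is unchanged by the patch and is elided from
! the hunk above. For reference, a first-order donor-cell flux consistent
! with this interface would be (a sketch, not taken from the patch; a is
! the edge-normal velocity, h the edge length, dt the time step, and
! y1/y2 the scalar values in the cells on either side of the edge):
!
! upwind = p5*dt*h*((a+abs(a))*y1 + (a-abs(a))*y2)
!
! so that the flux is dt*h*a*y1 when a > 0 and dt*h*a*y2 when a < 0.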
diff --git a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 b/cicecore/cicedynB/dynamics/ice_transport_remap.F90
index 75489bd5e..95ae33613 100644
--- a/cicecore/cicedynB/dynamics/ice_transport_remap.F90
+++ b/cicecore/cicedynB/dynamics/ice_transport_remap.F90
@@ -1,5 +1,4 @@
 !=======================================================================
-!
 ! Transports quantities using the second-order conservative remapping
 ! scheme developed by John Dukowicz and John Baumgardner (DB) and modified
 ! for sea ice by William Lipscomb and Elizabeth Hunke.
@@ -20,11 +19,11 @@
 ! 2003: Vectorized by Clifford Chen (Fujitsu) and William Lipscomb
 ! 2004-05: Block structure added (WHL)
 ! 2006: Moved remap driver to ice_transport_driver
-! Geometry changes:
+! Geometry changes:
 ! (1) Reconstruct fields in stretched logically rectangular coordinates
 ! (2) Modify geometry so that the area flux across each edge
 ! can be specified (following an idea of Mats Bentsen)
-! 2010: ECH removed unnecessary grid arrays and optional arguments from
+! 2010: ECH removed unnecessary grid arrays and optional arguments from
 ! horizontal_remap
 module ice_transport_remap
@@ -52,7 +51,7 @@ module ice_transport_remap
 nvert = 3 ! number of vertices in a triangle
 ! for triangle integral formulas
- real (kind=dbl_kind), parameter :: &
+ real (kind=dbl_kind), parameter :: &
 p5625m = -9._dbl_kind/16._dbl_kind ,&
 p52083 = 25._dbl_kind/48._dbl_kind
 !=======================================================================
 ! Here is some information about how the incremental remapping scheme
-! works in CICE and how it can be adapted for use in other models.
+! works in CICE and how it can be adapted for use in other models.
 !
-! The remapping routine is designed to transport a generic mass-like
+! The remapping routine is designed to transport a generic mass-like
 ! field (in CICE, the ice fractional area) along with an arbitrary number
-! of tracers in two dimensions. The velocity components are assumed
-! to lie at grid cell corners and the transported scalars at cell centers.
-! Incremental remapping has the following desirable properties:
-!
-! (1) Tracer monotonicity is preserved. That is, no new local
-! extrema are produced in fields like ice thickness or internal
-! energy.
-! (2) The reconstucted mass and tracer fields vary linearly in x and y.
-! This means that remapping is 2nd-order accurate in space,
-! except where horizontal gradients are limited to preserve
-! monotonicity.
-! (3) There are economies of scale. Transporting a single field
-! is rather expensive, but additional fields have a relatively
-! low marginal cost.
-!
-! The following generic conservation equations may be solved:
-!
-! dm/dt = del*(u*m) (0)
-! d(m*T1)/dt = del*(u*m*T1) (1)
-! d(m*T1*T2)/dt = del*(u*m*T1*T2) (2)
-! d(m*T1*T2*T3)/dt = del*(u*m*T1*T2*T3) (3)
+! of tracers in two dimensions. The velocity components are assumed
+! to lie at grid cell corners and the transported scalars at cell centers.
+! Incremental remapping has the following desirable properties:
+!
+! (1) Tracer monotonicity is preserved. That is, no new local
+! extrema are produced in fields like ice thickness or internal
+! energy.
+! (2) The reconstructed mass and tracer fields vary linearly in x and y.
+!
This means that remapping is 2nd-order accurate in space, +! except where horizontal gradients are limited to preserve +! monotonicity. +! (3) There are economies of scale. Transporting a single field +! is rather expensive, but additional fields have a relatively +! low marginal cost. +! +! The following generic conservation equations may be solved: +! +! dm/dt = del*(u*m) (0) +! d(m*T1)/dt = del*(u*m*T1) (1) +! d(m*T1*T2)/dt = del*(u*m*T1*T2) (2) +! d(m*T1*T2*T3)/dt = del*(u*m*T1*T2*T3) (3) ! ! where d is a partial derivative, del is the 2D divergence operator, ! u is the horizontal velocity, m is the mass density field, and ! T1, T2, and T3 are tracers. ! ! In CICE, these equations have the form -! +! ! da/dt = del*(u*a) (4) ! dv/dt = d(a*h)/dt = del*(u*a*h) (5) ! de/dt = d(a*h*q)/dt = del*(u*a*h*q) (6) ! d(aT)/dt = del*(u*a*t) (7) -! -! where a = fractional ice area, v = ice/snow volume, h = v/a = thickness, -! e = ice/snow internal energy (J/m^2), q = e/v = internal energy per -! unit volume (J/m^3), and T is a tracer. These equations express +! +! where a = fractional ice area, v = ice/snow volume, h = v/a = thickness, +! e = ice/snow internal energy (J/m^2), q = e/v = internal energy per +! unit volume (J/m^3), and T is a tracer. These equations express ! conservation of ice area, volume, internal energy, and area-weighted -! tracer, respectively. +! tracer, respectively. ! ! (Note: In CICE, a, v and e are prognostic quantities from which ! h and q are diagnosed. The remapping routine works with tracers, ! which means that h and q must be derived from a, v, and e before -! calling the remapping routine.) +! calling the remapping routine.) +! +! Earlier versions of CICE assumed fixed ice and snow density. +! Beginning with CICE 4.0, the ice and snow density can be variable. +! In this case, equations (5) and (6) are replaced by ! -! Earlier versions of CICE assumed fixed ice and snow density. -! Beginning with CICE 4.0, the ice and snow density can be variable. -! In this case, equations (5) and (6) are replaced by -! -! dv/dt = d(a*h)/dt = del*(u*a*h) (8) +! dv/dt = d(a*h)/dt = del*(u*a*h) (8) ! dm/dt = d(a*h*rho)/dt = del*(u*a*h*rho) (9) ! de/dt = d(a*h*rho*qm)/dt = del*(u*a*h*rho*qm) (10) -! -! where rho = density and qm = internal energy per unit mass (J/kg). -! Eq. (9) expresses mass conservation, which in the variable-density -! case is no longer equivalent to volume conservation (8). -! -! Tracers satisfying equations of the form (1) are called "type 1." -! In CICE the paradigmatic type 1 tracers are hi and hs. -! -! Tracers satisfying equations of the form (2) are called "type 2". -! The paradigmatic type 2 tracers are qi and qs (or rhoi and rhos -! in the variable-density case). -! +! +! where rho = density and qm = internal energy per unit mass (J/kg). +! Eq. (9) expresses mass conservation, which in the variable-density +! case is no longer equivalent to volume conservation (8). +! +! Tracers satisfying equations of the form (1) are called "type 1." +! In CICE the paradigmatic type 1 tracers are hi and hs. +! +! Tracers satisfying equations of the form (2) are called "type 2". +! The paradigmatic type 2 tracers are qi and qs (or rhoi and rhos +! in the variable-density case). +! ! Tracers satisfying equations of the form (3) are called "type 3." ! The paradigmatic type 3 tracers are qmi and qms in the variable-density -! case. There are no such tracers in the constant-density case. -! -! The fields a, T1, and T2 are reconstructed in each grid cell with -! 
2nd-order accuracy. T3 is reconstructed with 1st-order accuracy
-! (i.e., it is transported in upwind fashion) in order to avoid
-! additional mathematical complexity.
-!
-! The mass-like field lives in the array "mm" (shorthand for mean
-! mass) and the tracers fields in the array "tm" (mean tracers).
-! In order to transport tracers correctly, the remapping routine
-! needs to know the tracers types and relationships. This is done
-! as follows:
-!
-! Each field in the "tm" array is assigned an index, 1:ntrace.
-! (Note: ntrace is not the same as ntrcr, the number of tracers
-! in the trcrn state variable array. For remapping purposes we
-! have additional tracers hi and hs.)
-!
-! The tracer types (1,2,3) are contained in the "tracer_type" array.
-! For standard CICE:
-!
-! tracer_type = (1 1 1 2 2 2 2 2)
-!
-! Type 2 and type 3 tracers are said to depend on type 1 tracers.
-! For instance, qi depends on hi, which is to say that
-! there is a conservation equation of the form (2) or (6).
-! Thus we define a "depend" array. For standard CICE:
-!
-! depend = (0 0 0 1 1 1 1 2)
-!
-! which implies that elements 1-3 (hi, hs, Ts) are type 1,
-! elements 4-7 (qi) depend on element 1 (hi), and element 8 (qs)
-! depends on element 2 (hs).
-!
-! We also define a logical array "has_dependents". In standard CICE:
-!
-! has_dependents = (T T F F F F F F),
-!
-! which means that only elements 1 and 2 (hi and hs) have dependent
-! tracers.
-!
-! For the variable-density case, things are a bit more complicated.
-! Suppose we have 4 variable-density ice layers and one variable-
-! density snow layer. Then the indexing is as follows:
-! 1 = hi
-! 2 = hs
-! 3 = Ts
-! 4-7 = rhoi
-! 8 = rhos
-! 9-12 = qmi
-! 13 = qms
-!
-! The key arrays are:
-!
-! tracer_type = (1 1 1 2 2 2 2 2 3 3 3 3 3)
-!
-! depend = (0 0 0 1 1 1 1 2 4 5 6 7 8)
-!
-! has_dependents = (T T F T T T T T F F F F F)
-!
-! which imply that hi and hs are type 1 with dependents rhoi and rhos,
-! while rhoi and rhos are type 2 with dependents qmi and qms.
-!
-! Tracers added to the ntrcr array are handled automatically
-! by the remapping with little extra coding. It is necessary
-! only to provide the correct type and dependency information.
+! case. There are no such tracers in the constant-density case.
+!
+! The fields a, T1, and T2 are reconstructed in each grid cell with
+! 2nd-order accuracy. T3 is reconstructed with 1st-order accuracy
+! (i.e., it is transported in upwind fashion) in order to avoid
+! additional mathematical complexity.
+!
+! The mass-like field lives in the array "mm" (shorthand for mean
+! mass) and the tracer fields in the array "tm" (mean tracers).
+! In order to transport tracers correctly, the remapping routine
+! needs to know the tracer types and relationships. This is done
+! as follows:
+!
+! Each field in the "tm" array is assigned an index, 1:ntrace.
+! (Note: ntrace is not the same as ntrcr, the number of tracers
+! in the trcrn state variable array. For remapping purposes we
+! have additional tracers hi and hs.)
+!
+! The tracer types (1,2,3) are contained in the "tracer_type" array.
+! For standard CICE:
+!
+! tracer_type = (1 1 1 2 2 2 2 2)
+!
+! Type 2 and type 3 tracers are said to depend on type 1 tracers.
+! For instance, qi depends on hi, which is to say that
+! there is a conservation equation of the form (2) or (6).
+! Thus we define a "depend" array. For standard CICE:
+!
+! depend = (0 0 0 1 1 1 1 2)
+!
+! which implies that elements 1-3 (hi, hs, Ts) are type 1,
+! elements 4-7 (qi) depend on element 1 (hi), and element 8 (qs)
+! depends on element 2 (hs).
+!
+! We also define a logical array "has_dependents". In standard CICE:
+!
+! has_dependents = (T T F F F F F F),
+!
+! which means that only elements 1 and 2 (hi and hs) have dependent
+! tracers.
+!
+! For the variable-density case, things are a bit more complicated.
+! Suppose we have 4 variable-density ice layers and one variable-
+! density snow layer. Then the indexing is as follows:
+! 1 = hi
+! 2 = hs
+! 3 = Ts
+! 4-7 = rhoi
+! 8 = rhos
+! 9-12 = qmi
+! 13 = qms
+!
+! The key arrays are:
+!
+! tracer_type = (1 1 1 2 2 2 2 2 3 3 3 3 3)
+!
+! depend = (0 0 0 1 1 1 1 2 4 5 6 7 8)
+!
+! has_dependents = (T T F T T T T T F F F F F)
+!
+! which imply that hi and hs are type 1 with dependents rhoi and rhos,
+! while rhoi and rhos are type 2 with dependents qmi and qms.
+!
+! Tracers added to the ntrcr array are handled automatically
+! by the remapping with little extra coding. It is necessary
+! only to provide the correct type and dependency information.
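!
! For illustration, the standard-CICE arrays above could be filled as
! follows (a sketch; it assumes the 8-element layout 1=hi, 2=hs, 3=Ts,
! 4-7=qi, 8=qs described earlier):
!
! tracer_type(1:3) = 1 ! hi, hs, Ts are type 1
! tracer_type(4:8) = 2 ! the four qi layers and qs are type 2
! depend(1:3) = 0 ! type 1 tracers depend on the mass field only
! depend(4:7) = 1 ! qi depends on element 1 (hi)
! depend(8) = 2 ! qs depends on element 2 (hs)
! has_dependents(1:2) = .true. ! only hi and hs have dependents
! has_dependents(3:8) = .false.
!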
 !
 ! When using this routine in other models, most of the tracer dependency
 ! apparatus may be irrelevant. In a layered ocean model, for example,
@@ -237,7 +236,7 @@ module ice_transport_remap
 ! regions are then tweaked, following an idea by Mats Bentsen, such
 ! that they have the desired area. If l_fixed_area = F, these regions
 ! are not tweaked, and the edgearea arrays are output variables.
-!
+!
 !=======================================================================
 contains
@@ -247,7 +246,7 @@ module ice_transport_remap
 ! Grid quantities used by the remapping transport scheme
 !
 ! Note: the arrays xyav, xxxav, etc are not needed for rectangular grids
-! but may be needed in the future for other nonuniform grids. They have
+! but may be needed in the future for other nonuniform grids. They have
 ! been commented out here to save memory and flops.
 !
 ! author William H. Lipscomb, LANL
@@ -277,7 +276,7 @@ subroutine init_remap
 xav(i,j,iblk) = c0
 yav(i,j,iblk) = c0
!!! These formulas would be used on a rectangular grid
-!!! with dimensions (dxt, dyt):
+!!! with dimensions (dxt, dyt):
!!! xxav(i,j,iblk) = dxt(i,j,iblk)**2 / c12
!!! yyav(i,j,iblk) = dyt(i,j,iblk)**2 / c12
 xxav(i,j,iblk) = c1/c12
@@ -291,7 +290,7 @@ subroutine init_remap
 enddo
 enddo
!$OMP END PARALLEL DO
-
+
 end subroutine init_remap
 !=======================================================================
@@ -302,24 +301,25 @@ end subroutine init_remap
 !
 ! This scheme preserves monotonicity of ice area and tracers. That is,
 ! it does not produce new extrema. It is second-order accurate in space,
-! except where gradients are limited to preserve monotonicity.
+! except where gradients are limited to preserve monotonicity.
 !
 ! This version of the remapping allows the user to specify the areal
 ! flux across each edge, based on an idea developed by Mats Bentsen.
 !
 ! author William H. Lipscomb, LANL
-! 2006: Moved driver (subroutine transport_remap) into separate module.
+! 2006: Moved driver (subroutine transport_remap) into separate module.
 ! Geometry changes (logically rectangular coordinates, fixed
area fluxes)
- subroutine horizontal_remap (dt, ntrace, &
- uvel, vvel, &
- mm, tm, &
- l_fixed_area, &
- tracer_type, depend, &
- has_dependents, &
- integral_order, &
- l_dp_midpt)
+ subroutine horizontal_remap (dt, ntrace, &
+ uvel, vvel, &
+ mm, tm, &
+ l_fixed_area, &
+ tracer_type, depend, &
+ has_dependents, &
+ integral_order, &
+ l_dp_midpt, grid_ice, &
+ uvelE, vvelN)
 use ice_boundary, only: ice_halo, ice_HaloMask, ice_HaloUpdate, &
 ice_HaloDestroy
@@ -333,127 +333,135 @@ subroutine horizontal_remap (dt, ntrace, &
 use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound
 real (kind=dbl_kind), intent(in) :: &
- dt ! time step
+ dt ! time step
 integer (kind=int_kind), intent(in) :: &
- ntrace ! number of tracers in use
+ ntrace ! number of tracers in use
 real (kind=dbl_kind), intent(in), dimension(nx_block,ny_block,max_blocks) :: &
- uvel ,&! x-component of velocity (m/s)
- vvel ! y-component of velocity (m/s)
+ uvel, & ! x-component of velocity (m/s) ugrid
+ vvel ! y-component of velocity (m/s) ugrid
+
+ real (kind=dbl_kind), intent(in), optional, dimension(nx_block,ny_block,max_blocks) :: &
+ uvelE, & ! x-component of velocity (m/s) egrid
+ vvelN ! y-component of velocity (m/s) ngrid
 real (kind=dbl_kind), intent(inout), dimension (nx_block,ny_block,0:ncat,max_blocks) :: &
- mm ! mean mass values in each grid cell
+ mm ! mean mass values in each grid cell
 real (kind=dbl_kind), intent(inout), dimension (nx_block,ny_block,ntrace,ncat,max_blocks) :: &
- tm ! mean tracer values in each grid cell
+ tm ! mean tracer values in each grid cell
- !-------------------------------------------------------------------
- ! If l_fixed_area is true, the area of each departure region is
- ! computed in advance (e.g., by taking the divergence of the
- ! velocity field and passed to locate_triangles. The departure
- ! regions are adjusted to obtain the desired area.
- ! If false, edgearea is computed in locate_triangles and passed out.
- !-------------------------------------------------------------------
+ character (len=char_len_long), intent(in) :: &
+ grid_ice ! ice grid, B, C, etc
+
+ !-------------------------------------------------------------------
+ ! If l_fixed_area is true, the area of each departure region is
+ ! computed in advance (e.g., by taking the divergence of the
+ ! velocity field) and passed to locate_triangles. The departure
+ ! regions are adjusted to obtain the desired area.
+ ! If false, edgearea is computed in locate_triangles and passed out.
+ !-------------------------------------------------------------------
 logical, intent(in) :: &
- l_fixed_area ! if true, edgearea_e and edgearea_n are prescribed
- ! if false, edgearea is computed here and passed out
+ l_fixed_area ! if true, edgearea_e and edgearea_n are prescribed
+ ! if false, edgearea is computed here and passed out
 integer (kind=int_kind), dimension (ntrace), intent(in) :: &
- tracer_type ,&! = 1, 2, or 3 (see comments above)
- depend ! tracer dependencies (see above)
+ tracer_type , & ! = 1, 2, or 3 (see comments above)
+ depend ! tracer dependencies (see above)
 logical (kind=log_kind), dimension (ntrace), intent(in) :: &
- has_dependents ! true if a tracer has dependent tracers
+ has_dependents ! true if a tracer has dependent tracers
 integer (kind=int_kind), intent(in) :: &
- integral_order ! polynomial order for triangle integrals
+ integral_order ! polynomial order for triangle integrals
 logical (kind=log_kind), intent(in) :: &
- l_dp_midpt ! if true, find departure points using
- ! corrected midpoint velocity
+ l_dp_midpt ! if true, find departure points using
+ ! corrected midpoint velocity
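! With the two new optional arguments, a C or CD grid invocation could
! look like the following sketch (the actual call site is in
! ice_transport_driver and is not shown in this hunk):
!
! call horizontal_remap (dt, ntrace, &
! uvel, vvel, &
! mm, tm, &
! l_fixed_area, &
! tracer_type, depend, &
! has_dependents, &
! integral_order, &
! l_dp_midpt, grid_ice, &
! uvelE=uvelE, vvelN=vvelN)
!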
 ! local variables
 integer (kind=int_kind) :: &
- i, j ,&! horizontal indices
- iblk ,&! block index
- ilo,ihi,jlo,jhi,&! beginning and end of physical domain
- n, m ! ice category, tracer indices
+ i, j , & ! horizontal indices
+ iblk , & ! block index
+ ilo,ihi,jlo,jhi, & ! beginning and end of physical domain
+ n, m ! ice category, tracer indices
 integer (kind=int_kind), dimension(0:ncat,max_blocks) :: &
- icellsnc ! number of cells with ice
+ icellsnc ! number of cells with ice
 integer (kind=int_kind), dimension(nx_block*ny_block,0:ncat) :: &
 indxinc, indxjnc ! compressed i/j indices
 real (kind=dbl_kind), dimension(nx_block,ny_block) :: &
- edgearea_e ,&! area of departure regions for east edges
- edgearea_n ! area of departure regions for north edges
+ edgearea_e , & ! area of departure regions for east edges
+ edgearea_n ! area of departure regions for north edges
 real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: &
- dpx ,&! x coordinates of departure points at cell corners
- dpy ! y coordinates of departure points at cell corners
+ dpx , & ! x coordinates of departure points at cell corners
+ dpy ! y coordinates of departure points at cell corners
 real (kind=dbl_kind), dimension(nx_block,ny_block,0:ncat,max_blocks) :: &
- mc ,&! mass at geometric center of cell
- mx, my ! limited derivative of mass wrt x and y
+ mc , & ! mass at geometric center of cell
+ mx, my ! limited derivative of mass wrt x and y
 real (kind=dbl_kind), dimension(nx_block,ny_block,0:ncat) :: &
- mmask ! = 1. if mass is present, = 0. otherwise
+ mmask ! = 1. if mass is present, = 0. otherwise
 real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace,ncat,max_blocks) :: &
- tc ,&! tracer values at geometric center of cell
- tx, ty ! limited derivative of tracer wrt x and y
+ tc , & ! tracer values at geometric center of cell
+ tx, ty ! limited derivative of tracer wrt x and y
 real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace,ncat) :: &
- tmask ! = 1. if tracer is present, = 0. otherwise
+ tmask ! = 1. if tracer is present, = 0. otherwise
 real (kind=dbl_kind), dimension (nx_block,ny_block,0:ncat) :: &
- mflxe, mflxn ! mass transports across E and N cell edges
+ mflxe, mflxn ! mass transports across E and N cell edges
 real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace,ncat) :: &
- mtflxe, mtflxn ! mass*tracer transports across E and N cell edges
+ mtflxe, mtflxn ! mass*tracer transports across E and N cell edges
 real (kind=dbl_kind), dimension (nx_block,ny_block,ngroups) :: &
- triarea ! area of east-edge departure triangle
+ triarea ! area of east-edge departure triangle
 real (kind=dbl_kind), dimension (nx_block,ny_block,0:nvert,ngroups) :: &
- xp, yp ! x and y coordinates of special triangle points
- ! (need 4 points for triangle integrals)
+ xp, yp ! x and y coordinates of special triangle points
+ ! (need 4 points for triangle integrals)
 integer (kind=int_kind), dimension (nx_block,ny_block,ngroups) :: &
- iflux ,&! i index of cell contributing transport
- jflux ! j index of cell contributing transport
+ iflux , & ! i index of cell contributing transport
+ jflux ! j index of cell contributing transport
 integer (kind=int_kind), dimension(ngroups,max_blocks) :: &
- icellsng ! number of cells with ice
+ icellsng ! number of cells with ice
 integer (kind=int_kind), dimension(nx_block*ny_block,ngroups) :: &
- indxing, indxjng ! compressed i/j indices
+ indxing, indxjng !
compressed i/j indices integer (kind=int_kind), dimension(nx_block,ny_block,max_blocks) :: & - halomask ! temporary mask for fast halo updates + halomask ! temporary mask for fast halo updates logical (kind=log_kind) :: & - l_stop ! if true, abort the model + l_stop ! if true, abort the model integer (kind=int_kind) :: & - istop, jstop ! indices of grid cell where model aborts + istop, jstop ! indices of grid cell where model aborts character (len=char_len) :: & - edge ! 'north' or 'east' + edge ! 'north' or 'east' - type (ice_halo) :: halo_info_tracer + type (ice_halo) :: & + halo_info_tracer ! masked halo type (block) :: & - this_block ! block information for current block + this_block ! block information for current block character(len=*), parameter :: subname = '(horizontal_remap)' -!---!------------------------------------------------------------------- -!---! Remap the ice area and associated tracers. -!---! Remap the open water area (without tracers). -!---!------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Remap the ice area and associated tracers. + ! Remap the open water area (without tracers). + !------------------------------------------------------------------- !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n, & !$OMP indxinc,indxjnc,mmask,tmask,istop,jstop,l_stop) & @@ -464,48 +472,48 @@ subroutine horizontal_remap (dt, ntrace, & istop = 0 jstop = 0 - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - !------------------------------------------------------------------- - ! Compute masks and count ice cells. - ! Masks are used to prevent tracer values in cells without ice from - ! being used to compute tracer gradients. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute masks and count ice cells. + ! Masks are used to prevent tracer values in cells without ice from + ! being used to compute tracer gradients. + !------------------------------------------------------------------- call make_masks (nx_block, ny_block, & ilo, ihi, jlo, jhi, & nghost, ntrace, & has_dependents, icellsnc(:,iblk), & - indxinc(:,:), indxjnc(:,:), & - mm(:,:,:,iblk), mmask(:,:,:), & + indxinc(:,:), indxjnc(:,:), & + mm (:,:,:,iblk), mmask(:,:,:), & tm(:,:,:,:,iblk), tmask(:,:,:,:)) - !------------------------------------------------------------------- - ! Construct linear fields, limiting gradients to preserve monotonicity. - ! Note: Pass in unit arrays instead of true distances HTE, HTN, etc. - ! The resulting gradients are in scaled coordinates. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Construct linear fields, limiting gradients to preserve monotonicity. + ! Note: Pass in unit arrays instead of true distances HTE, HTN, etc. + ! The resulting gradients are in scaled coordinates. + !------------------------------------------------------------------- ! open water - call construct_fields(nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - nghost, ntrace, & - tracer_type, depend, & - has_dependents, icellsnc (0,iblk), & - indxinc (:,0), indxjnc(:,0), & - hm (:,:,iblk), xav (:,:,iblk), & - yav (:,:,iblk), xxav (:,:,iblk), & - yyav (:,:,iblk), & -! xyav (:,:,iblk), & -! 
xxxav (:,:,iblk), xxyav(:,:,iblk), & -! xyyav (:,:,iblk), yyyav(:,:,iblk), & - mm (:,:,0,iblk), mc(:,:,0,iblk), & - mx (:,:,0,iblk), my(:,:,0,iblk), & - mmask (:,:,0) ) + call construct_fields(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, ntrace, & + tracer_type, depend, & + has_dependents, icellsnc(0,iblk), & + indxinc(:,0), indxjnc(:,0), & + hm (:,:,iblk), xav (:,:,iblk), & + yav (:,:,iblk), xxav (:,:,iblk), & + yyav (:,:,iblk), & +! xyav (:,:,iblk), & +! xxxav (:,:,iblk), xxyav (:,:,iblk), & +! xyyav (:,:,iblk), yyyav (:,:,iblk), & + mm (:,:,0,iblk), mc (:,:,0,iblk), & + mx (:,:,0,iblk), my (:,:,0,iblk), & + mmask(:,:,0) ) ! ice categories @@ -516,26 +524,26 @@ subroutine horizontal_remap (dt, ntrace, & nghost, ntrace, & tracer_type, depend, & has_dependents, icellsnc (n,iblk), & - indxinc (:,n), indxjnc(:,n), & + indxinc (:,n), indxjnc(:,n), & hm (:,:,iblk), xav (:,:,iblk), & yav (:,:,iblk), xxav (:,:,iblk), & - yyav (:,:,iblk), & -! xyav (:,:,iblk), & + yyav (:,:,iblk), & +! xyav (:,:,iblk), & ! xxxav (:,:,iblk), xxyav (:,:,iblk), & ! xyyav (:,:,iblk), yyyav (:,:,iblk), & - mm (:,:,n,iblk), mc (:,:,n,iblk), & - mx (:,:,n,iblk), my (:,:,n,iblk), & - mmask (:,:,n), & - tm (:,:,:,n,iblk), tc(:,:,:,n,iblk), & - tx (:,:,:,n,iblk), ty(:,:,:,n,iblk), & + mm (:,:,n,iblk), mc (:,:,n,iblk), & + mx (:,:,n,iblk), my (:,:,n,iblk), & + mmask (:,:,n), & + tm (:,:,:,n,iblk), tc(:,:,:,n,iblk), & + tx (:,:,:,n,iblk), ty(:,:,:,n,iblk), & tmask(:,:,:,n) ) enddo ! n - - !------------------------------------------------------------------- - ! Given velocity field at cell corners, compute departure points - ! of trajectories. - !------------------------------------------------------------------- + + !------------------------------------------------------------------- + ! Given velocity field at cell corners, compute departure points + ! of trajectories. + !------------------------------------------------------------------- call departure_points(nx_block, ny_block, & ilo, ihi, jlo, jhi, & @@ -544,27 +552,27 @@ subroutine horizontal_remap (dt, ntrace, & dxu (:,:,iblk), dyu (:,:,iblk), & HTN (:,:,iblk), HTE (:,:,iblk), & dpx (:,:,iblk), dpy (:,:,iblk), & - l_dp_midpt, l_stop, & + l_dp_midpt, l_stop, & istop, jstop) if (l_stop) then write(nu_diag,*) 'istep1, my_task, iblk =', & istep1, my_task, iblk write (nu_diag,*) 'Global block:', this_block%block_id - if (istop > 0 .and. jstop > 0) & - write(nu_diag,*) 'Global i and j:', & - this_block%i_glob(istop), & - this_block%j_glob(jstop) + if (istop > 0 .and. jstop > 0) & + write(nu_diag,*) 'Global i and j:', & + this_block%i_glob(istop), & + this_block%j_glob(jstop) call abort_ice(subname//'ERROR: bad departure points') endif enddo ! iblk !$OMP END PARALLEL DO - !------------------------------------------------------------------- - ! Ghost cell updates - ! If nghost >= 2, these calls are not needed - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Ghost cell updates + ! If nghost >= 2, these calls are not needed + !------------------------------------------------------------------- if (nghost==1) then @@ -584,12 +592,12 @@ subroutine horizontal_remap (dt, ntrace, & call ice_HaloUpdate (my, halo_info, & field_loc_center, field_type_vector) - ! tracer fields + ! 
tracer fields
 if (maskhalo_remap) then
 halomask(:,:,:) = 0
!$OMP PARALLEL DO PRIVATE(iblk,this_block,ilo,ihi,jlo,jhi,n,m,j,i) SCHEDULE(runtime)
 do iblk = 1, nblocks
- this_block = get_block(blocks_ice(iblk),iblk)
+ this_block = get_block(blocks_ice(iblk),iblk)
 ilo = this_block%ilo
 ihi = this_block%ihi
 jlo = this_block%jlo
@@ -608,8 +616,8 @@ subroutine horizontal_remap (dt, ntrace, &
 enddo
 enddo
!$OMP END PARALLEL DO
- call ice_HaloUpdate(halomask, halo_info, &
- field_loc_center, field_type_scalar)
+ call ice_HaloUpdate (halomask, halo_info, &
+ field_loc_center, field_type_scalar)
 call ice_HaloMask(halo_info_tracer, halo_info, halomask)
 call ice_HaloUpdate (tc, halo_info_tracer, &
@@ -642,16 +650,16 @@ subroutine horizontal_remap (dt, ntrace, &
 istop = 0
 jstop = 0
- this_block = get_block(blocks_ice(iblk),iblk)
+ this_block = get_block(blocks_ice(iblk),iblk)
 ilo = this_block%ilo
 ihi = this_block%ihi
 jlo = this_block%jlo
 jhi = this_block%jhi
- !-------------------------------------------------------------------
- ! If l_fixed_area is true, compute edgearea by taking the divergence
- ! of the velocity field. Otherwise, initialize edgearea.
- !-------------------------------------------------------------------
+ !-------------------------------------------------------------------
+ ! If l_fixed_area is true, compute edgearea by taking the divergence
+ ! of the velocity field. Otherwise, initialize edgearea.
+ !-------------------------------------------------------------------
 do j = 1, ny_block
 do i = 1, nx_block
@@ -661,6 +669,24 @@ subroutine horizontal_remap (dt, ntrace, &
 enddo
 if (l_fixed_area) then
+ if (grid_ice == 'CD' .or. grid_ice == 'C') then ! velocities are already at the edge centers
+ if (.not.present(uvelE).or..not.present(vvelN)) then
+ call abort_ice (subname//'ERROR: uvelE,vvelN required with C|CD and l_fixed_area')
+ endif
+
+ do j = jlo, jhi
+ do i = ilo-1, ihi
+ edgearea_e(i,j) = uvelE(i,j,iblk) * HTE(i,j,iblk) * dt
+ enddo
+ enddo
+
+ do j = jlo-1, jhi
+ do i = ilo, ihi
+ edgearea_n(i,j) = vvelN(i,j,iblk)*HTN(i,j,iblk) * dt
+ enddo
+ enddo
+
+ else
 do j = jlo, jhi
 do i = ilo-1, ihi
 edgearea_e(i,j) = (uvel(i,j,iblk) + uvel(i,j-1,iblk)) &
@@ -674,125 +700,126 @@ subroutine horizontal_remap (dt, ntrace, &
 * p5 * HTN(i,j,iblk) * dt
 enddo
 enddo
+ endif
 endif
- !-------------------------------------------------------------------
- ! Transports for east cell edges.
- !-------------------------------------------------------------------
+ !-------------------------------------------------------------------
+ ! Transports for east cell edges.
+ !-------------------------------------------------------------------
- !-------------------------------------------------------------------
- ! Compute areas and vertices of departure triangles.
+ !------------------------------------------------------------------- edge = 'east' call locate_triangles(nx_block, ny_block, & ilo, ihi, jlo, jhi, & nghost, edge, & - icellsng (:,iblk), & - indxing(:,:), indxjng(:,:), & - dpx (:,:,iblk), dpy (:,:,iblk), & - dxu (:,:,iblk), dyu (:,:,iblk), & - xp(:,:,:,:), yp(:,:,:,:), & + icellsng(:,iblk), & + indxing(:,:), indxjng(:,:), & + dpx (:,:,iblk), dpy(:,:,iblk), & + dxu (:,:,iblk), dyu(:,:,iblk), & + xp (:,:,:,:), yp (:,:,:,:), & iflux, jflux, & triarea, & l_fixed_area, edgearea_e(:,:)) - !------------------------------------------------------------------- - ! Given triangle vertices, compute coordinates of triangle points - ! needed for transport integrals. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Given triangle vertices, compute coordinates of triangle points + ! needed for transport integrals. + !------------------------------------------------------------------- - call triangle_coordinates (nx_block, ny_block, & - integral_order, icellsng (:,iblk), & - indxing(:,:), indxjng(:,:), & - xp, yp) + call triangle_coordinates (nx_block, ny_block, & + integral_order, icellsng(:,iblk), & + indxing(:,:), indxjng(:,:), & + xp, yp) - !------------------------------------------------------------------- - ! Compute the transport across east cell edges by summing contributions - ! from each triangle. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute the transport across east cell edges by summing contributions + ! from each triangle. + !------------------------------------------------------------------- ! open water - call transport_integrals(nx_block, ny_block, & - ntrace, icellsng (:,iblk), & - indxing(:,:), indxjng(:,:), & - tracer_type, depend, & - integral_order, triarea, & - iflux, jflux, & - xp, yp, & - mc(:,:,0,iblk), mx (:,:,0,iblk), & - my(:,:,0,iblk), mflxe(:,:,0)) + call transport_integrals(nx_block, ny_block, & + ntrace, icellsng (:,iblk), & + indxing(:,:), indxjng(:,:), & + tracer_type, depend, & + integral_order, triarea, & + iflux, jflux, & + xp, yp, & + mc(:,:,0,iblk), mx (:,:,0,iblk), & + my(:,:,0,iblk), mflxe(:,:,0)) ! ice categories do n = 1, ncat call transport_integrals & (nx_block, ny_block, & ntrace, icellsng (:,iblk), & - indxing(:,:), indxjng(:,:), & + indxing(:,:), indxjng(:,:), & tracer_type, depend, & integral_order, triarea, & iflux, jflux, & xp, yp, & - mc(:,:, n,iblk), mx (:,:, n,iblk), & - my(:,:, n,iblk), mflxe(:,:, n), & - tc(:,:,:,n,iblk), tx (:,:,:,n,iblk), & + mc(:,:, n,iblk), mx (:,:, n,iblk), & + my(:,:, n,iblk), mflxe (:,:, n), & + tc(:,:,:,n,iblk), tx (:,:,:,n,iblk), & ty(:,:,:,n,iblk), mtflxe(:,:,:,n)) enddo - !------------------------------------------------------------------- - ! Repeat for north edges - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! 
Repeat for north edges + !------------------------------------------------------------------- edge = 'north' call locate_triangles(nx_block, ny_block, & ilo, ihi, jlo, jhi, & nghost, edge, & - icellsng (:,iblk), & - indxing(:,:), indxjng(:,:), & - dpx (:,:,iblk), dpy (:,:,iblk), & - dxu (:,:,iblk), dyu (:,:,iblk), & - xp(:,:,:,:), yp(:,:,:,:), & + icellsng(:,iblk), & + indxing(:,:), indxjng(:,:), & + dpx (:,:,iblk), dpy (:,:,iblk), & + dxu (:,:,iblk), dyu (:,:,iblk), & + xp (:,:,:,:), yp(:,:,:,:), & iflux, jflux, & triarea, & l_fixed_area, edgearea_n(:,:)) - call triangle_coordinates (nx_block, ny_block, & - integral_order, icellsng (:,iblk), & - indxing(:,:), indxjng(:,:), & - xp, yp) + call triangle_coordinates (nx_block, ny_block, & + integral_order, icellsng(:,iblk), & + indxing(:,:), indxjng(:,:), & + xp, yp) ! open water - call transport_integrals(nx_block, ny_block, & - ntrace, icellsng (:,iblk), & - indxing(:,:), indxjng(:,:), & - tracer_type, depend, & - integral_order, triarea, & - iflux, jflux, & - xp, yp, & - mc(:,:,0,iblk), mx(:,:,0,iblk), & - my(:,:,0,iblk), mflxn(:,:,0)) + call transport_integrals(nx_block, ny_block, & + ntrace, icellsng (:,iblk), & + indxing(:,:), indxjng(:,:), & + tracer_type, depend, & + integral_order, triarea, & + iflux, jflux, & + xp, yp, & + mc(:,:,0,iblk), mx (:,:,0,iblk), & + my(:,:,0,iblk), mflxn(:,:,0)) ! ice categories do n = 1, ncat call transport_integrals & (nx_block, ny_block, & ntrace, icellsng (:,iblk), & - indxing(:,:), indxjng(:,:), & + indxing(:,:), indxjng(:,:), & tracer_type, depend, & integral_order, triarea, & iflux, jflux, & xp, yp, & - mc(:,:, n,iblk), mx (:,:, n,iblk), & - my(:,:, n,iblk), mflxn(:,:, n), & - tc(:,:,:,n,iblk), tx (:,:,:,n,iblk), & + mc(:,:, n,iblk), mx (:,:, n,iblk), & + my(:,:, n,iblk), mflxn (:,:, n), & + tc(:,:,:,n,iblk), tx (:,:,:,n,iblk), & ty(:,:,:,n,iblk), mtflxn(:,:,:,n)) enddo ! n - !------------------------------------------------------------------- - ! Update the ice area and tracers. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Update the ice area and tracers. + !------------------------------------------------------------------- ! open water call update_fields (nx_block, ny_block, & @@ -805,14 +832,14 @@ subroutine horizontal_remap (dt, ntrace, & mm (:,:,0,iblk)) if (l_stop) then - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) write (nu_diag,*) 'istep1, my_task, iblk, cat =', & istep1, my_task, iblk, '0' write (nu_diag,*) 'Global block:', this_block%block_id - if (istop > 0 .and. jstop > 0) & + if (istop > 0 .and. jstop > 0) & write(nu_diag,*) 'Global i and j:', & this_block%i_glob(istop), & - this_block%j_glob(jstop) + this_block%j_glob(jstop) call abort_ice (subname//'ERROR: negative area (open water)') endif @@ -823,12 +850,12 @@ subroutine horizontal_remap (dt, ntrace, & ilo, ihi, jlo, jhi, & ntrace, & tracer_type, depend, & - tarear(:,:,iblk), l_stop, & + tarear (:,:,iblk), l_stop, & istop, jstop, & - mflxe(:,:, n), mflxn(:,:, n), & - mm (:,:, n,iblk), & + mflxe (:,:, n), mflxn (:,:, n), & + mm (:,:, n,iblk), & mtflxe(:,:,:,n), mtflxn(:,:,:,n), & - tm (:,:,:,n,iblk)) + tm (:,:,:,n,iblk)) if (l_stop) then write (nu_diag,*) 'istep1, my_task, iblk, cat =', & @@ -837,7 +864,7 @@ subroutine horizontal_remap (dt, ntrace, & if (istop > 0 .and. 
jstop > 0) & write(nu_diag,*) 'Global i and j:', & this_block%i_glob(istop), & - this_block%j_glob(jstop) + this_block%j_glob(jstop) call abort_ice (subname//'ERROR: negative area (ice)') endif enddo ! n @@ -862,53 +889,53 @@ end subroutine horizontal_remap ! ! author William H. Lipscomb, LANL - subroutine make_masks (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - nghost, ntrace, & - has_dependents, & - icells, & - indxi, indxj, & - mm, mmask, & - tm, tmask) + subroutine make_masks (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, ntrace, & + has_dependents, & + icells, & + indxi, indxj, & + mm, mmask, & + tm, tmask) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block ,&! block dimensions - ilo,ihi,jlo,jhi ,&! beginning and end of physical domain - nghost ,&! number of ghost cells - ntrace ! number of tracers in use + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi , & ! beginning and end of physical domain + nghost , & ! number of ghost cells + ntrace ! number of tracers in use logical (kind=log_kind), dimension (ntrace), intent(in) :: & - has_dependents ! true if a tracer has dependent tracers + has_dependents ! true if a tracer has dependent tracers integer (kind=int_kind), dimension(0:ncat), intent(out) :: & - icells ! number of cells with ice + icells ! number of cells with ice integer (kind=int_kind), dimension(nx_block*ny_block,0:ncat), intent(out) :: & - indxi ,&! compressed i/j indices - indxj + indxi , & ! compressed i/j indices + indxj real (kind=dbl_kind), dimension (nx_block,ny_block,0:ncat), intent(in) :: & - mm ! mean ice area in each grid cell + mm ! mean ice area in each grid cell real (kind=dbl_kind), dimension (nx_block,ny_block,0:ncat), intent(out) :: & - mmask ! = 1. if ice is present, else = 0. + mmask ! = 1. if ice is present, else = 0. real (kind=dbl_kind), dimension (nx_block, ny_block, ntrace, ncat), intent(in), optional :: & - tm ! mean tracer values in each grid cell + tm ! mean tracer values in each grid cell real (kind=dbl_kind), dimension (nx_block, ny_block, ntrace, ncat), intent(out), optional :: & - tmask ! = 1. if tracer is present, else = 0. + tmask ! = 1. if tracer is present, else = 0. ! local variables integer (kind=int_kind) :: & - i, j, ij ,&! horizontal indices - n ,&! ice category index - nt ! tracer index + i, j, ij , & ! horizontal indices + n , & ! ice category index + nt ! tracer index real (kind=dbl_kind) :: & - puny ! + puny ! character(len=*), parameter :: subname = '(make_masks)' @@ -924,9 +951,9 @@ subroutine make_masks (nx_block, ny_block, & enddo enddo - !------------------------------------------------------------------- - ! open water mask - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! open water mask + !------------------------------------------------------------------- icells(0) = 0 do j = 1, ny_block @@ -945,9 +972,9 @@ subroutine make_masks (nx_block, ny_block, & do n = 1, ncat - !------------------------------------------------------------------- - ! Find grid cells where ice is present. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Find grid cells where ice is present. 
+ !------------------------------------------------------------------- icells(n) = 0 do j = 1, ny_block @@ -961,9 +988,9 @@ subroutine make_masks (nx_block, ny_block, & enddo enddo - !------------------------------------------------------------------- - ! ice area mask - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! ice area mask + !------------------------------------------------------------------- mmask(:,:,n) = c0 do ij = 1, icells(n) @@ -972,9 +999,9 @@ subroutine make_masks (nx_block, ny_block, & mmask(i,j,n) = c1 enddo - !------------------------------------------------------------------- - ! tracer masks - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! tracer masks + !------------------------------------------------------------------- if (present(tm)) then @@ -994,11 +1021,11 @@ subroutine make_masks (nx_block, ny_block, & endif ! present(tm) - !------------------------------------------------------------------- - ! Redefine icells - ! For nghost = 1, exclude ghost cells - ! For nghost = 2, include one layer of ghost cells - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Redefine icells + ! For nghost = 1, exclude ghost cells + ! For nghost = 2, include one layer of ghost cells + !------------------------------------------------------------------- icells(n) = 0 do j = jlo-nghost+1, jhi+nghost-1 @@ -1011,7 +1038,7 @@ subroutine make_masks (nx_block, ny_block, & endif ! mm > puny enddo enddo - + enddo ! n end subroutine make_masks @@ -1043,109 +1070,109 @@ subroutine construct_fields (nx_block, ny_block, & tmask) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block ,&! block dimensions - ilo,ihi,jlo,jhi ,&! beginning and end of physical domain - nghost ,&! number of ghost cells - ntrace ,&! number of tracers in use + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi , & ! beginning and end of physical domain + nghost , & ! number of ghost cells + ntrace , & ! number of tracers in use icells ! number of cells with mass integer (kind=int_kind), dimension (ntrace), intent(in) :: & - tracer_type ,&! = 1, 2, or 3 (see comments above) - depend ! tracer dependencies (see above) + tracer_type , & ! = 1, 2, or 3 (see comments above) + depend ! tracer dependencies (see above) logical (kind=log_kind), dimension (ntrace), intent(in) :: & - has_dependents ! true if a tracer has dependent tracers + has_dependents ! true if a tracer has dependent tracers integer (kind=int_kind), dimension(nx_block*ny_block), intent(in) :: & - indxi ,&! compressed i/j indices + indxi , & ! compressed i/j indices indxj real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - hm ,&! land/boundary mask, thickness (T-cell) - xav, yav ,&! mean T-cell values of x, y - xxav, yyav ! mean T-cell values of xx, yy -! xyav, ,&! mean T-cell values of xy + hm , & ! land/boundary mask, thickness (T-cell) + xav, yav , & ! mean T-cell values of x, y + xxav, yyav ! mean T-cell values of xx, yy +! xyav, , & ! mean T-cell values of xy ! xxxav,xxyav,xyyav,yyyav ! mean T-cell values of xxx, xxy, xyy, yyy real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - mm ,&! mean value of mass field - mmask ! = 1. if ice is present, = 0. otherwise + mm , & ! mean value of mass field + mmask ! = 1. 
if ice is present, = 0. otherwise real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace), intent(in), optional :: & - tm ,&! mean tracer - tmask ! = 1. if tracer is present, = 0. otherwise + tm , & ! mean tracer + tmask ! = 1. if tracer is present, = 0. otherwise real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - mc ,&! mass value at geometric center of cell - mx, my ! limited derivative of mass wrt x and y + mc , & ! mass value at geometric center of cell + mx, my ! limited derivative of mass wrt x and y real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace), intent(out), optional :: & - tc ,&! tracer at geometric center of cell - tx, ty ! limited derivative of tracer wrt x and y + tc , & ! tracer at geometric center of cell + tx, ty ! limited derivative of tracer wrt x and y ! local variables integer (kind=int_kind) :: & - i, j ,&! horizontal indices - nt, nt1 ,&! tracer indices - ij ! combined i/j horizontal index + i, j , & ! horizontal indices + nt, nt1 , & ! tracer indices + ij ! combined i/j horizontal index real (kind=dbl_kind), dimension (nx_block,ny_block) :: & - mxav ,&! x coordinate of center of mass - myav ! y coordinate of center of mass + mxav , & ! x coordinate of center of mass + myav ! y coordinate of center of mass real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace) :: & - mtxav ,&! x coordinate of center of mass*tracer - mtyav ! y coordinate of center of mass*tracer + mtxav , & ! x coordinate of center of mass*tracer + mtyav ! y coordinate of center of mass*tracer real (kind=dbl_kind) :: & puny, & - w1, w2, w3, w7 ! work variables + w1, w2, w3, w7 ! work variables character(len=*), parameter :: subname = '(construct_fields)' - !------------------------------------------------------------------- - ! Compute field values at the geometric center of each grid cell, - ! and compute limited gradients in the x and y directions. - ! - ! For second order accuracy, each state variable is approximated as - ! a field varying linearly over x and y within each cell. For each - ! category, the integrated value of m(x,y) over the cell must - ! equal mm(i,j,n)*tarea(i,j), where tarea(i,j) is the cell area. - ! Similarly, the integrated value of m(x,y)*t(x,y) must equal - ! the total mass*tracer, mm(i,j,n)*tm(i,j,n)*tarea(i,j). - ! - ! These integral conditions are satisfied for linear fields if we - ! stipulate the following: - ! (1) The mean mass, mm, is equal to the mass at the cell centroid. - ! (2) The mean value tm1 of type 1 tracers is equal to the value - ! at the center of mass. - ! (3) The mean value tm2 of type 2 tracers is equal to the value - ! at the center of mass*tm1, where tm2 depends on tm1. - ! (See comments at the top of the module.) - ! - ! We want to find the value of each state variable at a standard - ! reference point, which we choose to be the geometric center of - ! the cell. The geometric center is located at the intersection - ! of the line joining the midpoints of the north and south edges - ! with the line joining the midpoints of the east and west edges. - ! To find the value at the geometric center, we must know the - ! location of the cell centroid/center of mass, along with the - ! mean value and the gradients with respect to x and y. - ! - ! The cell gradients are first computed from the difference between - ! values in the neighboring cells, then limited by requiring that - ! no new extrema are created within the cell. - ! - ! For rectangular coordinates the centroid and the geometric - ! 
center coincide, which means that some of the equations in this - ! subroutine could be simplified. However, the full equations - ! are retained for generality. - !------------------------------------------------------------------- - - !------------------------------------------------------------------- - ! Initialize - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute field values at the geometric center of each grid cell, + ! and compute limited gradients in the x and y directions. + ! + ! For second order accuracy, each state variable is approximated as + ! a field varying linearly over x and y within each cell. For each + ! category, the integrated value of m(x,y) over the cell must + ! equal mm(i,j,n)*tarea(i,j), where tarea(i,j) is the cell area. + ! Similarly, the integrated value of m(x,y)*t(x,y) must equal + ! the total mass*tracer, mm(i,j,n)*tm(i,j,n)*tarea(i,j). + ! + ! These integral conditions are satisfied for linear fields if we + ! stipulate the following: + ! (1) The mean mass, mm, is equal to the mass at the cell centroid. + ! (2) The mean value tm1 of type 1 tracers is equal to the value + ! at the center of mass. + ! (3) The mean value tm2 of type 2 tracers is equal to the value + ! at the center of mass*tm1, where tm2 depends on tm1. + ! (See comments at the top of the module.) + ! + ! We want to find the value of each state variable at a standard + ! reference point, which we choose to be the geometric center of + ! the cell. The geometric center is located at the intersection + ! of the line joining the midpoints of the north and south edges + ! with the line joining the midpoints of the east and west edges. + ! To find the value at the geometric center, we must know the + ! location of the cell centroid/center of mass, along with the + ! mean value and the gradients with respect to x and y. + ! + ! The cell gradients are first computed from the difference between + ! values in the neighboring cells, then limited by requiring that + ! no new extrema are created within the cell. + ! + ! For rectangular coordinates the centroid and the geometric + ! center coincide, which means that some of the equations in this + ! subroutine could be simplified. However, the full equations + ! are retained for generality. + !------------------------------------------------------------------- + + !------------------------------------------------------------------- + ! Initialize + !------------------------------------------------------------------- call icepack_query_parameters(puny_out=puny) call icepack_warnings_flush(nu_diag) @@ -1173,7 +1200,7 @@ subroutine construct_fields (nx_block, ny_block, & enddo enddo endif - + ! limited gradient of mass field in each cell (except masked cells) ! Note: The gradient is computed in scaled coordinates with ! dxt = dyt = hte = htn = 1. @@ -1190,7 +1217,7 @@ subroutine construct_fields (nx_block, ny_block, & j = indxj(ij) ! mass field at geometric center -!echmod: xav = yav = 0 + ! echmod: xav = yav = 0 mc(i,j) = mm(i,j) ! mc(i,j) = mm(i,j) - xav(i,j)*mx(i,j) & @@ -1202,129 +1229,130 @@ subroutine construct_fields (nx_block, ny_block, & if (present(tm)) then - do ij = 1,icells ! cells with mass - i = indxi(ij) - j = indxj(ij) - - ! center of mass (mxav,myav) for each cell -!echmod: xyav = 0 - mxav(i,j) = (mx(i,j)*xxav(i,j) & - + mc(i,j)*xav (i,j)) / mm(i,j) - myav(i,j) = (my(i,j)*yyav(i,j) & - + mc(i,j)*yav(i,j)) / mm(i,j) - -! 
mxav(i,j) = (mx(i,j)*xxav(i,j) & -! + my(i,j)*xyav(i,j) & -! + mc(i,j)*xav (i,j)) / mm(i,j) -! myav(i,j) = (mx(i,j)*xyav(i,j) & -! + my(i,j)*yyav(i,j) & -! + mc(i,j)*yav(i,j)) / mm(i,j) - enddo - - do nt = 1, ntrace - - if (tracer_type(nt)==1) then ! independent of other tracers - - call limited_gradient(nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - nghost, & - tm(:,:,nt), mmask, & - mxav, myav, & - tx(:,:,nt), ty(:,:,nt)) - - if (has_dependents(nt)) then ! need center of area*tracer - - do j = 1, ny_block - do i = 1, nx_block - mtxav(i,j,nt) = c0 - mtyav(i,j,nt) = c0 - enddo - enddo - - do ij = 1, icells ! Note: no tx or ty in ghost cells - ! (bound calls are later) - i = indxi(ij) - j = indxj(ij) + do ij = 1,icells ! cells with mass + i = indxi(ij) + j = indxj(ij) + + ! center of mass (mxav,myav) for each cell + ! echmod: xyav = 0 + mxav(i,j) = (mx(i,j)*xxav(i,j) & + + mc(i,j)*xav (i,j)) / mm(i,j) + myav(i,j) = (my(i,j)*yyav(i,j) & + + mc(i,j)*yav(i,j)) / mm(i,j) + +! mxav(i,j) = (mx(i,j)*xxav(i,j) & +! + my(i,j)*xyav(i,j) & +! + mc(i,j)*xav (i,j)) / mm(i,j) +! myav(i,j) = (mx(i,j)*xyav(i,j) & +! + my(i,j)*yyav(i,j) & +! + mc(i,j)*yav(i,j)) / mm(i,j) + enddo - ! tracer value at geometric center - tc(i,j,nt) = tm(i,j,nt) - tx(i,j,nt)*mxav(i,j) & - - ty(i,j,nt)*myav(i,j) - - if (tmask(i,j,nt) > puny) then - - ! center of area*tracer - w1 = mc(i,j)*tc(i,j,nt) - w2 = mc(i,j)*tx(i,j,nt) & - + mx(i,j)*tc(i,j,nt) - w3 = mc(i,j)*ty(i,j,nt) & - + my(i,j)*tc(i,j,nt) -! w4 = mx(i,j)*tx(i,j,nt) -! w5 = mx(i,j)*ty(i,j,nt) & -! + my(i,j)*tx(i,j,nt) -! w6 = my(i,j)*ty(i,j,nt) - w7 = c1 / (mm(i,j)*tm(i,j,nt)) -!echmod: grid arrays = 0 - mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j)) & - * w7 - mtyav(i,j,nt) = (w1*yav(i,j) + w3*yyav(i,j)) & - * w7 - -! mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j) & -! + w3*xyav (i,j) + w4*xxxav(i,j) & -! + w5*xxyav(i,j) + w6*xyyav(i,j)) & -! * w7 -! mtyav(i,j,nt) = (w1*yav(i,j) + w2*xyav (i,j) & -! + w3*yyav(i,j) + w4*xxyav(i,j) & -! + w5*xyyav(i,j) + w6*yyyav(i,j)) & -! * w7 - endif ! tmask + do nt = 1, ntrace - enddo ! ij + if (tracer_type(nt)==1) then ! independent of other tracers - else ! no dependents + call limited_gradient(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, & + tm(:,:,nt), mmask, & + mxav, myav, & + tx(:,:,nt), ty(:,:,nt)) - do ij = 1, icells ! mass is present - i = indxi(ij) - j = indxj(ij) + if (has_dependents(nt)) then ! need center of area*tracer - ! tracer value at geometric center - tc(i,j,nt) = tm(i,j,nt) - tx(i,j,nt)*mxav(i,j) & - - ty(i,j,nt)*myav(i,j) - enddo ! ij + do j = 1, ny_block + do i = 1, nx_block + mtxav(i,j,nt) = c0 + mtyav(i,j,nt) = c0 + enddo + enddo - endif ! has_dependents + do ij = 1, icells ! Note: no tx or ty in ghost cells + ! (bound calls are later) + i = indxi(ij) + j = indxj(ij) + + ! tracer value at geometric center + tc(i,j,nt) = tm(i,j,nt) - tx(i,j,nt)*mxav(i,j) & + - ty(i,j,nt)*myav(i,j) + + if (tmask(i,j,nt) > puny) then + + ! center of area*tracer + w1 = mc(i,j)*tc(i,j,nt) + w2 = mc(i,j)*tx(i,j,nt) & + + mx(i,j)*tc(i,j,nt) + w3 = mc(i,j)*ty(i,j,nt) & + + my(i,j)*tc(i,j,nt) +! w4 = mx(i,j)*tx(i,j,nt) +! w5 = mx(i,j)*ty(i,j,nt) & +! + my(i,j)*tx(i,j,nt) +! w6 = my(i,j)*ty(i,j,nt) + w7 = c1 / (mm(i,j)*tm(i,j,nt)) + ! echmod: grid arrays = 0 + mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j)) & + * w7 + mtyav(i,j,nt) = (w1*yav(i,j) + w3*yyav(i,j)) & + * w7 + +! mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j) & +! + w3*xyav (i,j) + w4*xxxav(i,j) & +! + w5*xxyav(i,j) + w6*xyyav(i,j)) & +! * w7 +! 
mtyav(i,j,nt) = (w1*yav(i,j) + w2*xyav (i,j) & +! + w3*yyav(i,j) + w4*xxyav(i,j) & +! + w5*xyyav(i,j) + w6*yyyav(i,j)) & +! * w7 + endif ! tmask + + enddo ! ij + + else ! no dependents + + do ij = 1, icells ! mass is present + i = indxi(ij) + j = indxj(ij) + + ! tracer value at geometric center + tc(i,j,nt) = tm(i,j,nt) - tx(i,j,nt)*mxav(i,j) & + - ty(i,j,nt)*myav(i,j) + enddo ! ij + + endif ! has_dependents + + elseif (tracer_type(nt)==2) then ! tracer nt depends on nt1 + nt1 = depend(nt) - elseif (tracer_type(nt)==2) then ! tracer nt depends on nt1 - nt1 = depend(nt) + call limited_gradient(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, & + tm (:,:,nt), tmask(:,:,nt1), & + mtxav(:,:,nt1), mtyav(:,:,nt1), & + tx (:,:,nt), ty (:,:,nt)) - call limited_gradient(nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - nghost, & - tm(:,:,nt), tmask(:,:,nt1), & - mtxav(:,:,nt1), mtyav(:,:,nt1), & - tx(:,:,nt), ty(:,:,nt)) + do ij = 1, icells ! ice is present + i = indxi(ij) + j = indxj(ij) + tc(i,j,nt) = tm(i,j,nt) & + - tx(i,j,nt) * mtxav(i,j,nt1) & + - ty(i,j,nt) * mtyav(i,j,nt1) + enddo ! ij - do ij = 1, icells ! ice is present - i = indxi(ij) - j = indxj(ij) - tc(i,j,nt) = tm(i,j,nt) & - - tx(i,j,nt) * mtxav(i,j,nt1) & - - ty(i,j,nt) * mtyav(i,j,nt1) - enddo ! ij + elseif (tracer_type(nt)==3) then ! upwind approx; gradient = 0 - elseif (tracer_type(nt)==3) then ! upwind approx; gradient = 0 + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) - do ij = 1, icells - i = indxi(ij) - j = indxj(ij) + tc(i,j,nt) = tm(i,j,nt) +! tx(i,j,nt) = c0 ! already initialized to 0. +! ty(i,j,nt) = c0 + enddo ! ij - tc(i,j,nt) = tm(i,j,nt) -! tx(i,j,nt) = c0 ! already initialized to 0. -! ty(i,j,nt) = c0 - enddo ! ij + endif ! tracer_type - endif ! tracer_type - enddo ! ntrace + enddo ! ntrace endif ! present (tm) @@ -1349,43 +1377,42 @@ subroutine limited_gradient (nx_block, ny_block, & gx, gy) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block,&! block dimensions - ilo,ihi,jlo,jhi ,&! beginning and end of physical domain - nghost ! number of ghost cells + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi , & ! beginning and end of physical domain + nghost ! number of ghost cells real (kind=dbl_kind), dimension (nx_block,ny_block), intent (in) :: & - phi ,&! input tracer field (mean values in each grid cell) - cnx ,&! x-coordinate of phi relative to geometric center of cell - cny ,&! y-coordinate of phi relative to geometric center of cell - phimask - ! phimask(i,j) = 1 if phi(i,j) has physical meaning, = 0 otherwise. - ! For instance, aice has no physical meaning in land cells, - ! and hice no physical meaning where aice = 0. + phi , & ! input tracer field (mean values in each grid cell) + cnx , & ! x-coordinate of phi relative to geometric center of cell + cny , & ! y-coordinate of phi relative to geometric center of cell + phimask ! phimask(i,j) = 1 if phi(i,j) has physical meaning, = 0 otherwise. + ! For instance, aice has no physical meaning in land cells, + ! and hice no physical meaning where aice = 0. real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - gx ,&! limited x-direction gradient - gy ! limited y-direction gradient + gx , & ! limited x-direction gradient + gy ! limited y-direction gradient ! local variables integer (kind=int_kind) :: & - i, j, ij ,&! standard indices - icells ! number of cells to limit + i, j, ij , & ! standard indices + icells ! number of cells to limit integer (kind=int_kind), dimension(nx_block*ny_block) :: & - indxi, indxj ! 
combined i/j horizontal indices + indxi, indxj ! combined i/j horizontal indices real (kind=dbl_kind) :: & - phi_nw, phi_n, phi_ne ,&! values of phi in 8 neighbor cells - phi_w, phi_e ,& - phi_sw, phi_s, phi_se ,& - qmn, qmx ,&! min and max value of phi within grid cell - pmn, pmx ,&! min and max value of phi among neighbor cells - w1, w2, w3, w4 ! work variables + phi_nw, phi_n, phi_ne , & ! values of phi in 8 neighbor cells + phi_w, phi_e , & + phi_sw, phi_s, phi_se , & + qmn, qmx , & ! min and max value of phi within grid cell + pmn, pmx , & ! min and max value of phi among neighbor cells + w1, w2, w3, w4 ! work variables real (kind=dbl_kind) :: & - puny, & ! - gxtmp, gytmp ! temporary term for x- and y- limited gradient + puny , & ! + gxtmp, gytmp ! temporary term for x- and y- limited gradient character(len=*), parameter :: subname = '(limited_gradient)' @@ -1419,22 +1446,22 @@ subroutine limited_gradient (nx_block, ny_block, & ! Note: phimask = 1. or 0. If phimask = 1., use the true value; ! if phimask = 0., use the home cell value so that non-physical ! values of phi do not contribute to the gradient. - phi_nw = phimask(i-1,j+1) * phi(i-1,j+1) & - + (c1-phimask(i-1,j+1))* phi(i,j) - phi_n = phimask(i,j+1) * phi(i,j+1) & - + (c1-phimask(i,j+1)) * phi(i,j) - phi_ne = phimask(i+1,j+1) * phi(i+1,j+1) & - + (c1-phimask(i+1,j+1))* phi(i,j) - phi_w = phimask(i-1,j) * phi(i-1,j) & - + (c1-phimask(i-1,j)) * phi(i,j) - phi_e = phimask(i+1,j) * phi(i+1,j) & - + (c1-phimask(i+1,j)) * phi(i,j) - phi_sw = phimask(i-1,j-1) * phi(i-1,j-1) & - + (c1-phimask(i-1,j-1))* phi(i,j) - phi_s = phimask(i,j-1) * phi(i,j-1) & - + (c1-phimask(i,j-1)) * phi(i,j) - phi_se = phimask(i+1,j-1) * phi(i+1,j-1) & - + (c1-phimask(i+1,j-1))* phi(i,j) + phi_nw = phimask(i-1,j+1) * phi(i-1,j+1) & + + (c1-phimask(i-1,j+1))* phi(i ,j ) + phi_n = phimask(i ,j+1) * phi(i ,j+1) & + + (c1-phimask(i ,j+1))* phi(i ,j ) + phi_ne = phimask(i+1,j+1) * phi(i+1,j+1) & + + (c1-phimask(i+1,j+1))* phi(i ,j ) + phi_w = phimask(i-1,j ) * phi(i-1,j ) & + + (c1-phimask(i-1,j ))* phi(i ,j ) + phi_e = phimask(i+1,j ) * phi(i+1,j ) & + + (c1-phimask(i+1,j ))* phi(i ,j ) + phi_sw = phimask(i-1,j-1) * phi(i-1,j-1) & + + (c1-phimask(i-1,j-1))* phi(i ,j ) + phi_s = phimask(i ,j-1) * phi(i ,j-1) & + + (c1-phimask(i ,j-1))* phi(i ,j ) + phi_se = phimask(i+1,j-1) * phi(i+1,j-1) & + + (c1-phimask(i+1,j-1))* phi(i ,j ) ! unlimited gradient components ! (factors of two cancel out) @@ -1505,34 +1532,34 @@ subroutine departure_points (nx_block, ny_block, & istop, jstop) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block,&! block dimensions - ilo,ihi,jlo,jhi, &! beginning and end of physical domain - nghost ! number of ghost cells + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + nghost ! number of ghost cells real (kind=dbl_kind), intent(in) :: & dt ! time step (s) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvel ,&! x-component of velocity (m/s) - vvel ,&! y-component of velocity (m/s) - dxu ,&! E-W dimensions of U-cell (m) - dyu ,&! N-S dimensions of U-cell (m) - HTN ,&! length of north face of T-cell (m) - HTE ! length of east face of T-cell (m) + uvel , & ! x-component of velocity (m/s) + vvel , & ! y-component of velocity (m/s) + dxu , & ! E-W dimensions of U-cell (m) + dyu , & ! N-S dimensions of U-cell (m) + HTN , & ! length of north face of T-cell (m) + HTE ! 
length of east face of T-cell (m) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - dpx ,&! coordinates of departure points (m) - dpy ! coordinates of departure points (m) + dpx , & ! coordinates of departure points (m) + dpy ! coordinates of departure points (m) logical (kind=log_kind), intent(in) :: & - l_dp_midpt ! if true, find departure points using - ! corrected midpoint velocity + l_dp_midpt ! if true, find departure points using + ! corrected midpoint velocity logical (kind=log_kind), intent(inout) :: & - l_stop ! if true, abort on return + l_stop ! if true, abort on return integer (kind=int_kind), intent(inout) :: & - istop, jstop ! indices of grid cell where model aborts + istop, jstop ! indices of grid cell where model aborts ! local variables @@ -1540,20 +1567,20 @@ subroutine departure_points (nx_block, ny_block, & i, j, i2, j2 ! horizontal indices real (kind=dbl_kind) :: & - mpx, mpy ,&! coordinates of midpoint of back trajectory, + mpx, mpy , & ! coordinates of midpoint of back trajectory, ! relative to cell corner - mpxt, mpyt ,&! midpoint coordinates relative to cell center + mpxt, mpyt , & ! midpoint coordinates relative to cell center ump, vmp ! corrected velocity at midpoint character(len=*), parameter :: subname = '(departure_points)' - !------------------------------------------------------------------- - ! Estimate departure points. - ! This estimate is 1st-order accurate in time; improve accuracy by - ! using midpoint approximation (to add later). - ! For nghost = 1, loop over physical cells and update ghost cells later. - ! For nghost = 2, loop over a layer of ghost cells and skip update. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Estimate departure points. + ! This estimate is 1st-order accurate in time; improve accuracy by + ! using midpoint approximation (to add later). + ! For nghost = 1, loop over physical cells and update ghost cells later. + ! For nghost = 2, loop over a layer of ghost cells and skip update. + !------------------------------------------------------------------- dpx(:,:) = c0 dpy(:,:) = c0 @@ -1588,84 +1615,84 @@ subroutine departure_points (nx_block, ny_block, & return endif - if (l_dp_midpt) then ! find dep pts using corrected midpt velocity - - do j = jlo-nghost+1, jhi+nghost-1 - do i = ilo-nghost+1, ihi+nghost-1 - if (uvel(i,j)/=c0 .or. vvel(i,j)/=c0) then - - !------------------------------------------------------------------- - ! Scale departure points to coordinate system in which grid cells - ! have sides of unit length. - !------------------------------------------------------------------- - - dpx(i,j) = dpx(i,j) / dxu(i,j) - dpy(i,j) = dpy(i,j) / dyu(i,j) - - !------------------------------------------------------------------- - ! Estimate midpoint of backward trajectory relative to corner (i,j). - !------------------------------------------------------------------- - - mpx = p5 * dpx(i,j) - mpy = p5 * dpy(i,j) - - !------------------------------------------------------------------- - ! Determine the indices (i2,j2) of the cell where the trajectory lies. - ! Compute the coordinates of the midpoint of the backward trajectory - ! relative to the cell center in a stretch coordinate system - ! with vertices at (1/2, 1/2), (1/2, -1/2), etc. - !------------------------------------------------------------------- - - if (mpx >= c0 .and. mpy >= c0) then ! 
cell (i+1,j+1) - i2 = i+1 - j2 = j+1 - mpxt = mpx - p5 - mpyt = mpy - p5 - elseif (mpx < c0 .and. mpy < c0) then ! cell (i,j) - i2 = i - j2 = j - mpxt = mpx + p5 - mpyt = mpy + p5 - elseif (mpx >= c0 .and. mpy < c0) then ! cell (i+1,j) - i2 = i+1 - j2 = j - mpxt = mpx - p5 - mpyt = mpy + p5 - elseif (mpx < c0 .and. mpy >= c0) then ! cell (i,j+1) - i2 = i - j2 = j+1 - mpxt = mpx + p5 - mpyt = mpy - p5 - endif - - !------------------------------------------------------------------- - ! Using a bilinear approximation, estimate the velocity at the - ! trajectory midpoint in the (i2,j2) reference frame. - !------------------------------------------------------------------- - - ump = uvel(i2-1,j2-1)*(mpxt-p5)*(mpyt-p5) & - - uvel(i2, j2-1)*(mpxt+p5)*(mpyt-p5) & - + uvel(i2, j2 )*(mpxt+p5)*(mpyt+p5) & - - uvel(i2-1,j2 )*(mpxt-p5)*(mpyt+p5) - - vmp = vvel(i2-1,j2-1)*(mpxt-p5)*(mpyt-p5) & - - vvel(i2, j2-1)*(mpxt+p5)*(mpyt-p5) & - + vvel(i2, j2 )*(mpxt+p5)*(mpyt+p5) & - - vvel(i2-1,j2 )*(mpxt-p5)*(mpyt+p5) - - !------------------------------------------------------------------- - ! Use the midpoint velocity to estimate the coordinates of the - ! departure point relative to corner (i,j). - !------------------------------------------------------------------- - - dpx(i,j) = -dt * ump - dpy(i,j) = -dt * vmp - - endif ! nonzero velocity - - enddo ! i - enddo ! j - + if (l_dp_midpt) then ! find dep pts using corrected midpt velocity + + do j = jlo-nghost+1, jhi+nghost-1 + do i = ilo-nghost+1, ihi+nghost-1 + if (uvel(i,j)/=c0 .or. vvel(i,j)/=c0) then + + !------------------------------------------------------------------- + ! Scale departure points to coordinate system in which grid cells + ! have sides of unit length. + !------------------------------------------------------------------- + + dpx(i,j) = dpx(i,j) / dxu(i,j) + dpy(i,j) = dpy(i,j) / dyu(i,j) + + !------------------------------------------------------------------- + ! Estimate midpoint of backward trajectory relative to corner (i,j). + !------------------------------------------------------------------- + + mpx = p5 * dpx(i,j) + mpy = p5 * dpy(i,j) + + !------------------------------------------------------------------- + ! Determine the indices (i2,j2) of the cell where the trajectory lies. + ! Compute the coordinates of the midpoint of the backward trajectory + ! relative to the cell center in a stretch coordinate system + ! with vertices at (1/2, 1/2), (1/2, -1/2), etc. + !------------------------------------------------------------------- + + if (mpx >= c0 .and. mpy >= c0) then ! cell (i+1,j+1) + i2 = i+1 + j2 = j+1 + mpxt = mpx - p5 + mpyt = mpy - p5 + elseif (mpx < c0 .and. mpy < c0) then ! cell (i,j) + i2 = i + j2 = j + mpxt = mpx + p5 + mpyt = mpy + p5 + elseif (mpx >= c0 .and. mpy < c0) then ! cell (i+1,j) + i2 = i+1 + j2 = j + mpxt = mpx - p5 + mpyt = mpy + p5 + elseif (mpx < c0 .and. mpy >= c0) then ! cell (i,j+1) + i2 = i + j2 = j+1 + mpxt = mpx + p5 + mpyt = mpy - p5 + endif + + !------------------------------------------------------------------- + ! Using a bilinear approximation, estimate the velocity at the + ! trajectory midpoint in the (i2,j2) reference frame. 
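
The l_dp_midpt block being re-indented here estimates the velocity at the midpoint of the backward trajectory by bilinear interpolation of the four surrounding corner velocities, with the corners of the containing cell at (+-1/2, +-1/2) in scaled coordinates. The sketch below restates that corner weighting as a standalone function and checks it at two corners and at the cell center; the function and variable names are invented for illustration and do not appear in the patch.

program bilin_demo
   implicit none
   integer, parameter :: dbl = selected_real_kind(13)
   real (kind=dbl), parameter :: p5 = 0.5_dbl

   ! corner values: 1 at SW, 2 at SE, 3 at NE, 4 at NW
   write(*,*) bilin(1.0_dbl, 2.0_dbl, 3.0_dbl, 4.0_dbl, -p5, -p5)       ! 1.0 (SW)
   write(*,*) bilin(1.0_dbl, 2.0_dbl, 3.0_dbl, 4.0_dbl,  p5,  p5)       ! 3.0 (NE)
   write(*,*) bilin(1.0_dbl, 2.0_dbl, 3.0_dbl, 4.0_dbl, 0.0_dbl, 0.0_dbl) ! 2.5 (mean)

contains

   real (kind=dbl) function bilin (uSW, uSE, uNE, uNW, x, y)
      ! bilinear value at (x,y) for a cell with corners at (+-1/2, +-1/2),
      ! using the same corner weighting as the ump/vmp expressions above
      real (kind=dbl), intent(in) :: uSW, uSE, uNE, uNW, x, y
      bilin =   uSW*(x-p5)*(y-p5) &
              - uSE*(x+p5)*(y-p5) &
              + uNE*(x+p5)*(y+p5) &
              - uNW*(x-p5)*(y+p5)
   end function bilin

end program bilin_demo
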
+ !------------------------------------------------------------------- + + ump = uvel(i2-1,j2-1)*(mpxt-p5)*(mpyt-p5) & + - uvel(i2, j2-1)*(mpxt+p5)*(mpyt-p5) & + + uvel(i2, j2 )*(mpxt+p5)*(mpyt+p5) & + - uvel(i2-1,j2 )*(mpxt-p5)*(mpyt+p5) + + vmp = vvel(i2-1,j2-1)*(mpxt-p5)*(mpyt-p5) & + - vvel(i2, j2-1)*(mpxt+p5)*(mpyt-p5) & + + vvel(i2, j2 )*(mpxt+p5)*(mpyt+p5) & + - vvel(i2-1,j2 )*(mpxt-p5)*(mpyt+p5) + + !------------------------------------------------------------------- + ! Use the midpoint velocity to estimate the coordinates of the + ! departure point relative to corner (i,j). + !------------------------------------------------------------------- + + dpx(i,j) = -dt * ump + dpy(i,j) = -dt * vmp + + endif ! nonzero velocity + + enddo ! i + enddo ! j + endif ! l_dp_midpt end subroutine departure_points @@ -1691,17 +1718,17 @@ subroutine locate_triangles (nx_block, ny_block, & l_fixed_area, edgearea) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block,&! block dimensions - ilo,ihi,jlo,jhi ,&! beginning and end of physical domain - nghost ! number of ghost cells + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi , & ! beginning and end of physical domain + nghost ! number of ghost cells character (len=char_len), intent(in) :: & edge ! 'north' or 'east' real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & - dpx ,&! x coordinates of departure points at cell corners - dpy ,&! y coordinates of departure points at cell corners - dxu ,&! E-W dimension of U-cell (m) + dpx , & ! x coordinates of departure points at cell corners + dpy , & ! y coordinates of departure points at cell corners + dxu , & ! E-W dimension of U-cell (m) dyu ! N-S dimension of U-cell (m) real (kind=dbl_kind), dimension (nx_block,ny_block,0:nvert,ngroups), intent(out) :: & @@ -1711,14 +1738,14 @@ subroutine locate_triangles (nx_block, ny_block, & triarea ! area of departure triangle integer (kind=int_kind), dimension (nx_block,ny_block,ngroups), intent(out) :: & - iflux ,&! i index of cell contributing transport + iflux , & ! i index of cell contributing transport jflux ! j index of cell contributing transport integer (kind=int_kind), dimension (ngroups), intent(out) :: & icells ! number of cells where triarea > puny integer (kind=int_kind), dimension (nx_block*ny_block,ngroups), intent(out) :: & - indxi ,&! compressed index in i-direction + indxi , & ! compressed index in i-direction indxj ! compressed index in j-direction logical, intent(in) :: & @@ -1726,7 +1753,7 @@ subroutine locate_triangles (nx_block, ny_block, & ! passed in as edgearea ! if false, edgearea if determined internally ! and is passed out - + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(inout) :: & edgearea ! area of departure region for each edge ! edgearea > 0 for eastward/northward flow @@ -1734,50 +1761,50 @@ subroutine locate_triangles (nx_block, ny_block, & ! local variables integer (kind=int_kind) :: & - i, j, ij, ic ,&! horizontal indices - ib, ie, jb, je ,&! limits for loops over edges - ng, nv ,&! triangle indices - ishift, jshift ,&! differences between neighbor cells - ishift_tl, jshift_tl ,&! i,j indices of TL cell relative to edge - ishift_bl, jshift_bl ,&! i,j indices of BL cell relative to edge - ishift_tr, jshift_tr ,&! i,j indices of TR cell relative to edge - ishift_br, jshift_br ,&! i,j indices of BR cell relative to edge - ishift_tc, jshift_tc ,&! i,j indices of TC cell relative to edge - ishift_bc, jshift_bc ! i,j indices of BC cell relative to edge + i, j, ij, ic , & ! 
horizontal indices + ib, ie, jb, je , & ! limits for loops over edges + ng, nv , & ! triangle indices + ishift , jshift , & ! differences between neighbor cells + ishift_tl, jshift_tl , & ! i,j indices of TL cell relative to edge + ishift_bl, jshift_bl , & ! i,j indices of BL cell relative to edge + ishift_tr, jshift_tr , & ! i,j indices of TR cell relative to edge + ishift_br, jshift_br , & ! i,j indices of BR cell relative to edge + ishift_tc, jshift_tc , & ! i,j indices of TC cell relative to edge + ishift_bc, jshift_bc ! i,j indices of BC cell relative to edge integer (kind=int_kind) :: & icellsd ! number of cells where departure area > 0. integer (kind=int_kind), dimension (nx_block*ny_block) :: & - indxid ,&! compressed index in i-direction + indxid , & ! compressed index in i-direction indxjd ! compressed index in j-direction real (kind=dbl_kind), dimension(nx_block,ny_block) :: & - dx, dy ,&! scaled departure points - areafac_c ,&! area scale factor at center of edge - areafac_l ,&! area scale factor at left corner + dx, dy , & ! scaled departure points + areafac_c , & ! area scale factor at center of edge + areafac_l , & ! area scale factor at left corner areafac_r ! area scale factor at right corner real (kind=dbl_kind) :: & - xcl, ycl ,&! coordinates of left corner point + xcl, ycl , & ! coordinates of left corner point ! (relative to midpoint of edge) - xdl, ydl ,&! left departure point - xil, yil ,&! left intersection point - xcr, ycr ,&! right corner point - xdr, ydr ,&! right departure point - xir, yir ,&! right intersection point - xic, yic ,&! x-axis intersection point - xicl, yicl ,&! left-hand x-axis intersection point - xicr, yicr ,&! right-hand x-axis intersection point - xdm, ydm ,&! midpoint of segment connecting DL and DR; + xdl, ydl , & ! left departure point + xil, yil , & ! left intersection point + xcr, ycr , & ! right corner point + xdr, ydr , & ! right departure point + xir, yir , & ! right intersection point + xic, yic , & ! x-axis intersection point + xicl, yicl , & ! left-hand x-axis intersection point + xicr, yicr , & ! right-hand x-axis intersection point + xdm, ydm , & ! midpoint of segment connecting DL and DR; ! shifted if l_fixed_area = T - md ,&! slope of line connecting DL and DR - mdl ,&! slope of line connecting DL and DM - mdr ,&! slope of line connecting DR and DM - area1, area2 ,&! temporary triangle areas - area3, area4 ,&! - area_c ,&! center polygon area - puny ,&! + md , & ! slope of line connecting DL and DR + mdl , & ! slope of line connecting DL and DM + mdr , & ! slope of line connecting DR and DM + area1, area2 , & ! temporary triangle areas + area3, area4 , & ! + area_c , & ! center polygon area + puny , & ! w1, w2 ! work variables real (kind=dbl_kind), dimension (nx_block,ny_block,ngroups) :: & @@ -1785,61 +1812,61 @@ subroutine locate_triangles (nx_block, ny_block, & real (kind=dbl_kind), dimension(nx_block,ny_block) :: & areasum ! sum of triangle areas for a given edge - + character(len=*), parameter :: subname = '(locate_triangles)' - !------------------------------------------------------------------- - ! Triangle notation: - ! For each edge, there are 20 triangles that can contribute, - ! but many of these are mutually exclusive. It turns out that - ! at most 5 triangles can contribute to transport integrals at once. - ! - ! See Figure 3 in DB for pictures of these triangles. - ! See Table 1 in DB for logical conditions. - ! - ! For the north edge, DB refer to these triangles as: - ! (1) NW, NW1, W, W2 - ! (2) NE, NE1, E, E2 - ! 
(3) NW2, W1, NE2, E1
-      !     (4) H1a, H1b, N1a, N1b
-      !     (5) H2a, H2b, N2a, N2b
-      !
-      ! For the east edge, DB refer to these triangles as:
-      !     (1) NE, NE1, N, N2
-      !     (2) SE, SE1, S, S2
-      !     (3) NE2, N1, SE2, S1
-      !     (4) H1a, H1b, E1a, E2b
-      !     (5) H2a, H2b, E2a, E2b
-      !
-      ! The code below works for either north or east edges.
-      ! The respective triangle labels are:
-      !     (1) TL, TL1, BL, BL2
-      !     (2) TR, TR1, BR, BR2
-      !     (3) TL2, BL1, TR2, BR1
-      !     (4) BC1a, BC1b, TC1a, TC2b
-      !     (5) BC2a, BC2b, TC2a, TC2b
-      !
-      ! where the cell labels are:
-      !
-      !      |       |
-      !  TL  |  TC   |  TR    (top left, center, right)
-      !      |       |
-      !  ------------------------
-      !      |       |
-      !  BL  |  BC   |  BR    (bottom left, center, right)
-      !      |       |
-      !
-      ! and the transport is across the edge between cells TC and TB.
-      !
-      ! Departure points are scaled to a local coordinate system
-      ! whose origin is at the midpoint of the edge.
-      ! In this coordinate system, the lefthand corner CL = (-0.5,0)
-      ! and the righthand corner CR = (0.5, 0).
-      !-------------------------------------------------------------------
-
-      !-------------------------------------------------------------------
-      ! Initialize
-      !-------------------------------------------------------------------
+      !-------------------------------------------------------------------
+      ! Triangle notation:
+      ! For each edge, there are 20 triangles that can contribute,
+      ! but many of these are mutually exclusive.  It turns out that
+      ! at most 5 triangles can contribute to transport integrals at once.
+      !
+      ! See Figure 3 in DB for pictures of these triangles.
+      ! See Table 1 in DB for logical conditions.
+      !
+      ! For the north edge, DB refer to these triangles as:
+      !     (1) NW, NW1, W, W2
+      !     (2) NE, NE1, E, E2
+      !     (3) NW2, W1, NE2, E1
+      !     (4) H1a, H1b, N1a, N1b
+      !     (5) H2a, H2b, N2a, N2b
+      !
+      ! For the east edge, DB refer to these triangles as:
+      !     (1) NE, NE1, N, N2
+      !     (2) SE, SE1, S, S2
+      !     (3) NE2, N1, SE2, S1
+      !     (4) H1a, H1b, E1a, E1b
+      !     (5) H2a, H2b, E2a, E2b
+      !
+      ! The code below works for either north or east edges.
+      ! The respective triangle labels are:
+      !     (1) TL, TL1, BL, BL2
+      !     (2) TR, TR1, BR, BR2
+      !     (3) TL2, BL1, TR2, BR1
+      !     (4) BC1a, BC1b, TC1a, TC1b
+      !     (5) BC2a, BC2b, TC2a, TC2b
+      !
+      ! where the cell labels are:
+      !
+      !      |       |
+      !  TL  |  TC   |  TR    (top left, center, right)
+      !      |       |
+      !  ------------------------
+      !      |       |
+      !  BL  |  BC   |  BR    (bottom left, center, right)
+      !      |       |
+      !
+      ! and the transport is across the edge between cells TC and BC.
+      !
+      ! Departure points are scaled to a local coordinate system
+      ! whose origin is at the midpoint of the edge.
+      ! In this coordinate system, the lefthand corner CL = (-0.5,0)
+      ! and the righthand corner CR = (0.5, 0).
+      !-------------------------------------------------------------------
+
+      !-------------------------------------------------------------------
+      ! Initialize
+      !-------------------------------------------------------------------
       call icepack_query_parameters(puny_out=puny)
       call icepack_warnings_flush(nu_diag)
@@ -1873,7 +1900,7 @@ subroutine locate_triangles (nx_block, ny_block, &
       ! loop size
       ib = ilo
-      ie = ihi
+      ie = ihi
      jb = jlo - nghost  !
lowest j index is a ghost cell je = jhi @@ -1896,8 +1923,8 @@ subroutine locate_triangles (nx_block, ny_block, & do j = jb, je do i = ib, ie - areafac_l(i,j) = dxu(i-1,j)*dyu(i-1,j) - areafac_r(i,j) = dxu(i,j)*dyu(i,j) + areafac_l(i,j) = dxu(i-1,j)*dyu(i-1,j) + areafac_r(i,j) = dxu(i ,j)*dyu(i ,j) areafac_c(i,j) = p5*(areafac_l(i,j) + areafac_r(i,j)) enddo enddo @@ -1930,7 +1957,7 @@ subroutine locate_triangles (nx_block, ny_block, & do j = jb, je do i = ib, ie - areafac_l(i,j) = dxu(i,j)*dyu(i,j) + areafac_l(i,j) = dxu(i,j )*dyu(i,j ) areafac_r(i,j) = dxu(i,j-1)*dyu(i,j-1) areafac_c(i,j) = p5 * (areafac_l(i,j) + areafac_r(i,j)) enddo @@ -1938,9 +1965,9 @@ subroutine locate_triangles (nx_block, ny_block, & endif - !------------------------------------------------------------------- - ! Compute mask for edges with nonzero departure areas - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute mask for edges with nonzero departure areas + !------------------------------------------------------------------- if (l_fixed_area) then icellsd = 0 @@ -1982,9 +2009,9 @@ subroutine locate_triangles (nx_block, ny_block, & endif ! edge = north/east endif ! l_fixed_area - !------------------------------------------------------------------- - ! Scale the departure points - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Scale the departure points + !------------------------------------------------------------------- do j = 1, je do i = 1, ie @@ -1993,20 +2020,20 @@ subroutine locate_triangles (nx_block, ny_block, & enddo enddo - !------------------------------------------------------------------- - ! Compute departure regions, divide into triangles, and locate - ! vertices of each triangle. - ! Work in a nondimensional coordinate system in which lengths are - ! scaled by the local metric coefficients (dxu and dyu). - ! Note: The do loop includes north faces of the j = 1 ghost cells - ! when edge = 'north'. The loop includes east faces of i = 1 - ! ghost cells when edge = 'east'. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute departure regions, divide into triangles, and locate + ! vertices of each triangle. + ! Work in a nondimensional coordinate system in which lengths are + ! scaled by the local metric coefficients (dxu and dyu). + ! Note: The do loop includes north faces of the j = 1 ghost cells + ! when edge = 'north'. The loop includes east faces of i = 1 + ! ghost cells when edge = 'east'. + !------------------------------------------------------------------- do ij = 1, icellsd i = indxid(ij) j = indxjd(ij) - + xcl = -p5 ycl = c0 @@ -2016,15 +2043,15 @@ subroutine locate_triangles (nx_block, ny_block, & ! Departure points if (trim(edge) == 'north') then ! north edge - xdl = xcl + dx(i-1,j) - ydl = ycl + dy(i-1,j) - xdr = xcr + dx(i,j) - ydr = ycr + dy(i,j) + xdl = xcl + dx(i-1,j ) + ydl = ycl + dy(i-1,j ) + xdr = xcr + dx(i ,j ) + ydr = ycr + dy(i ,j ) else ! 
east edge; rotate trajectory by pi/2 - xdl = xcl - dy(i,j) - ydl = ycl + dx(i,j) - xdr = xcr - dy(i,j-1) - ydr = ycr + dx(i,j-1) + xdl = xcl - dy(i ,j ) + ydl = ycl + dx(i ,j ) + xdr = xcr - dy(i ,j-1) + ydr = ycr + dx(i ,j-1) endif xdm = p5 * (xdr + xdl) @@ -2034,12 +2061,12 @@ subroutine locate_triangles (nx_block, ny_block, & xil = xcl yil = (xcl*(ydm-ydl) + xdm*ydl - xdl*ydm) / (xdm - xdl) - + xir = xcr - yir = (xcr*(ydr-ydm) - xdm*ydr + xdr*ydm) / (xdr - xdm) - + yir = (xcr*(ydr-ydm) - xdm*ydr + xdr*ydm) / (xdr - xdm) + md = (ydr - ydl) / (xdr - xdl) - + if (abs(md) > puny) then xic = xdl - ydl/md else @@ -2052,14 +2079,14 @@ subroutine locate_triangles (nx_block, ny_block, & xicr = xic yicr = yic - !------------------------------------------------------------------- - ! Locate triangles in TL cell (NW for north edge, NE for east edge) - ! and BL cell (W for north edge, N for east edge). - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Locate triangles in TL cell (NW for north edge, NE for east edge) + ! and BL cell (W for north edge, N for east edge). + !------------------------------------------------------------------- if (yil > c0 .and. xdl < xcl .and. ydl >= c0) then - ! TL (group 1) + ! TL (group 1) ng = 1 xp (i,j,1,ng) = xcl @@ -2074,7 +2101,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (yil < c0 .and. xdl < xcl .and. ydl < c0) then - ! BL (group 1) + ! BL (group 1) ng = 1 xp (i,j,1,ng) = xcl @@ -2089,7 +2116,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (yil < c0 .and. xdl < xcl .and. ydl >= c0) then - ! TL1 (group 1) + ! TL1 (group 1) ng = 1 xp (i,j,1,ng) = xcl @@ -2102,7 +2129,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tl areafact(i,j,ng) = areafac_l(i,j) - ! BL1 (group 3) + ! BL1 (group 3) ng = 3 xp (i,j,1,ng) = xcl @@ -2117,7 +2144,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (yil > c0 .and. xdl < xcl .and. ydl < c0) then - ! TL2 (group 3) + ! TL2 (group 3) ng = 3 xp (i,j,1,ng) = xcl @@ -2130,7 +2157,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tl areafact(i,j,ng) = -areafac_l(i,j) - ! BL2 (group 1) + ! BL2 (group 1) ng = 1 xp (i,j,1,ng) = xcl @@ -2145,14 +2172,14 @@ subroutine locate_triangles (nx_block, ny_block, & endif ! TL and BL triangles - !------------------------------------------------------------------- - ! Locate triangles in TR cell (NE for north edge, SE for east edge) - ! and in BR cell (E for north edge, S for east edge). - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Locate triangles in TR cell (NE for north edge, SE for east edge) + ! and in BR cell (E for north edge, S for east edge). + !------------------------------------------------------------------- if (yir > c0 .and. xdr >= xcr .and. ydr >= c0) then - ! TR (group 2) + ! TR (group 2) ng = 2 xp (i,j,1,ng) = xcr @@ -2167,7 +2194,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (yir < c0 .and. xdr >= xcr .and. ydr < c0) then - ! BR (group 2) + ! BR (group 2) ng = 2 xp (i,j,1,ng) = xcr @@ -2180,9 +2207,9 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_br areafact(i,j,ng) = areafac_r(i,j) - elseif (yir < c0 .and. xdr >= xcr .and. ydr >= c0) then + elseif (yir < c0 .and. xdr >= xcr .and. ydr >= c0) then - ! TR1 (group 2) + ! 
TR1 (group 2) ng = 2 xp (i,j,1,ng) = xcr @@ -2195,7 +2222,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tr areafact(i,j,ng) = areafac_r(i,j) - ! BR1 (group 3) + ! BR1 (group 3) ng = 3 xp (i,j,1,ng) = xcr @@ -2208,9 +2235,9 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_br areafact(i,j,ng) = areafac_r(i,j) - elseif (yir > c0 .and. xdr >= xcr .and. ydr < c0) then + elseif (yir > c0 .and. xdr >= xcr .and. ydr < c0) then - ! TR2 (group 3) + ! TR2 (group 3) ng = 3 xp (i,j,1,ng) = xcr @@ -2223,9 +2250,9 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tr areafact(i,j,ng) = -areafac_r(i,j) - ! BR2 (group 2) + ! BR2 (group 2) - ng = 2 + ng = 2 xp (i,j,1,ng) = xcr yp (i,j,1,ng) = ycr xp (i,j,2,ng) = xdr @@ -2238,9 +2265,9 @@ subroutine locate_triangles (nx_block, ny_block, & endif ! TR and BR triangles - !------------------------------------------------------------------- - ! Redefine departure points if not located in central cells (TC or BC) - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Redefine departure points if not located in central cells (TC or BC) + !------------------------------------------------------------------- if (xdl < xcl) then xdl = xil @@ -2252,10 +2279,10 @@ subroutine locate_triangles (nx_block, ny_block, & ydr = yir endif - !------------------------------------------------------------------- - ! For l_fixed_area = T, shift the midpoint so that the departure - ! region has the prescribed area - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! For l_fixed_area = T, shift the midpoint so that the departure + ! region has the prescribed area + !------------------------------------------------------------------- if (l_fixed_area) then @@ -2268,21 +2295,21 @@ subroutine locate_triangles (nx_block, ny_block, & yp(i,j,3,ng) & - yp(i,j,2,ng) * & (xp(i,j,3,ng)-xp(i,j,1,ng)) ) & - * areafact(i,j,ng) + * areafact(i,j,ng) ng = 2 area2 = p5 * ( (xp(i,j,2,ng)-xp(i,j,1,ng)) * & yp(i,j,3,ng) & - yp(i,j,2,ng) * & (xp(i,j,3,ng)-xp(i,j,1,ng)) ) & - * areafact(i,j,ng) + * areafact(i,j,ng) ng = 3 area3 = p5 * ( (xp(i,j,2,ng)-xp(i,j,1,ng)) * & yp(i,j,3,ng) & - yp(i,j,2,ng) * & (xp(i,j,3,ng)-xp(i,j,1,ng)) ) & - * areafact(i,j,ng) + * areafact(i,j,ng) !----------------------------------------------------------- ! Check whether the central triangles lie in one grid cell or two. @@ -2337,7 +2364,7 @@ subroutine locate_triangles (nx_block, ny_block, & xdm = p5 * (xdr + xicl) ydm = p5 * ydr - ! compute area of triangle adjacent to left corner + ! compute area of triangle adjacent to left corner area4 = p5 * (xcl - xic) * ydl * areafac_l(i,j) area_c = edgearea(i,j) - area1 - area2 - area3 - area4 @@ -2362,7 +2389,7 @@ subroutine locate_triangles (nx_block, ny_block, & xicr = xic yicr = yic - ! compute midpoint between ICR and DL + ! compute midpoint between ICR and DL xdm = p5 * (xicr + xdl) ydm = p5 * ydl @@ -2390,16 +2417,16 @@ subroutine locate_triangles (nx_block, ny_block, & endif ! l_fixed_area - !------------------------------------------------------------------- - ! Locate triangles in BC cell (H for both north and east edges) - ! and TC cell (N for north edge and E for east edge). 
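
For l_fixed_area = T, the code above subtracts the corner-triangle areas (area1 through area4) from the prescribed edge area and then shifts the midpoint DM so that the central triangle supplies the remainder, area_c. A deliberately simplified sketch of that relation for a flat edge of unit length in scaled coordinates; the demo values are invented, and this omits the model's areafac weighting.

program midpoint_shift_demo
   implicit none
   integer, parameter :: dbl = selected_real_kind(13)
   real (kind=dbl) :: area_c   ! prescribed area of the central triangle
   real (kind=dbl) :: b        ! base length from CL to CR (scaled coords)
   real (kind=dbl) :: ydm      ! midpoint height yielding area_c

   area_c = 0.15_dbl           ! demo value
   b      = 1.0_dbl            ! CL = (-1/2,0), CR = (1/2,0)
   ydm    = 2.0_dbl*area_c / b ! from triangle area = b*h/2
   write(*,*) 'required midpoint height:', ydm
end program midpoint_shift_demo
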
- !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Locate triangles in BC cell (H for both north and east edges) + ! and TC cell (N for north edge and E for east edge). + !------------------------------------------------------------------- - ! Start with cases where both DPs lie in the same grid cell + ! Start with cases where both DPs lie in the same grid cell if (ydl >= c0 .and. ydr >= c0 .and. ydm >= c0) then - ! TC1a (group 4) + ! TC1a (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2412,7 +2439,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! TC2a (group 5) + ! TC2a (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2425,7 +2452,8 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! TC3a (group 6) + ! TC3a (group 6) + ng = 6 xp (i,j,1,ng) = xdl yp (i,j,1,ng) = ydl @@ -2439,7 +2467,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl >= c0 .and. ydr >= c0 .and. ydm < c0) then ! rare - ! TC1b (group 4) + ! TC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2452,7 +2480,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! TC2b (group 5) + ! TC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2465,7 +2493,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! BC3b (group 6) + ! BC3b (group 6) ng = 6 xp (i,j,1,ng) = xicr @@ -2480,7 +2508,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl < c0 .and. ydr < c0 .and. ydm < c0) then - ! BC1a (group 4) + ! BC1a (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2493,7 +2521,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_c(i,j) - ! BC2a (group 5) + ! BC2a (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2506,7 +2534,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_c(i,j) - ! BC3a (group 6) + ! BC3a (group 6) ng = 6 xp (i,j,1,ng) = xdl @@ -2521,7 +2549,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl < c0 .and. ydr < c0 .and. ydm >= c0) then ! rare - ! BC1b (group 4) + ! BC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2534,7 +2562,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_c(i,j) - ! BC2b (group 5) + ! BC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2547,7 +2575,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_c(i,j) - ! TC3b (group 6) + ! TC3b (group 6) ng = 6 xp (i,j,1,ng) = xicl @@ -2560,14 +2588,14 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! Now consider cases where the two DPs lie in different grid cells - ! For these cases, one triangle is given the area factor associated - ! with the adjacent corner, to avoid rare negative masses on curved grids. + ! Now consider cases where the two DPs lie in different grid cells + ! For these cases, one triangle is given the area factor associated + ! with the adjacent corner, to avoid rare negative masses on curved grids. elseif (ydl >= c0 .and. ydr < c0 .and. xic >= c0 & .and. ydm >= c0) then - ! TC1b (group 4) + ! 
TC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2580,7 +2608,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! BC2b (group 5) + ! BC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2593,7 +2621,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_r(i,j) - ! TC3b (group 6) + ! TC3b (group 6) ng = 6 xp (i,j,1,ng) = xdl @@ -2609,7 +2637,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl >= c0 .and. ydr < c0 .and. xic >= c0 & .and. ydm < c0 ) then ! less common - ! TC1b (group 4) + ! TC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2622,7 +2650,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! BC2b (group 5) + ! BC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2635,7 +2663,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_r(i,j) - ! BC3b (group 6) + ! BC3b (group 6) ng = 6 xp (i,j,1,ng) = xicr @@ -2651,7 +2679,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl >= c0 .and. ydr < c0 .and. xic < c0 & .and. ydm < c0) then - ! TC1b (group 4) + ! TC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2664,7 +2692,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_l(i,j) - ! BC2b (group 5) + ! BC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2677,7 +2705,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_c(i,j) - ! BC3b (group 6) + ! BC3b (group 6) ng = 6 xp (i,j,1,ng) = xdr @@ -2693,7 +2721,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl >= c0 .and. ydr < c0 .and. xic < c0 & .and. ydm >= c0) then ! less common - ! TC1b (group 4) + ! TC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2706,7 +2734,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_l(i,j) - ! BC2b (group 5) + ! BC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2719,7 +2747,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_c(i,j) - ! TC3b (group 6) + ! TC3b (group 6) ng = 6 xp (i,j,1,ng) = xicl @@ -2735,7 +2763,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl < c0 .and. ydr >= c0 .and. xic < c0 & .and. ydm >= c0) then - ! BC1b (group 4) + ! BC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2748,7 +2776,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_l(i,j) - ! TC2b (group 5) + ! TC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2761,7 +2789,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! TC3b (group 6) + ! TC3b (group 6) ng = 6 xp (i,j,1,ng) = xicl @@ -2777,7 +2805,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl < c0 .and. ydr >= c0 .and. xic < c0 & .and. ydm < c0) then ! less common - ! BC1b (group 4) + ! BC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2790,7 +2818,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_l(i,j) - ! TC2b (group 5) + ! TC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2803,7 +2831,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! BC3b (group 6) + ! 
BC3b (group 6) ng = 6 xp (i,j,1,ng) = xicr @@ -2819,7 +2847,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl < c0 .and. ydr >= c0 .and. xic >= c0 & .and. ydm < c0) then - ! BC1b (group 4) + ! BC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2832,7 +2860,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_c(i,j) - ! TC2b (group 5) + ! TC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2845,7 +2873,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_r(i,j) - ! BC3b (group 6) + ! BC3b (group 6) ng = 6 xp (i,j,1,ng) = xicr @@ -2861,7 +2889,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl < c0 .and. ydr >= c0 .and. xic >= c0 & .and. ydm >= c0) then ! less common - ! BC1b (group 4) + ! BC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2874,7 +2902,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_c(i,j) - ! TC2b (group 5) + ! TC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2887,7 +2915,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_r(i,j) - ! TC3b (group 6) + ! TC3b (group 6) ng = 6 xp (i,j,1,ng) = xicl @@ -2904,26 +2932,26 @@ subroutine locate_triangles (nx_block, ny_block, & enddo ! ij - !------------------------------------------------------------------- - ! Compute triangle areas with appropriate sign. - ! These are found by computing the area in scaled coordinates and - ! multiplying by a scale factor (areafact). - ! Note that the scale factor is positive for fluxes out of the cell - ! and negative for fluxes into the cell. - ! - ! Note: The triangle area formula below gives A >=0 iff the triangle - ! points x1, x2, and x3 are taken in counterclockwise order. - ! These points are defined above in such a way that the - ! order is nearly always CCW. - ! In rare cases, we may compute A < 0. In this case, - ! the quadrilateral departure area is equal to the - ! difference of two triangle areas instead of the sum. - ! The fluxes work out correctly in the end. - ! - ! Also compute the cumulative area transported across each edge. - ! If l_fixed_area = T, this area is compared to edgearea as a bug check. - ! If l_fixed_area = F, this area is passed as an output array. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute triangle areas with appropriate sign. + ! These are found by computing the area in scaled coordinates and + ! multiplying by a scale factor (areafact). + ! Note that the scale factor is positive for fluxes out of the cell + ! and negative for fluxes into the cell. + ! + ! Note: The triangle area formula below gives A >=0 iff the triangle + ! points x1, x2, and x3 are taken in counterclockwise order. + ! These points are defined above in such a way that the + ! order is nearly always CCW. + ! In rare cases, we may compute A < 0. In this case, + ! the quadrilateral departure area is equal to the + ! difference of two triangle areas instead of the sum. + ! The fluxes work out correctly in the end. + ! + ! Also compute the cumulative area transported across each edge. + ! If l_fixed_area = T, this area is compared to edgearea as a bug check. + ! If l_fixed_area = F, this area is passed as an output array. 
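
The triarea loop that follows this comment uses the standard cross-product (shoelace) formula, which is positive exactly when the three vertices are taken in counterclockwise order, as the note above explains. A standalone sketch with invented names, checking the sign for CCW and CW orderings of the same vertices:

program triarea_demo
   implicit none
   integer, parameter :: dbl = selected_real_kind(13)

   ! CCW unit right triangle: +1/2; same vertices clockwise: -1/2
   write(*,*) 'ccw:', signed_area(0.0_dbl,0.0_dbl, 1.0_dbl,0.0_dbl, 0.0_dbl,1.0_dbl)
   write(*,*) 'cw: ', signed_area(0.0_dbl,0.0_dbl, 0.0_dbl,1.0_dbl, 1.0_dbl,0.0_dbl)

contains

   real (kind=dbl) function signed_area (x1,y1, x2,y2, x3,y3)
      ! cross-product form matching the triarea computation below
      ! (there it is additionally scaled by areafact)
      real (kind=dbl), intent(in) :: x1,y1, x2,y2, x3,y3
      signed_area = 0.5_dbl * ( (x2-x1)*(y3-y1) - (y2-y1)*(x3-x1) )
   end function signed_area

end program triarea_demo
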
+ !------------------------------------------------------------------- areasum(:,:) = c0 @@ -2938,12 +2966,12 @@ subroutine locate_triangles (nx_block, ny_block, & (yp(i,j,3,ng)-yp(i,j,1,ng)) & - (yp(i,j,2,ng)-yp(i,j,1,ng)) * & (xp(i,j,3,ng)-xp(i,j,1,ng)) ) & - * areafact(i,j,ng) + * areafact(i,j,ng) if (abs(triarea(i,j,ng)) < eps16*areafac_c(i,j)) then triarea(i,j,ng) = c0 else - icells(ng) = icells(ng) + 1 + icells(ng) = icells(ng) + 1 ic = icells(ng) indxi(ic,ng) = i indxj(ic,ng) = j @@ -2955,27 +2983,27 @@ subroutine locate_triangles (nx_block, ny_block, & enddo ! ng if (l_fixed_area) then - if (bugcheck) then ! set bugcheck = F to speed up code - do ij = 1, icellsd - i = indxid(ij) - j = indxjd(ij) - if (abs(areasum(i,j) - edgearea(i,j)) > eps13*areafac_c(i,j)) then - write(nu_diag,*) '' - write(nu_diag,*) 'Areas do not add up: m, i, j, edge =', & - my_task, i, j, trim(edge) - write(nu_diag,*) 'edgearea =', edgearea(i,j) - write(nu_diag,*) 'areasum =', areasum(i,j) - write(nu_diag,*) 'areafac_c =', areafac_c(i,j) - write(nu_diag,*) '' - write(nu_diag,*) 'Triangle areas:' - do ng = 1, ngroups ! not vector friendly - if (abs(triarea(i,j,ng)) > eps16*abs(areafact(i,j,ng))) then - write(nu_diag,*) ng, triarea(i,j,ng) - endif - enddo - endif - enddo - endif ! bugcheck + if (bugcheck) then ! set bugcheck = F to speed up code + do ij = 1, icellsd + i = indxid(ij) + j = indxjd(ij) + if (abs(areasum(i,j) - edgearea(i,j)) > eps13*areafac_c(i,j)) then + write(nu_diag,*) '' + write(nu_diag,*) 'Areas do not add up: m, i, j, edge =', & + my_task, i, j, trim(edge) + write(nu_diag,*) 'edgearea =', edgearea(i,j) + write(nu_diag,*) 'areasum =', areasum(i,j) + write(nu_diag,*) 'areafac_c =', areafac_c(i,j) + write(nu_diag,*) '' + write(nu_diag,*) 'Triangle areas:' + do ng = 1, ngroups ! not vector friendly + if (abs(triarea(i,j,ng)) > eps16*abs(areafact(i,j,ng))) then + write(nu_diag,*) ng, triarea(i,j,ng) + endif + enddo + endif + enddo + endif ! bugcheck else ! l_fixed_area = F do ij = 1, icellsd @@ -2985,10 +3013,10 @@ subroutine locate_triangles (nx_block, ny_block, & enddo endif ! l_fixed_area - !------------------------------------------------------------------- - ! Transform triangle vertices to a scaled coordinate system centered - ! in the cell containing the triangle. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Transform triangle vertices to a scaled coordinate system centered + ! in the cell containing the triangle. + !------------------------------------------------------------------- if (trim(edge) == 'north') then do ng = 1, ngroups @@ -3055,10 +3083,10 @@ end subroutine locate_triangles ! to compute integrals of linear, quadratic, or cubic polynomials, ! using formulas from A.H. Stroud, Approximate Calculation of Multiple ! Integrals, Prentice-Hall, 1971. (Section 8.8, formula 3.1.) -! Linear functions can be integrated exactly by evaluating the function +! Linear functions can be integrated exactly by evaluating the function ! at just one point (the midpoint). Quadratic functions require ! 3 points, and cubics require 4 points. -! The default is cubic, but the code can be sped up slightly using +! The default is cubic, but the code can be sped up slightly using ! linear or quadratic integrals, usually with little loss of accuracy. ! ! 
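For reference, the three rules have the following shape (a minimal standalone sketch, not the module's code; the function names are hypothetical, and the weights are the classical Stroud values, with the cubic points written as p4*vertex + p6*centroid to match the combination used in triangle_coordinates below):

      real (kind=dbl_kind) function tri_quad (f, order, area, &
                                              x1, y1, x2, y2, x3, y3)
         use ice_kinds_mod, only: int_kind, dbl_kind
         interface
            real (kind=dbl_kind) function f (x, y)   ! integrand
               use ice_kinds_mod, only: dbl_kind
               real (kind=dbl_kind), intent(in) :: x, y
            end function f
         end interface
         integer (kind=int_kind), intent(in) :: order  ! 1=linear, 2=quadratic, 3=cubic
         real (kind=dbl_kind), intent(in) :: area      ! signed triangle area
         real (kind=dbl_kind), intent(in) :: x1, y1, x2, y2, x3, y3 ! vertices
         real (kind=dbl_kind) :: xc, yc                ! centroid
         real (kind=dbl_kind), parameter :: &
            p4 = 0.4_dbl_kind, p6 = 0.6_dbl_kind, &    ! cubic quadrature points
            wc = -27.0_dbl_kind/48.0_dbl_kind, &       ! cubic centroid weight
            wv =  25.0_dbl_kind/48.0_dbl_kind          ! cubic off-centroid weight

         xc = (x1 + x2 + x3)/3.0_dbl_kind
         yc = (y1 + y2 + y3)/3.0_dbl_kind

         if (order == 1) then       ! 1-point rule (centroid), exact for linear f
            tri_quad = area * f(xc, yc)
         elseif (order == 2) then   ! 3 edge midpoints, exact for quadratic f
            tri_quad = area/3.0_dbl_kind * &
                     ( f(0.5_dbl_kind*(x1+x2), 0.5_dbl_kind*(y1+y2)) &
                     + f(0.5_dbl_kind*(x2+x3), 0.5_dbl_kind*(y2+y3)) &
                     + f(0.5_dbl_kind*(x3+x1), 0.5_dbl_kind*(y3+y1)) )
         else                       ! 4-point rule, exact for cubic f
            tri_quad = area * ( wc * f(xc, yc) &
                     + wv * ( f(p4*x1+p6*xc, p4*y1+p6*yc) &
                            + f(p4*x2+p6*xc, p4*y2+p6*yc) &
                            + f(p4*x3+p6*xc, p4*y3+p6*yc) ) )
         endif
      end function tri_quad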
The formulas are as follows: @@ -3084,24 +3112,24 @@ subroutine triangle_coordinates (nx_block, ny_block, & xp, yp) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block,&! block dimensions - integral_order ! polynomial order for quadrature integrals + nx_block, ny_block, & ! block dimensions + integral_order ! polynomial order for quadrature integrals integer (kind=int_kind), dimension (ngroups), intent(in) :: & - icells ! number of cells where triarea > puny + icells ! number of cells where triarea > puny integer (kind=int_kind), dimension (nx_block*ny_block,ngroups), intent(in) :: & - indxi ,&! compressed index in i-direction - indxj ! compressed index in j-direction + indxi , & ! compressed index in i-direction + indxj ! compressed index in j-direction real (kind=dbl_kind), intent(inout), dimension (nx_block, ny_block, 0:nvert, ngroups) :: & - xp, yp ! coordinates of triangle points + xp, yp ! coordinates of triangle points ! local variables integer (kind=int_kind) :: & - i, j, ij ,&! horizontal indices - ng ! triangle index + i, j, ij , & ! horizontal indices + ng ! triangle index character(len=*), parameter :: subname = '(triangle_coordinates)' @@ -3168,10 +3196,10 @@ subroutine triangle_coordinates (nx_block, ny_block, & xp(i,j,2,ng) = p4*xp(i,j,2,ng) + p6*xp(i,j,0,ng) yp(i,j,2,ng) = p4*yp(i,j,2,ng) + p6*yp(i,j,0,ng) - + xp(i,j,3,ng) = p4*xp(i,j,3,ng) + p6*xp(i,j,0,ng) yp(i,j,3,ng) = p4*yp(i,j,3,ng) + p6*yp(i,j,0,ng) - + enddo ! ij enddo ! ng @@ -3202,69 +3230,69 @@ subroutine transport_integrals (nx_block, ny_block, & ty, mtflx) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block ,&! block dimensions - ntrace ,&! number of tracers in use - integral_order ! polynomial order for quadrature integrals + nx_block, ny_block , & ! block dimensions + ntrace , & ! number of tracers in use + integral_order ! polynomial order for quadrature integrals integer (kind=int_kind), dimension (ntrace), intent(in) :: & - tracer_type ,&! = 1, 2, or 3 (see comments above) - depend ! tracer dependencies (see above) + tracer_type , & ! = 1, 2, or 3 (see comments above) + depend ! tracer dependencies (see above) integer (kind=int_kind), dimension (ngroups), intent(in) :: & - icells ! number of cells where triarea > puny + icells ! number of cells where triarea > puny integer (kind=int_kind), dimension (nx_block*ny_block,ngroups), intent(in) :: & - indxi ,&! compressed index in i-direction - indxj ! compressed index in j-direction + indxi , & ! compressed index in i-direction + indxj ! compressed index in j-direction real (kind=dbl_kind), intent(in), dimension (nx_block, ny_block, 0:nvert, ngroups) :: & - xp, yp ! coordinates of triangle points + xp, yp ! coordinates of triangle points real (kind=dbl_kind), intent(in), dimension (nx_block, ny_block, ngroups) :: & - triarea ! triangle area + triarea ! triangle area integer (kind=int_kind), intent(in), dimension (nx_block, ny_block, ngroups) :: & - iflux ,& - jflux + iflux ,& + jflux real (kind=dbl_kind), intent(in), dimension (nx_block, ny_block) :: & - mc, mx, my + mc, mx, my real (kind=dbl_kind), intent(out), dimension (nx_block, ny_block) :: & - mflx + mflx real (kind=dbl_kind), intent(in), dimension (nx_block, ny_block, ntrace), optional :: & - tc, tx, ty + tc, tx, ty real (kind=dbl_kind), intent(out), dimension (nx_block, ny_block, ntrace), optional :: & - mtflx + mtflx ! local variables integer (kind=int_kind) :: & - i, j, ij ,&! horizontal indices of edge - i2, j2 ,&! horizontal indices of cell contributing transport - ng ,&! 
triangle index - nt, nt1 ! tracer indices + i, j, ij , & ! horizontal indices of edge + i2, j2 , & ! horizontal indices of cell contributing transport + ng , & ! triangle index + nt, nt1 ! tracer indices real (kind=dbl_kind) :: & - m0, m1, m2, m3 ,&! mass field at internal points - w0, w1, w2, w3 ! work variables + m0, m1, m2, m3 , & ! mass field at internal points + w0, w1, w2, w3 ! work variables real (kind=dbl_kind), dimension (nx_block, ny_block) :: & - msum, mxsum, mysum ,&! sum of mass, mass*x, and mass*y - mxxsum, mxysum, myysum ! sum of mass*x*x, mass*x*y, mass*y*y + msum, mxsum, mysum , & ! sum of mass, mass*x, and mass*y + mxxsum, mxysum, myysum ! sum of mass*x*x, mass*x*y, mass*y*y real (kind=dbl_kind), dimension (nx_block, ny_block, ntrace) :: & - mtsum ,&! sum of mass*tracer - mtxsum ,&! sum of mass*tracer*x - mtysum ! sum of mass*tracer*y + mtsum , & ! sum of mass*tracer + mtxsum , & ! sum of mass*tracer*x + mtysum ! sum of mass*tracer*y character(len=*), parameter :: subname = '(transport_integrals)' - !------------------------------------------------------------------- - ! Initialize - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Initialize + !------------------------------------------------------------------- mflx(:,:) = c0 if (present(mtflx)) then @@ -3273,9 +3301,9 @@ subroutine transport_integrals (nx_block, ny_block, & enddo endif - !------------------------------------------------------------------- - ! Main loop - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Main loop + !------------------------------------------------------------------- do ng = 1, ngroups @@ -3297,11 +3325,11 @@ subroutine transport_integrals (nx_block, ny_block, & mflx(i,j) = mflx(i,j) + triarea(i,j,ng)*msum(i,j) ! quantities needed for tracer transports - mxsum(i,j) = m0*xp(i,j,0,ng) - mxxsum(i,j) = mxsum(i,j)*xp(i,j,0,ng) - mxysum(i,j) = mxsum(i,j)*yp(i,j,0,ng) - mysum(i,j) = m0*yp(i,j,0,ng) - myysum(i,j) = mysum(i,j)*yp(i,j,0,ng) + mxsum(i,j) = m0*xp(i,j,0,ng) + mxxsum(i,j) = mxsum(i,j)*xp(i,j,0,ng) + mxysum(i,j) = mxsum(i,j)*yp(i,j,0,ng) + mysum(i,j) = m0*yp(i,j,0,ng) + myysum(i,j) = mysum(i,j)*yp(i,j,0,ng) enddo ! ij elseif (integral_order == 2) then ! quadratic (3-point formula) @@ -3333,7 +3361,7 @@ subroutine transport_integrals (nx_block, ny_block, & mxsum(i,j) = w1 + w2 + w3 mxxsum(i,j) = w1*xp(i,j,1,ng) + w2*xp(i,j,2,ng) & - + w3*xp(i,j,3,ng) + + w3*xp(i,j,3,ng) mxysum(i,j) = w1*yp(i,j,1,ng) + w2*yp(i,j,2,ng) & + w3*yp(i,j,3,ng) @@ -3493,16 +3521,16 @@ subroutine update_fields (nx_block, ny_block, & tm) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block,&! block dimensions - ilo,ihi,jlo,jhi ,&! beginning and end of physical domain + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi , & ! beginning and end of physical domain ntrace ! number of tracers in use integer (kind=int_kind), dimension (ntrace), intent(in) :: & - tracer_type ,&! = 1, 2, or 3 (see comments above) + tracer_type , & ! = 1, 2, or 3 (see comments above) depend ! tracer dependencies (see above) real (kind=dbl_kind), dimension (nx_block, ny_block), intent(in) :: & - mflxe, mflxn ,&! mass transport across east and north cell edges + mflxe, mflxn , & ! mass transport across east and north cell edges tarear ! 
1/tarea real (kind=dbl_kind), dimension (nx_block, ny_block), intent(inout) :: & @@ -3518,12 +3546,12 @@ subroutine update_fields (nx_block, ny_block, & l_stop ! if true, abort on return integer (kind=int_kind), intent(inout) :: & - istop, jstop ! indices of grid cell where model aborts + istop, jstop ! indices of grid cell where model aborts ! local variables integer (kind=int_kind) :: & - i, j ,&! horizontal indices + i, j , & ! horizontal indices nt, nt1, nt2 ! tracer indices real (kind=dbl_kind), dimension(nx_block,ny_block,ntrace) :: & @@ -3534,18 +3562,18 @@ subroutine update_fields (nx_block, ny_block, & w1 ! work variable integer (kind=int_kind), dimension(nx_block*ny_block) :: & - indxi ,&! compressed indices in i and j directions + indxi , & ! compressed indices in i and j directions indxj integer (kind=int_kind) :: & - icells ,&! number of cells with mm > 0. + icells , & ! number of cells with mm > 0. ij ! combined i/j horizontal index character(len=*), parameter :: subname = '(update_fields)' - !------------------------------------------------------------------- - ! Save starting values of mass*tracer - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Save starting values of mass*tracer + !------------------------------------------------------------------- call icepack_query_parameters(puny_out=puny) call icepack_warnings_flush(nu_diag) @@ -3580,15 +3608,15 @@ subroutine update_fields (nx_block, ny_block, & enddo ! nt endif ! present(tm) - !------------------------------------------------------------------- - ! Update mass field - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Update mass field + !------------------------------------------------------------------- do j = jlo, jhi do i = ilo, ihi - w1 = mflxe(i,j) - mflxe(i-1,j) & - + mflxn(i,j) - mflxn(i,j-1) + w1 = mflxe(i,j) - mflxe(i-1,j ) & + + mflxn(i,j) - mflxn(i ,j-1) mm(i,j) = mm(i,j) - w1*tarear(i,j) if (mm(i,j) < -puny) then ! abort with negative value @@ -3605,8 +3633,8 @@ subroutine update_fields (nx_block, ny_block, & if (l_stop) then i = istop j = jstop - w1 = mflxe(i,j) - mflxe(i-1,j) & - + mflxn(i,j) - mflxn(i,j-1) + w1 = mflxe(i,j) - mflxe(i-1,j ) & + + mflxn(i,j) - mflxn(i ,j-1) write (nu_diag,*) ' ' write (nu_diag,*) 'New mass < 0, i, j =', i, j write (nu_diag,*) 'Old mass =', mm(i,j) + w1*tarear(i,j) @@ -3615,9 +3643,9 @@ subroutine update_fields (nx_block, ny_block, & return endif - !------------------------------------------------------------------- - ! Update tracers - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Update tracers + !------------------------------------------------------------------- if (present(tm)) then @@ -3646,8 +3674,8 @@ subroutine update_fields (nx_block, ny_block, & i = indxi(ij) j = indxj(ij) - w1 = mtflxe(i,j,nt) - mtflxe(i-1,j,nt) & - + mtflxn(i,j,nt) - mtflxn(i,j-1,nt) + w1 = mtflxe(i,j,nt) - mtflxe(i-1,j ,nt) & + + mtflxn(i,j,nt) - mtflxn(i ,j-1,nt) tm(i,j,nt) = (mtold(i,j,nt) - w1*tarear(i,j)) & / mm(i,j) enddo ! 
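Because the mass update above is in flux form, transports through interior edges cancel in pairs and the domain total is conserved whenever the boundary fluxes vanish; a minimal standalone check of that property, mirroring the w1 stencil of this subroutine (array names follow the subroutine's conventions, the domain size is hypothetical, and cell areas are set to one):

      subroutine check_mass_conservation
         use ice_kinds_mod, only: int_kind, dbl_kind
         integer (kind=int_kind), parameter :: n = 4   ! tiny test domain
         real (kind=dbl_kind) :: mm(n,n)               ! mass field
         real (kind=dbl_kind) :: mflxe(0:n,n)          ! east-edge transports
         real (kind=dbl_kind) :: mflxn(n,0:n)          ! north-edge transports
         real (kind=dbl_kind) :: w1, total0, total1
         integer (kind=int_kind) :: i, j
         call random_number(mm)
         call random_number(mflxe)
         call random_number(mflxn)
         mflxe(0,:) = 0.0_dbl_kind                     ! closed boundaries:
         mflxe(n,:) = 0.0_dbl_kind                     ! no transport in or out
         mflxn(:,0) = 0.0_dbl_kind
         mflxn(:,n) = 0.0_dbl_kind
         total0 = sum(mm)
         do j = 1, n
         do i = 1, n
            w1 = mflxe(i,j) - mflxe(i-1,j) &           ! same stencil as above,
               + mflxn(i,j) - mflxn(i,j-1)             ! with tarear = 1
            mm(i,j) = mm(i,j) - w1
         enddo
         enddo
         total1 = sum(mm)
         ! total1 equals total0 up to roundoff: each interior transport
         ! leaves one cell and enters its neighbor
      end subroutine check_mass_conservation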
ij @@ -3660,8 +3688,8 @@ subroutine update_fields (nx_block, ny_block, & j = indxj(ij) if (abs(tm(i,j,nt1)) > c0) then - w1 = mtflxe(i,j,nt) - mtflxe(i-1,j,nt) & - + mtflxn(i,j,nt) - mtflxn(i,j-1,nt) + w1 = mtflxe(i,j,nt) - mtflxe(i-1,j ,nt) & + + mtflxn(i,j,nt) - mtflxn(i ,j-1,nt) tm(i,j,nt) = (mtold(i,j,nt) - w1*tarear(i,j)) & / (mm(i,j) * tm(i,j,nt1)) endif @@ -3678,8 +3706,8 @@ subroutine update_fields (nx_block, ny_block, & if (abs(tm(i,j,nt1)) > c0 .and. & abs(tm(i,j,nt2)) > c0) then - w1 = mtflxe(i,j,nt) - mtflxe(i-1,j,nt) & - + mtflxn(i,j,nt) - mtflxn(i,j-1,nt) + w1 = mtflxe(i,j,nt) - mtflxe(i-1,j ,nt) & + + mtflxn(i,j,nt) - mtflxn(i ,j-1,nt) tm(i,j,nt) = (mtold(i,j,nt) - w1*tarear(i,j)) & / (mm(i,j) * tm(i,j,nt2) * tm(i,j,nt1)) endif diff --git a/cicecore/cicedynB/general/ice_flux.F90 b/cicecore/cicedynB/general/ice_flux.F90 index 23fb9df63..f5289c922 100644 --- a/cicecore/cicedynB/general/ice_flux.F90 +++ b/cicecore/cicedynB/general/ice_flux.F90 @@ -33,29 +33,31 @@ module ice_flux !----------------------------------------------------------------- ! Dynamics component + ! All variables are assumed to be on the atm or ocn thermodynamic + ! grid except as noted !----------------------------------------------------------------- real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & ! in from atmos (if .not.calc_strair) - strax , & ! wind stress components (N/m^2) - stray , & ! + strax , & ! wind stress components (N/m^2), on grid_atm_dynu + stray , & ! on grid_atm_dynv ! in from ocean - uocn , & ! ocean current, x-direction (m/s) - vocn , & ! ocean current, y-direction (m/s) - ss_tltx , & ! sea surface slope, x-direction (m/m) - ss_tlty , & ! sea surface slope, y-direction + uocn , & ! ocean current, x-direction (m/s), on grid_ocn_dynu + vocn , & ! ocean current, y-direction (m/s), on grid_ocn_dynv + ss_tltx , & ! sea surface slope, x-direction (m/m), on grid_ocn_dynu + ss_tlty , & ! sea surface slope, y-direction, on grid_ocn_dynv hwater , & ! water depth for seabed stress calc (landfast ice) ! out to atmosphere - strairxT, & ! stress on ice by air, x-direction - strairyT, & ! stress on ice by air, y-direction + strairxT, & ! stress on ice by air, x-direction at T points, computed in icepack + strairyT, & ! stress on ice by air, y-direction at T points, computed in icepack ! out to ocean T-cell (kg/m s^2) ! Note, CICE_IN_NEMO uses strocnx and strocny for coupling - strocnxT, & ! ice-ocean stress, x-direction - strocnyT ! ice-ocean stress, y-direction + strocnxT, & ! ice-ocean stress, x-direction at T points, mapped from strocnx, per ice fraction + strocnyT ! ice-ocean stress, y-direction at T points, mapped from strocny, per ice fraction ! diagnostic @@ -65,14 +67,34 @@ module ice_flux sigP , & ! internal ice pressure (N/m) taubx , & ! seabed stress (x) (N/m^2) tauby , & ! seabed stress (y) (N/m^2) - strairx , & ! stress on ice by air, x-direction - strairy , & ! stress on ice by air, y-direction - strocnx , & ! ice-ocean stress, x-direction - strocny , & ! ice-ocean stress, y-direction + strairx , & ! stress on ice by air, x-direction at U points, mapped from strairxT + strairy , & ! stress on ice by air, y-direction at U points, mapped from strairyT + strocnx , & ! ice-ocean stress, x-direction at U points, computed in dyn_finish + strocny , & ! ice-ocean stress, y-direction at U points, computed in dyn_finish strtltx , & ! stress due to sea surface slope, x-direction strtlty , & ! stress due to sea surface slope, y-direction strintx , & ! 
divergence of internal ice stress, x (N/m^2) strinty , & ! divergence of internal ice stress, y (N/m^2) + taubxN , & ! seabed stress (x) at N points (N/m^2) + taubyN , & ! seabed stress (y) at N points (N/m^2) + strairxN, & ! stress on ice by air, x-direction at N points, mapped from strairxT + strairyN, & ! stress on ice by air, y-direction at N points, mapped from strairyT + strocnxN, & ! ice-ocean stress, x-direction at N points, computed in dyn_finish + strocnyN, & ! ice-ocean stress, y-direction at N points, computed in dyn_finish + strtltxN, & ! stress due to sea surface slope, x-direction at N points + strtltyN, & ! stress due to sea surface slope, y-direction at N points + strintxN, & ! divergence of internal ice stress, x at N points (N/m^2) + strintyN, & ! divergence of internal ice stress, y at N points (N/m^2) + taubxE , & ! seabed stress (x) at E points (N/m^2) + taubyE , & ! seabed stress (y) at E points (N/m^2) + strairxE, & ! stress on ice by air, x-direction at E points, mapped from strairxT + strairyE, & ! stress on ice by air, y-direction at E points, mapped from strairyT + strocnxE, & ! ice-ocean stress, x-direction at E points, computed in dyn_finish + strocnyE, & ! ice-ocean stress, y-direction at E points, computed in dyn_finish + strtltxE, & ! stress due to sea surface slope, x-direction at E points + strtltyE, & ! stress due to sea surface slope, y-direction at E points + strintxE, & ! divergence of internal ice stress, x at E points (N/m^2) + strintyE, & ! divergence of internal ice stress, y at E points (N/m^2) daidtd , & ! ice area tendency due to transport (1/s) dvidtd , & ! ice volume tendency due to transport (m/s) dagedtd , & ! ice age tendency due to transport (s/s) @@ -102,17 +124,32 @@ module ice_flux ! ice stress tensor in each corner of T cell (kg/s^2) stressp_1, stressp_2, stressp_3, stressp_4 , & ! sigma11+sigma22 stressm_1, stressm_2, stressm_3, stressm_4 , & ! sigma11-sigma22 - stress12_1,stress12_2,stress12_3,stress12_4 ! sigma12 + stress12_1,stress12_2,stress12_3,stress12_4, & ! sigma12 + ! ice stress tensor at U and T locations (grid_ice = 'C|CD') (kg/s^2) + stresspT, stressmT, stress12T, & ! sigma11+sigma22, sigma11-sigma22, sigma12 + stresspU, stressmU, stress12U ! " logical (kind=log_kind), & dimension (:,:,:), allocatable, public :: & iceumask ! ice extent mask (U-cell) + logical (kind=log_kind), & + dimension (:,:,:), allocatable, public :: & + icenmask ! ice extent mask (N-cell) + + logical (kind=log_kind), & + dimension (:,:,:), allocatable, public :: & + iceemask ! ice extent mask (E-cell) + ! internal real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & fm , & ! Coriolis param. * mass in U-cell (kg/s) - Tbu ! factor for seabed stress (N/m^2) + Tbu , & ! factor for seabed stress (N/m^2) + fmE , & ! Coriolis param. * mass in E-cell (kg/s) + TbE , & ! factor for seabed stress (N/m^2) + fmN , & ! Coriolis param. * mass in N-cell (kg/s) + TbN ! factor for seabed stress (N/m^2) !----------------------------------------------------------------- ! Thermodynamic component @@ -123,9 +160,9 @@ module ice_flux real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & zlvl , & ! atm level height (momentum) (m) zlvs , & ! atm level height (scalar quantities) (m) - uatm , & ! wind velocity components (m/s) - vatm , & - wind , & ! wind speed (m/s) + uatm , & ! wind velocity components (m/s), on grid_atm_dynu + vatm , & ! on grid_atm_dynv + wind , & ! wind speed (m/s) , on grid_atm_dynu potT , & ! 
air potential temperature (K) Tair , & ! air temperature (K) Qa , & ! specific humidity (kg/kg) @@ -327,6 +364,8 @@ module ice_flux !----------------------------------------------------------------- real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & + uatmT , & ! uatm mapped to T grid (m/s) + vatmT , & ! vatm mapped to T grid (m/s) rside , & ! fraction of ice that melts laterally fside , & ! lateral heat flux (W/m^2) fsw , & ! incoming shortwave radiation (W/m^2) @@ -348,6 +387,8 @@ module ice_flux ! subroutine alloc_flux + use ice_grid, only : grid_ice + integer (int_kind) :: ierr allocate( & @@ -498,6 +539,8 @@ subroutine alloc_flux fswthru_ai (nx_block,ny_block,max_blocks), & ! shortwave penetrating to ocean (W/m^2) fresh_da (nx_block,ny_block,max_blocks), & ! fresh water flux to ocean due to data assim (kg/m^2/s) fsalt_da (nx_block,ny_block,max_blocks), & ! salt flux to ocean due to data assimilation(kg/m^2/s) + uatmT (nx_block,ny_block,max_blocks), & ! uatm mapped to T grid + vatmT (nx_block,ny_block,max_blocks), & ! vatm mapped to T grid rside (nx_block,ny_block,max_blocks), & ! fraction of ice that melts laterally fside (nx_block,ny_block,max_blocks), & ! lateral melt rate (W/m^2) fsw (nx_block,ny_block,max_blocks), & ! incoming shortwave radiation (W/m^2) @@ -537,6 +580,43 @@ subroutine alloc_flux stat=ierr) if (ierr/=0) call abort_ice('(alloc_flux): Out of memory') + if (grid_ice == "CD" .or. grid_ice == "C") & + allocate( & + taubxN (nx_block,ny_block,max_blocks), & ! seabed stress (x) at N points (N/m^2) + taubyN (nx_block,ny_block,max_blocks), & ! seabed stress (y) at N points (N/m^2) + strairxN (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction at N points + strairyN (nx_block,ny_block,max_blocks), & ! stress on ice by air, y-direction at N points + strocnxN (nx_block,ny_block,max_blocks), & ! ice-ocean stress, x-direction at N points + strocnyN (nx_block,ny_block,max_blocks), & ! ice-ocean stress, y-direction at N points + strtltxN (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, x-direction at N points + strtltyN (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, y-direction at N points + strintxN (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, x at N points (N/m^2) + strintyN (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, y at N points (N/m^2) + icenmask (nx_block,ny_block,max_blocks), & ! ice extent mask (N-cell) + fmN (nx_block,ny_block,max_blocks), & ! Coriolis param. * mass in N-cell (kg/s) + TbN (nx_block,ny_block,max_blocks), & ! factor for seabed stress (landfast ice) + taubxE (nx_block,ny_block,max_blocks), & ! seabed stress (x) at E points (N/m^2) + taubyE (nx_block,ny_block,max_blocks), & ! seabed stress (y) at E points (N/m^2) + strairxE (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction at E points + strairyE (nx_block,ny_block,max_blocks), & ! stress on ice by air, y-direction at E points + strocnxE (nx_block,ny_block,max_blocks), & ! ice-ocean stress, x-direction at E points + strocnyE (nx_block,ny_block,max_blocks), & ! ice-ocean stress, y-direction at E points + strtltxE (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, x-direction at E points + strtltyE (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, y-direction at E points + strintxE (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, x at E points (N/m^2) + strintyE (nx_block,ny_block,max_blocks), & ! 
divergence of internal ice stress, y at E points (N/m^2) + iceemask (nx_block,ny_block,max_blocks), & ! ice extent mask (E-cell) + fmE (nx_block,ny_block,max_blocks), & ! Coriolis param. * mass in E-cell (kg/s) + TbE (nx_block,ny_block,max_blocks), & ! factor for seabed stress (landfast ice) + stresspT (nx_block,ny_block,max_blocks), & ! sigma11+sigma22 + stressmT (nx_block,ny_block,max_blocks), & ! sigma11-sigma22 + stress12T (nx_block,ny_block,max_blocks), & ! sigma12 + stresspU (nx_block,ny_block,max_blocks), & ! sigma11+sigma22 + stressmU (nx_block,ny_block,max_blocks), & ! sigma11-sigma22 + stress12U (nx_block,ny_block,max_blocks), & ! sigma12 + stat=ierr) + if (ierr/=0) call abort_ice('(alloc_flux): Out of memory') + end subroutine alloc_flux !======================================================================= @@ -649,13 +729,13 @@ subroutine init_coupler_flux ! fluxes received from ocean !----------------------------------------------------------------- - ss_tltx(:,:,:)= c0 ! sea surface tilt (m/m) - ss_tlty(:,:,:)= c0 - uocn (:,:,:) = c0 ! surface ocean currents (m/s) - vocn (:,:,:) = c0 - frzmlt(:,:,:) = c0 ! freezing/melting potential (W/m^2) - frzmlt_init(:,:,:) = c0 ! freezing/melting potential (W/m^2) - sss (:,:,:) = 34.0_dbl_kind ! sea surface salinity (ppt) + ss_tltx (:,:,:) = c0 ! sea surface tilt (m/m) + ss_tlty (:,:,:) = c0 + uocn (:,:,:) = c0 ! surface ocean currents (m/s) + vocn (:,:,:) = c0 + frzmlt (:,:,:) = c0 ! freezing/melting potential (W/m^2) + frzmlt_init(:,:,:) = c0 ! freezing/melting potential (W/m^2) + sss (:,:,:) = 34.0_dbl_kind ! sea surface salinity (ppt) do iblk = 1, size(Tf,3) do j = 1, size(Tf,2) diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90 index cede58950..2d127ecb2 100755 --- a/cicecore/cicedynB/general/ice_forcing.F90 +++ b/cicecore/cicedynB/general/ice_forcing.F90 @@ -19,7 +19,9 @@ module ice_forcing use ice_kinds_mod + use ice_boundary, only: ice_HaloUpdate use ice_blocks, only: nx_block, ny_block + use ice_domain, only: halo_info use ice_domain_size, only: ncat, max_blocks, nx_global, ny_global use ice_communicate, only: my_task, master_task use ice_calendar, only: istep, istep1, & @@ -117,13 +119,15 @@ module ice_forcing character(char_len), public :: & atm_data_format, & ! 'bin'=binary or 'nc'=netcdf ocn_data_format, & ! 'bin'=binary or 'nc'=netcdf - atm_data_type, & ! 'default', 'monthly', 'ncar', - ! 'hadgem' or 'oned' or + atm_data_type, & ! 'default', 'monthly', 'ncar', 'box2001' + ! 'hadgem', 'oned', 'calm', 'uniform' ! 'JRA55_gx1' or 'JRA55_gx3' or 'JRA55_tx1' bgc_data_type, & ! 'default', 'clim' - ocn_data_type, & ! 'default', 'clim', 'ncar', 'oned', - ! 'hadgem_sst' or 'hadgem_sst_uvocn' - ice_data_type, & ! 'default', 'box2001', 'boxslotcyl' + ocn_data_type, & ! 'default', 'clim', 'ncar', 'oned', 'calm', 'box2001' + ! 'hadgem_sst' or 'hadgem_sst_uvocn', 'uniform' + ice_data_type, & ! 'latsst', 'box2001', 'boxslotcyl', etc + ice_data_conc, & ! 'p5','p8','p9','c1','parabolic' + ice_data_dist, & ! 'box2001','gauss', 'uniform' precip_units ! 
'mm_per_month', 'mm_per_sec', 'mks','m_per_sec' logical (kind=log_kind), public :: & @@ -309,9 +313,25 @@ subroutine init_forcing_atmo elseif (trim(atm_data_type) == 'ISPOL') then call ISPOL_files elseif (trim(atm_data_type) == 'box2001') then - call box2001_data + call box2001_data_atm + elseif (trim(atm_data_type) == 'uniform_northeast') then + call uniform_data_atm('NE') + elseif (trim(atm_data_type) == 'uniform_north') then + call uniform_data_atm('N') + elseif (trim(atm_data_type) == 'uniform_east') then + call uniform_data_atm('E') + elseif (trim(atm_data_type) == 'uniform_south') then + call uniform_data_atm('S') + elseif (trim(atm_data_type) == 'uniform_west') then + call uniform_data_atm('W') + elseif (trim(atm_data_type) == 'calm') then + call uniform_data_atm('N',c0) ! direction does not matter when c0 elseif (trim(atm_data_type) == 'hycom') then call hycom_atm_files + elseif (trim(atm_data_type) == 'default') then + ! don't need to do anything more + else + call abort_ice (error_message=subname//' ERROR atm_data_type unknown = '//trim(atm_data_type), file=__FILE__, line=__LINE__) endif end subroutine init_forcing_atmo @@ -465,10 +485,7 @@ subroutine init_forcing_ocn(dt) enddo !$OMP END PARALLEL DO - endif ! init_sst_data - - - if (trim(ocn_data_type) == 'hadgem_sst' .or. & + elseif (trim(ocn_data_type) == 'hadgem_sst' .or. & trim(ocn_data_type) == 'hadgem_sst_uvocn') then diag = .true. ! write diagnostic information @@ -500,15 +517,29 @@ subroutine init_forcing_ocn(dt) enddo !$OMP END PARALLEL DO - endif ! ocn_data_type - - if (trim(ocn_data_type) == 'ncar') then + elseif (trim(ocn_data_type) == 'ncar') then call ocn_data_ncar_init ! call ocn_data_ncar_init_3D - endif - if (trim(ocn_data_type) == 'hycom') then + elseif (trim(ocn_data_type) == 'hycom') then call ocn_data_hycom_init + + elseif (trim(ocn_data_type) == 'box2001') then + call box2001_data_ocn + + ! uniform forcing options + elseif (trim(ocn_data_type) == 'uniform_northeast') then + call uniform_data_ocn('NE',p1) + elseif (trim(ocn_data_type) == 'uniform_east') then + call uniform_data_ocn('E',p1) + elseif (trim(ocn_data_type) == 'uniform_north') then + call uniform_data_ocn('N',p1) + elseif (trim(ocn_data_type) == 'calm') then + call uniform_data_ocn('N',c0) ! direction does not matter for c0 + elseif (trim(ocn_data_type) == 'default') then + ! don't need to do anything more + else + call abort_ice (error_message=subname//' ERROR ocn_data_type unknown = '//trim(ocn_data_type), file=__FILE__, line=__LINE__) endif end subroutine init_forcing_ocn @@ -558,8 +589,7 @@ subroutine get_forcing_atmo ! 
Get atmospheric forcing data and interpolate as necessary use ice_blocks, only: block, get_block - use ice_boundary, only: ice_HaloUpdate - use ice_domain, only: nblocks, blocks_ice, halo_info + use ice_domain, only: nblocks, blocks_ice use ice_flux, only: Tair, fsw, flw, frain, fsnow, Qa, rhoa, & uatm, vatm, strax, stray, zlvl, wind, swvdr, swvdf, swidr, swidf, & potT, sst @@ -625,9 +655,24 @@ subroutine get_forcing_atmo elseif (trim(atm_data_type) == 'oned') then call oned_data elseif (trim(atm_data_type) == 'box2001') then - call box2001_data + call box2001_data_atm + elseif (trim(atm_data_type) == 'uniform_northeast') then + call uniform_data_atm('NE') + elseif (trim(atm_data_type) == 'uniform_north') then + call uniform_data_atm('N') + elseif (trim(atm_data_type) == 'uniform_east') then + call uniform_data_atm('E') + elseif (trim(atm_data_type) == 'uniform_south') then + call uniform_data_atm('S') + elseif (trim(atm_data_type) == 'uniform_west') then + call uniform_data_atm('W') + elseif (trim(atm_data_type) == 'calm') then + call uniform_data_atm('N',c0) ! direction does not matter when c0 elseif (trim(atm_data_type) == 'hycom') then call hycom_atm_data + !elseif (trim(atm_data_type) == 'uniform_northeast') then + !elseif (trim(atm_data_type) == 'uniform_east') then + !elseif (trim(atm_data_type) == 'uniform_north') then else ! default values set in init_flux return endif @@ -639,7 +684,7 @@ subroutine get_forcing_atmo !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -725,6 +770,18 @@ subroutine get_forcing_ocn (dt) elseif (trim(ocn_data_type) == 'hycom') then ! call ocn_data_hycom(dt) !MHRI: NOT IMPLEMENTED YET + elseif (trim(ocn_data_type) == 'box2001') then + call box2001_data_ocn + ! uniform forcing options + elseif (trim(ocn_data_type) == 'uniform_northeast') then +! tcraig, not time varying + call uniform_data_ocn('NE',p1) + elseif (trim(ocn_data_type) == 'uniform_east') then + call uniform_data_ocn('E',p1) + elseif (trim(ocn_data_type) == 'uniform_north') then + call uniform_data_ocn('N',p1) + elseif (trim(ocn_data_type) == 'calm') then + call uniform_data_ocn('N',c0) ! direction does not matter for c0 endif call ice_timer_stop(timer_forcing) @@ -2383,7 +2440,7 @@ subroutine LY_data enddo ! AOMIP - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -3425,7 +3482,7 @@ subroutine monthly_data enddo ! AOMIP - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -3954,7 +4011,7 @@ subroutine ocn_data_ncar_init_3D use ice_blocks, only: nx_block, ny_block use ice_domain_size, only: max_blocks - use ice_grid, only: to_ugrid, ANGLET + use ice_grid, only: grid_average_X2Y, ANGLET use ice_read_write, only: ice_read_nc_uv #ifdef USE_NETCDF use netcdf @@ -4072,8 +4129,8 @@ subroutine ocn_data_ncar_init_3D work1(:,:,:) = ocn_frc_m(:,:,:,n ,m) work2(:,:,:) = ocn_frc_m(:,:,:,n+1,m) - call to_ugrid(work1,ocn_frc_m(:,:,:,n ,m)) - call to_ugrid(work2,ocn_frc_m(:,:,:,n+1,m)) + call grid_average_X2Y('F',work1,'T',ocn_frc_m(:,:,:,n ,m),'U') + call grid_average_X2Y('F',work2,'T',ocn_frc_m(:,:,:,n+1,m),'U') enddo ! month loop enddo ! 
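As the hunk above shows, grid_average_X2Y replaces to_ugrid with an explicit mapping: an averaging type, the source field with its grid location, and the destination field with its location. A minimal sketch of the same T-to-U pattern (uocnT and uocnU are hypothetical names; the scratch copy mirrors the non-aliasing usage in these calls):

      subroutine map_T_to_U (uocnT, uocnU)
         use ice_kinds_mod, only: dbl_kind
         use ice_blocks, only: nx_block, ny_block
         use ice_domain_size, only: max_blocks
         use ice_grid, only: grid_average_X2Y
         real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), &
            intent(in)  :: uocnT        ! field at T points
         real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), &
            intent(out) :: uocnU        ! field averaged to U points
         real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: &
            work1                       ! scratch copy, as in the calls above
         work1 = uocnT
         call grid_average_X2Y('F', work1, 'T', uocnU, 'U')  ! same 'F' type as above
      end subroutine map_T_to_U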
field loop @@ -4315,7 +4372,7 @@ subroutine ocn_data_hadgem(dt) use ice_domain, only: nblocks use ice_flux, only: sst, uocn, vocn - use ice_grid, only: t2ugrid_vector, ANGLET + use ice_grid, only: grid_average_X2Y, ANGLET real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -4330,6 +4387,9 @@ subroutine ocn_data_hadgem(dt) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: & sstdat ! data value toward which SST is restored + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 ! temporary array + real (kind=dbl_kind) :: workx, worky logical (kind=log_kind) :: readm @@ -4474,8 +4534,11 @@ subroutine ocn_data_hadgem(dt) ! Interpolate to U grid !----------------------------------------------------------------- - call t2ugrid_vector(uocn) - call t2ugrid_vector(vocn) + ! tcraig, this is now computed in dynamics for consistency + !work1 = uocn + !call grid_average_X2Y('F',work1,'T',uocn,'U') + !work1 = vocn + !call grid_average_X2Y('F',work1,'T',vocn,'U') endif ! ocn_data_type = hadgem_sst_uvocn @@ -5246,18 +5309,17 @@ end subroutine ocn_data_ispol_init !======================================================================= ! - subroutine box2001_data + subroutine box2001_data_atm ! wind and current fields as in Hunke, JCP 2001 ! these are defined at the u point ! authors: Elizabeth Hunke, LANL - use ice_domain, only: nblocks + use ice_domain, only: nblocks, blocks_ice use ice_domain_size, only: max_blocks use ice_calendar, only: timesecs - use ice_blocks, only: nx_block, ny_block, nghost - use ice_flux, only: uocn, vocn, uatm, vatm, wind, rhoa, strax, stray - use ice_grid, only: uvm, to_ugrid + use ice_blocks, only: block, get_block, nx_block, ny_block, nghost + use ice_flux, only: uatm, vatm, wind, rhoa, strax, stray use ice_state, only: aice ! local parameters @@ -5265,53 +5327,61 @@ subroutine box2001_data integer (kind=int_kind) :: & iblk, i,j ! loop indices - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - aiu ! ice fraction on u-grid + integer (kind=int_kind) :: & + iglob(nx_block), & ! global indices + jglob(ny_block) ! global indices + + type (block) :: & + this_block ! block information for current block real (kind=dbl_kind) :: & - secday, pi , puny, period, pi2, tau + secday, pi , puny, period, pi2, tau - character(len=*), parameter :: subname = '(box2001_data)' + character(len=*), parameter :: subname = '(box2001_data_atm)' if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call icepack_query_parameters(pi_out=pi, pi2_out=pi2, puny_out=puny) call icepack_query_parameters(secday_out=secday) - call to_ugrid(aice, aiu) - period = c4*secday do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - ! ocean current - ! constant in time, could be initialized in ice_flux.F90 - uocn(i,j,iblk) = p2*real(j-nghost, kind=dbl_kind) & - / real(nx_global,kind=dbl_kind) - p1 - vocn(i,j,iblk) = -p2*real(i-nghost, kind=dbl_kind) & - / real(ny_global,kind=dbl_kind) + p1 - - uocn(i,j,iblk) = uocn(i,j,iblk) * uvm(i,j,iblk) - vocn(i,j,iblk) = vocn(i,j,iblk) * uvm(i,j,iblk) + this_block = get_block(blocks_ice(iblk),iblk) + iglob = this_block%i_glob + jglob = this_block%j_glob + +!tcraig, move to box2001_data_ocn +! ! ocean current +! ! constant in time, could be initialized in ice_flux.F90 +! uocn(i,j,iblk) = p2*real(j-nghost, kind=dbl_kind) & +! / real(nx_global,kind=dbl_kind) - p1 +! vocn(i,j,iblk) = -p2*real(i-nghost, kind=dbl_kind) & +! / real(ny_global,kind=dbl_kind) + p1 +! +! 
uocn(i,j,iblk) = uocn(i,j,iblk) * uvm(i,j,iblk) +! vocn(i,j,iblk) = vocn(i,j,iblk) * uvm(i,j,iblk) ! wind components uatm(i,j,iblk) = c5 + (sin(pi2*timesecs/period)-c3) & - * sin(pi2*real(i-nghost, kind=dbl_kind) & + * sin(pi2*real(iglob(i), kind=dbl_kind) & /real(nx_global,kind=dbl_kind)) & - * sin(pi *real(j-nghost, kind=dbl_kind) & + * sin(pi *real(jglob(j), kind=dbl_kind) & /real(ny_global,kind=dbl_kind)) vatm(i,j,iblk) = c5 + (sin(pi2*timesecs/period)-c3) & - * sin(pi *real(i-nghost, kind=dbl_kind) & + * sin(pi *real(iglob(i), kind=dbl_kind) & /real(nx_global,kind=dbl_kind)) & - * sin(pi2*real(j-nghost, kind=dbl_kind) & + * sin(pi2*real(jglob(j), kind=dbl_kind) & /real(ny_global,kind=dbl_kind)) ! wind stress wind(i,j,iblk) = sqrt(uatm(i,j,iblk)**2 + vatm(i,j,iblk)**2) tau = rhoa(i,j,iblk) * 0.0012_dbl_kind * wind(i,j,iblk) - strax(i,j,iblk) = aiu(i,j,iblk) * tau * uatm(i,j,iblk) - stray(i,j,iblk) = aiu(i,j,iblk) * tau * vatm(i,j,iblk) + + strax(i,j,iblk) = aice(i,j,iblk) * tau * uatm(i,j,iblk) + stray(i,j,iblk) = aice(i,j,iblk) * tau * vatm(i,j,iblk) ! initialization test ! Diagonal wind vectors 1 @@ -5341,8 +5411,186 @@ subroutine box2001_data enddo enddo ! nblocks - end subroutine box2001_data + end subroutine box2001_data_atm + +!======================================================================= +! + subroutine box2001_data_ocn + +! wind and current fields as in Hunke, JCP 2001 +! these are defined at the u point +! authors: Elizabeth Hunke, LANL + + use ice_domain, only: nblocks, blocks_ice + use ice_domain_size, only: max_blocks + use ice_calendar, only: timesecs + use ice_blocks, only: block, get_block, nx_block, ny_block, nghost + use ice_flux, only: uocn, vocn + use ice_grid, only: uvm + + ! local parameters + + integer (kind=int_kind) :: & + iblk, i,j ! loop indices + + integer (kind=int_kind) :: & + iglob(nx_block), & ! global indices + jglob(ny_block) ! global indices + + type (block) :: & + this_block ! block information for current block + + real (kind=dbl_kind) :: & + secday, pi , puny, period, pi2, tau + + character(len=*), parameter :: subname = '(box2001_data_ocn)' + + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + + this_block = get_block(blocks_ice(iblk),iblk) + iglob = this_block%i_glob + jglob = this_block%j_glob + + ! ocean current + ! constant in time, could be initialized in ice_flux.F90 + uocn(i,j,iblk) = p2*real(jglob(j), kind=dbl_kind) & + / real(ny_global,kind=dbl_kind) - p1 + vocn(i,j,iblk) = -p2*real(iglob(i), kind=dbl_kind) & + / real(nx_global,kind=dbl_kind) + p1 + + uocn(i,j,iblk) = uocn(i,j,iblk) * uvm(i,j,iblk) + vocn(i,j,iblk) = vocn(i,j,iblk) * uvm(i,j,iblk) + + enddo + enddo + enddo ! nblocks + + end subroutine box2001_data_ocn + +!======================================================================= +! + subroutine uniform_data_atm(dir,spd) +! uniform wind fields in some direction + + use ice_domain, only: nblocks + use ice_domain_size, only: max_blocks + use ice_blocks, only: nx_block, ny_block, nghost + use ice_flux, only: uatm, vatm, wind, rhoa, strax, stray + use ice_state, only: aice + + character(len=*), intent(in) :: dir + real(kind=dbl_kind), intent(in), optional :: spd ! velocity + + ! local parameters + + integer (kind=int_kind) :: & + iblk, i,j ! loop indices + + real (kind=dbl_kind) :: & + tau, & + atm_val ! 
value to use for atm speed + + character(len=*), parameter :: subname = '(uniform_data_atm)' + + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + + ! check for optional spd + if (present(spd)) then + atm_val = spd + else + atm_val = c5 ! default + endif + + ! wind components + if (dir == 'NE') then + uatm = atm_val + vatm = atm_val + elseif (dir == 'N') then + uatm = c0 + vatm = atm_val + elseif (dir == 'E') then + uatm = atm_val + vatm = c0 + elseif (dir == 'S') then + uatm = c0 + vatm = -atm_val + elseif (dir == 'W') then + uatm = -atm_val + vatm = c0 + else + call abort_ice (subname//'ERROR: dir unknown, dir = '//trim(dir), & + file=__FILE__, line=__LINE__) + endif + + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + + ! wind stress + wind(i,j,iblk) = sqrt(uatm(i,j,iblk)**2 + vatm(i,j,iblk)**2) + tau = rhoa(i,j,iblk) * 0.0012_dbl_kind * wind(i,j,iblk) + strax(i,j,iblk) = aice(i,j,iblk) * tau * uatm(i,j,iblk) + stray(i,j,iblk) = aice(i,j,iblk) * tau * vatm(i,j,iblk) + + enddo + enddo + enddo ! nblocks + + end subroutine uniform_data_atm +!======================================================================= + +! + subroutine uniform_data_ocn(dir,spd) + +! uniform ocean current fields in some direction + + use ice_domain, only: nblocks + use ice_domain_size, only: max_blocks + use ice_blocks, only: nx_block, ny_block, nghost + use ice_flux, only: uocn, vocn + + character(len=*), intent(in) :: dir + + real(kind=dbl_kind), intent(in), optional :: spd ! velocity + + ! local parameters + + integer (kind=int_kind) :: & + iblk, i,j ! loop indices + + real(kind=dbl_kind) :: & + ocn_val ! value to use for ocean currents + + character(len=*), parameter :: subname = '(uniform_data_ocn)' + + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + + if (present(spd)) then + ocn_val = spd + else + ocn_val = p1 ! default + endif + + ! ocn components + if (dir == 'NE') then + uocn = ocn_val + vocn = ocn_val + elseif (dir == 'N') then + uocn = c0 + vocn = ocn_val + elseif (dir == 'E') then + uocn = ocn_val + vocn = c0 + else + call abort_ice (subname//'ERROR: dir unknown, dir = '//trim(dir), & + file=__FILE__, line=__LINE__) + endif + end subroutine uniform_data_ocn !======================================================================= subroutine get_wave_spec diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 0275ca67c..6276770bc 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -14,7 +14,7 @@ module ice_init use ice_kinds_mod use ice_communicate, only: my_task, master_task, ice_barrier - use ice_constants, only: c0, c1, c2, c3, p2, p5 + use ice_constants, only: c0, c1, c2, c3, c5, p2, p3, p5, p75, p166 use ice_exit, only: abort_ice use ice_fileunits, only: nu_nml, nu_diag, nu_diag_set, nml_filename, diag_type, & ice_stdout, get_fileunit, release_fileunit, bfbflag, flush_fileunit, & @@ -36,9 +36,9 @@ module ice_init implicit none private - character(len=char_len_long),public :: & + character(len=char_len_long), public :: & ice_ic ! method of ice cover initialization - ! 'default' => latitude and sst dependent + ! 'internal' => set from ice_data_ namelist ! 'none' => no ice ! 
filename => read file @@ -89,25 +89,30 @@ subroutine input_data atm_data_type, atm_data_dir, precip_units, rotate_wind, & atm_data_format, ocn_data_format, & bgc_data_type, & - ocn_data_type, ocn_data_dir, wave_spec_file, & - oceanmixed_file, restore_ocn, trestore, & - ice_data_type, & + ocn_data_type, ocn_data_dir, wave_spec_file, & + oceanmixed_file, restore_ocn, trestore, & + ice_data_type, ice_data_conc, ice_data_dist, & snw_filename, & snw_tau_fname, snw_kappa_fname, snw_drdt0_fname, & snw_rhos_fname, snw_Tgrd_fname, snw_T_fname use ice_arrays_column, only: bgc_data_dir, fe_data_type use ice_grid, only: grid_file, gridcpl_file, kmt_file, & bathymetry_file, use_bathymetry, & - bathymetry_format, & + bathymetry_format, kmt_type, & grid_type, grid_format, & + grid_ice, grid_ice_thrm, grid_ice_dynu, grid_ice_dynv, & + grid_ocn, grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv, & + grid_atm, grid_atm_thrm, grid_atm_dynu, grid_atm_dynv, & dxrect, dyrect, & pgl_global_ext use ice_dyn_shared, only: ndte, kdyn, revised_evp, yield_curve, & - evp_algorithm, & - seabed_stress, seabed_stress_method, & - k1, k2, alphab, threshold_hw, Ktens, & + evp_algorithm, visc_method, & + seabed_stress, seabed_stress_method, & + k1, k2, alphab, threshold_hw, Ktens, & e_yieldcurve, e_plasticpot, coriolis, & - ssh_stress, kridge, brlx, arlx + ssh_stress, kridge, brlx, arlx, & + deltaminEVP, deltaminVP, capping, & + elasticDamp use ice_dyn_vp, only: maxits_nonlin, precond, dim_fgmres, dim_pgmres, maxits_fgmres, & maxits_pgmres, monitor_nonlin, monitor_fgmres, & @@ -117,6 +122,7 @@ subroutine input_data use ice_transport_driver, only: advection, conserv_check use ice_restoring, only: restore_ice use ice_timers, only: timer_stats + use ice_memusage, only: memory_stats #ifdef CESMCOUPLED use shr_file_mod, only: shr_file_setIO #endif @@ -175,7 +181,7 @@ subroutine input_data print_global, print_points, latpnt, lonpnt, & debug_forcing, histfreq, histfreq_n, hist_avg, & history_dir, history_file, history_precision, cpl_bgc, & - histfreq_base, dumpfreq_base, timer_stats, & + histfreq_base, dumpfreq_base, timer_stats, memory_stats, & conserv_check, debug_model, debug_model_step, & debug_model_i, debug_model_j, debug_model_iblk, debug_model_task, & year_init, month_init, day_init, sec_init, & @@ -186,7 +192,8 @@ subroutine input_data bathymetry_file, use_bathymetry, nfsd, bathymetry_format, & ncat, nilyr, nslyr, nblyr, & kcatbound, gridcpl_file, dxrect, dyrect, & - close_boundaries, orca_halogrid + close_boundaries, orca_halogrid, grid_ice, kmt_type, & + grid_atm, grid_ocn namelist /tracer_nml/ & tr_iage, restart_age, & @@ -210,11 +217,11 @@ subroutine input_data namelist /dynamics_nml/ & kdyn, ndte, revised_evp, yield_curve, & - evp_algorithm, & + evp_algorithm, elasticDamp, & brlx, arlx, ssh_stress, & advection, coriolis, kridge, ktransport, & kstrength, krdg_partic, krdg_redist, mu_rdg, & - e_yieldcurve, e_plasticpot, Ktens, & + e_yieldcurve, e_plasticpot, visc_method, & maxits_nonlin, precond, dim_fgmres, & dim_pgmres, maxits_fgmres, maxits_pgmres, monitor_nonlin, & monitor_fgmres, monitor_pgmres, reltol_nonlin, reltol_fgmres, & @@ -222,7 +229,8 @@ subroutine input_data damping_andacc, start_andacc, fpfunc_andacc, use_mean_vrel, & ortho_type, seabed_stress, seabed_stress_method, & k1, k2, alphab, threshold_hw, & - Cf, Pstar, Cstar + deltaminEVP, deltaminVP, capping, & + Cf, Pstar, Cstar, Ktens namelist /shortwave_nml/ & shortwave, albedo_type, & @@ -251,8 +259,8 @@ subroutine input_data oceanmixed_ice, restore_ice, 
restore_ocn, trestore, & precip_units, default_season, wave_spec_type,nfreq, & atm_data_type, ocn_data_type, bgc_data_type, fe_data_type, & - ice_data_type, wave_spec_file, restart_coszen, & - fyear_init, ycycle, & + ice_data_type, ice_data_conc, ice_data_dist, & + fyear_init, ycycle, wave_spec_file,restart_coszen, & atm_data_dir, ocn_data_dir, bgc_data_dir, & atm_data_format, ocn_data_format, rotate_wind, & oceanmixed_file @@ -294,6 +302,7 @@ subroutine input_data print_points = .false. ! if true, print point data print_global = .true. ! if true, print global diagnostic data timer_stats = .false. ! if true, print out detailed timer statistics + memory_stats = .false. ! if true, print out memory information bfbflag = 'off' ! off = optimized diag_type = 'stdout' diag_file = 'ice_diag.d' @@ -328,11 +337,15 @@ subroutine input_data grid_format = 'bin' ! file format ('bin'=binary or 'nc'=netcdf) grid_type = 'rectangular' ! define rectangular grid internally grid_file = 'unknown_grid_file' + grid_ice = 'B' ! underlying grid system + grid_atm = 'A' ! underlying atm forcing/coupling grid + grid_ocn = 'A' ! underlying atm forcing/coupling grid gridcpl_file = 'unknown_gridcpl_file' orca_halogrid = .false. ! orca haloed grid bathymetry_file = 'unknown_bathymetry_file' bathymetry_format = 'default' use_bathymetry = .false. + kmt_type = 'file' kmt_file = 'unknown_kmt_file' version_name = 'unknown_version_name' ncat = 0 ! number of ice thickness categories @@ -347,11 +360,12 @@ subroutine input_data ndtd = 1 ! dynamic time steps per thermodynamic time step ndte = 120 ! subcycles per dynamics timestep: ndte=dt_dyn/dte evp_algorithm = 'standard_2d' ! EVP kernel (=standard_2d: standard cice evp; =shared_mem_1d: 1d shared memory and no mpi. if more mpi processors then executed on master + elasticDamp = 0.36_dbl_kind ! coefficient for calculating the parameter E pgl_global_ext = .false. ! if true, init primary grid lengths (global ext.) brlx = 300.0_dbl_kind ! revised_evp values. Otherwise overwritten in ice_dyn_shared arlx = 300.0_dbl_kind ! revised_evp values. Otherwise overwritten in ice_dyn_shared revised_evp = .false. ! if true, use revised procedure for evp dynamics - yield_curve = 'ellipse' + yield_curve = 'ellipse' ! yield curve kstrength = 1 ! 1 = Rothrock 75 strength, 0 = Hibler 79 Pstar = 2.75e4_dbl_kind ! constant in Hibler strength formula (kstrength = 0) Cstar = 20._dbl_kind ! constant in Hibler strength formula (kstrength = 0) @@ -372,6 +386,10 @@ subroutine input_data Ktens = 0.0_dbl_kind ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) e_yieldcurve = 2.0_dbl_kind ! VP aspect ratio of elliptical yield curve e_plasticpot = 2.0_dbl_kind ! VP aspect ratio of elliptical plastic potential + visc_method = 'avg_strength' ! calc viscosities at U point: avg_strength, avg_zeta + deltaminEVP = 1e-11_dbl_kind ! minimum delta for viscosities (EVP, Hunke 2001) + deltaminVP = 2e-9_dbl_kind ! minimum delta for viscosities (VP, Hibler 1979) + capping = 1.0_dbl_kind ! method for capping of viscosities (1=Hibler 1979,0=Kreyscher2000) maxits_nonlin = 4 ! max nb of iteration for nonlinear solver precond = 'pgmres' ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), 'pgmres' (Jacobi-preconditioned GMRES) dim_fgmres = 50 ! size of fgmres Krylov subspace @@ -475,7 +493,9 @@ subroutine input_data ocn_data_format = 'bin' ! file format ('bin'=binary or 'nc'=netcdf) bgc_data_type = 'default' fe_data_type = 'default' - ice_data_type = 'default' ! 
used by some tests to initialize ice state (concentration, velocities) + ice_data_type = 'default' ! used by some tests to initialize ice state (overall type and mask) + ice_data_conc = 'default' ! used by some tests to initialize ice state (concentration) + ice_data_dist = 'default' ! used by some tests to initialize ice state (distribution) bgc_data_dir = 'unknown_bgc_data_dir' ocn_data_type = 'default' ocn_data_dir = 'unknown_ocn_data_dir' @@ -733,7 +753,9 @@ subroutine input_data open(nu_diag,file=tmpstr) endif end if - if (trim(ice_ic) /= 'default' .and. trim(ice_ic) /= 'none') then + if (trim(ice_ic) /= 'default' .and. & + trim(ice_ic) /= 'none' .and. & + trim(ice_ic) /= 'internal') then restart = .true. end if #else @@ -766,6 +788,7 @@ subroutine input_data call broadcast_scalar(print_points, master_task) call broadcast_scalar(print_global, master_task) call broadcast_scalar(timer_stats, master_task) + call broadcast_scalar(memory_stats, master_task) call broadcast_scalar(bfbflag, master_task) call broadcast_scalar(diag_type, master_task) call broadcast_scalar(diag_file, master_task) @@ -802,12 +825,16 @@ subroutine input_data call broadcast_scalar(dyrect, master_task) call broadcast_scalar(close_boundaries, master_task) call broadcast_scalar(grid_type, master_task) + call broadcast_scalar(grid_ice, master_task) + call broadcast_scalar(grid_ocn, master_task) + call broadcast_scalar(grid_atm, master_task) call broadcast_scalar(grid_file, master_task) call broadcast_scalar(gridcpl_file, master_task) call broadcast_scalar(orca_halogrid, master_task) call broadcast_scalar(bathymetry_file, master_task) call broadcast_scalar(bathymetry_format, master_task) call broadcast_scalar(use_bathymetry, master_task) + call broadcast_scalar(kmt_type, master_task) call broadcast_scalar(kmt_file, master_task) call broadcast_scalar(kitd, master_task) call broadcast_scalar(kcatbound, master_task) @@ -815,6 +842,7 @@ subroutine input_data call broadcast_scalar(ndtd, master_task) call broadcast_scalar(ndte, master_task) call broadcast_scalar(evp_algorithm, master_task) + call broadcast_scalar(elasticDamp, master_task) call broadcast_scalar(pgl_global_ext, master_task) call broadcast_scalar(brlx, master_task) call broadcast_scalar(arlx, master_task) @@ -837,6 +865,10 @@ subroutine input_data call broadcast_scalar(Ktens, master_task) call broadcast_scalar(e_yieldcurve, master_task) call broadcast_scalar(e_plasticpot, master_task) + call broadcast_scalar(visc_method, master_task) + call broadcast_scalar(deltaminEVP, master_task) + call broadcast_scalar(deltaminVP, master_task) + call broadcast_scalar(capping, master_task) call broadcast_scalar(advection, master_task) call broadcast_scalar(conserv_check, master_task) call broadcast_scalar(shortwave, master_task) @@ -936,6 +968,8 @@ subroutine input_data call broadcast_scalar(bgc_data_type, master_task) call broadcast_scalar(fe_data_type, master_task) call broadcast_scalar(ice_data_type, master_task) + call broadcast_scalar(ice_data_conc, master_task) + call broadcast_scalar(ice_data_dist, master_task) call broadcast_scalar(bgc_data_dir, master_task) call broadcast_scalar(ocn_data_type, master_task) call broadcast_scalar(ocn_data_dir, master_task) @@ -1001,6 +1035,15 @@ subroutine input_data pointer_file = trim(pointer_file) // trim(inst_suffix) #endif + !----------------------------------------------------------------- + ! 
update defaults + !----------------------------------------------------------------- + + if (trim(ice_ic) == 'default') ice_ic = 'internal' + if (trim(ice_data_conc) == 'default') ice_data_conc = 'parabolic' + if (trim(ice_data_dist) == 'default') ice_data_dist = 'uniform' + if (trim(ice_data_type) == 'default') ice_data_type = 'latsst' + !----------------------------------------------------------------- ! verify inputs !----------------------------------------------------------------- @@ -1027,11 +1070,11 @@ subroutine input_data restart = .true. use_restart_time = .true. elseif (trim(runtype) == 'initial') then - if (ice_ic == 'none' .or. ice_ic == 'default') then + if (ice_ic == 'none' .or. ice_ic == 'internal') then if (my_task == master_task) then - write(nu_diag,*) subname//'NOTE: ice_ic = none or default, setting restart flags to .false.' + write(nu_diag,*) subname//'NOTE: ice_ic = none or internal, setting restart flags to .false.' if (.not. use_restart_time) & - write(nu_diag,*) subname//'NOTE: ice_ic = none or default, setting use_restart_time=.false.' + write(nu_diag,*) subname//'NOTE: ice_ic = none or internal, setting use_restart_time=.false.' write(nu_diag,*) ' ' endif use_restart_time = .false. @@ -1050,7 +1093,7 @@ subroutine input_data ! restart_ext = .false. else if (my_task == master_task) then - write(nu_diag,*) subname//'NOTE: ice_ic /= none or default, setting restart=.true.' + write(nu_diag,*) subname//'NOTE: ice_ic /= none or internal, setting restart=.true.' write(nu_diag,*) ' ' endif restart = .true. @@ -1115,6 +1158,45 @@ subroutine input_data endif endif + if (grid_ice == 'CD') then + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: grid_ice = CD not supported yet' + endif + abort_list = trim(abort_list)//":47" + elseif (grid_ice == 'C_override_D') then + if (my_task == master_task) then + write(nu_diag,*) subname//' WARNING: using grid_ice = CD, not supported' + endif + grid_ice = 'CD' + endif + + if (grid_ice == 'C' .or. grid_ice == 'CD') then + if (kdyn > 1) then + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: grid_ice = C | CD only supported with kdyn<=1 (evp or off)' + write(nu_diag,*) subname//' ERROR: kdyn and grid_ice inconsistency' + endif + abort_list = trim(abort_list)//":46" + endif + if (visc_method /= 'avg_zeta' .and. visc_method /= 'avg_strength') then + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: invalid method for viscosities' + write(nu_diag,*) subname//' ERROR: visc_method should be avg_zeta or avg_strength' + endif + abort_list = trim(abort_list)//":44" + endif + endif + + if (kdyn == 1 .or. kdyn == 3) then + if (capping /= c0 .and. capping /= c1) then + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: invalid method for capping viscosities' + write(nu_diag,*) subname//' ERROR: capping should be equal to 0.0 or 1.0' + endif + abort_list = trim(abort_list)//":45" + endif + endif + rpcesm = 0 rplvl = 0 rptopo = 0 @@ -1436,6 +1518,68 @@ subroutine input_data wave_spec = .false. if (tr_fsd .and. (trim(wave_spec_type) /= 'none')) wave_spec = .true. + ! 
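Pulling the new options together, a minimal ice_in fragment for a C-grid EVP run that is consistent with the checks above; the values shown are the defaults set earlier in this subroutine, except grid_ice:

&grid_nml
    grid_ice    = 'C'             ! 'B' (default), 'C', or 'CD'
    grid_atm    = 'A'
    grid_ocn    = 'A'
    kmt_type    = 'file'          ! 'file', 'channel', 'wall', 'default'
/
&dynamics_nml
    kdyn        = 1               ! C | CD grids require kdyn <= 1 (EVP or off)
    visc_method = 'avg_strength'  ! or 'avg_zeta' (viscosities at U point)
    deltaminEVP = 1e-11           ! minimum delta for viscosities (EVP)
    capping     = 1.0             ! 1.0 = Hibler 1979, 0.0 = Kreyscher 2000
    elasticDamp = 0.36            ! coefficient for calculating the parameter E
/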
compute grid locations for thermo, u and v fields + + grid_ice_thrm = 'T' + if (grid_ice == 'A') then + grid_ice_dynu = 'T' + grid_ice_dynv = 'T' + elseif (grid_ice == 'B') then + grid_ice_dynu = 'U' + grid_ice_dynv = 'U' + elseif (grid_ice == 'C') then + grid_ice_dynu = 'E' + grid_ice_dynv = 'N' + elseif (grid_ice == 'CD') then + grid_ice_dynu = 'NE' + grid_ice_dynv = 'NE' + else + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: unknown grid_ice: '//trim(grid_ice) + endif + abort_list = trim(abort_list)//":64" + endif + + grid_atm_thrm = 'T' + if (grid_atm == 'A') then + grid_atm_dynu = 'T' + grid_atm_dynv = 'T' + elseif (grid_atm == 'B') then + grid_atm_dynu = 'U' + grid_atm_dynv = 'U' + elseif (grid_atm == 'C') then + grid_atm_dynu = 'E' + grid_atm_dynv = 'N' + elseif (grid_atm == 'CD') then + grid_atm_dynu = 'NE' + grid_atm_dynv = 'NE' + else + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: unknown grid_atm: '//trim(grid_atm) + endif + abort_list = trim(abort_list)//":65" + endif + + grid_ocn_thrm = 'T' + if (grid_ocn == 'A') then + grid_ocn_dynu = 'T' + grid_ocn_dynv = 'T' + elseif (grid_ocn == 'B') then + grid_ocn_dynu = 'U' + grid_ocn_dynv = 'U' + elseif (grid_ocn == 'C') then + grid_ocn_dynu = 'E' + grid_ocn_dynv = 'N' + elseif (grid_ocn == 'CD') then + grid_ocn_dynu = 'NE' + grid_ocn_dynv = 'NE' + else + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: unknown grid_ocn: '//trim(grid_ocn) + endif + abort_list = trim(abort_list)//":66" + endif + !----------------------------------------------------------------- ! spew !----------------------------------------------------------------- @@ -1467,6 +1611,19 @@ subroutine input_data if (trim(grid_type) == 'displaced_pole') tmpstr2 = ' : user-defined grid with rotated north pole' if (trim(grid_type) == 'tripole') tmpstr2 = ' : user-defined grid with northern hemisphere zipper' write(nu_diag,1030) ' grid_type = ',trim(grid_type),trim(tmpstr2) + write(nu_diag,1030) ' grid_ice = ',trim(grid_ice) + write(nu_diag,1030) ' grid_ice_thrm = ',trim(grid_ice_thrm) + write(nu_diag,1030) ' grid_ice_dynu = ',trim(grid_ice_dynu) + write(nu_diag,1030) ' grid_ice_dynv = ',trim(grid_ice_dynv) + write(nu_diag,1030) ' grid_atm = ',trim(grid_atm) + write(nu_diag,1030) ' grid_atm_thrm = ',trim(grid_atm_thrm) + write(nu_diag,1030) ' grid_atm_dynu = ',trim(grid_atm_dynu) + write(nu_diag,1030) ' grid_atm_dynv = ',trim(grid_atm_dynv) + write(nu_diag,1030) ' grid_ocn = ',trim(grid_ocn) + write(nu_diag,1030) ' grid_ocn_thrm = ',trim(grid_ocn_thrm) + write(nu_diag,1030) ' grid_ocn_dynu = ',trim(grid_ocn_dynu) + write(nu_diag,1030) ' grid_ocn_dynv = ',trim(grid_ocn_dynv) + write(nu_diag,1030) ' kmt_type = ',trim(kmt_type) if (trim(grid_type) /= 'rectangular') then if (use_bathymetry) then tmpstr2 = ' : bathymetric input data is used' @@ -1549,12 +1706,22 @@ subroutine input_data endif if (kdyn == 1 .or. 
kdyn == 3) then - write(nu_diag,1030) ' yield_curve = ', trim(yield_curve) + write(nu_diag,1030) ' yield_curve = ', trim(yield_curve), ' : yield curve' if (trim(yield_curve) == 'ellipse') & - write(nu_diag,1002) ' e_yieldcurve = ', e_yieldcurve, ' : aspect ratio of yield curve' - write(nu_diag,1002) ' e_plasticpot = ', e_plasticpot, ' : aspect ratio of plastic potential' + write(nu_diag,1002) ' e_yieldcurve = ', e_yieldcurve, ' : aspect ratio of yield curve' + write(nu_diag,1002) ' e_plasticpot = ', e_plasticpot, ' : aspect ratio of plastic potential' endif - + + if (kdyn == 1) then + write(nu_diag,1003) ' deltamin = ', deltaminEVP, ' : minimum delta for viscosities' + write(nu_diag,1002) ' capping = ', capping, ' : capping method for viscosities' + elseif (kdyn == 3) then + write(nu_diag,1003) ' deltamin = ', deltaminVP, ' : minimum delta for viscosities' + write(nu_diag,1002) ' capping = ', capping, ' : capping method for viscosities' + endif + + write(nu_diag,1002) ' elasticDamp = ', elasticDamp, ' : coefficient for calculating the parameter E' + if (trim(coriolis) == 'latitude') then tmpstr2 = ' : latitude-dependent Coriolis parameter' elseif (trim(coriolis) == 'constant') then @@ -1603,6 +1770,10 @@ subroutine input_data write(nu_diag,1002) ' alphab = ', alphab, ' : factor for landfast ice' endif endif + if (grid_ice == 'C' .or. grid_ice == 'CD') then + write(nu_diag,1030) ' visc_method= ', trim(visc_method),' : viscosities method (U point)' + endif + write(nu_diag,1002) ' Ktens = ', Ktens, ' : tensile strength factor' if (kdyn == 3) then @@ -2013,6 +2184,7 @@ subroutine input_data write(nu_diag,1021) ' debug_model_iblk = ', debug_model_iblk write(nu_diag,1021) ' debug_model_task = ', debug_model_task write(nu_diag,1011) ' timer_stats = ', timer_stats + write(nu_diag,1011) ' memory_stats = ', memory_stats write(nu_diag,1031) ' bfbflag = ', trim(bfbflag) write(nu_diag,1021) ' numin = ', numin write(nu_diag,1021) ' numax = ', numax @@ -2048,7 +2220,8 @@ subroutine input_data write(nu_diag,1031) ' grid_file = ', trim(grid_file) write(nu_diag,1031) ' gridcpl_file = ', trim(gridcpl_file) write(nu_diag,1031) ' bathymetry_file = ', trim(bathymetry_file) - write(nu_diag,1031) ' kmt_file = ', trim(kmt_file) + if (trim(kmt_type) == 'file') & + write(nu_diag,1031) ' kmt_file = ', trim(kmt_file) endif write(nu_diag,1011) ' orca_halogrid = ', orca_halogrid @@ -2079,6 +2252,8 @@ subroutine input_data write(nu_diag,1031) ' bgc_data_type = ', trim(bgc_data_type) write(nu_diag,1031) ' fe_data_type = ', trim(fe_data_type) write(nu_diag,1031) ' ice_data_type = ', trim(ice_data_type) + write(nu_diag,1031) ' ice_data_conc = ', trim(ice_data_conc) + write(nu_diag,1031) ' ice_data_dist = ', trim(ice_data_dist) write(nu_diag,1031) ' bgc_data_dir = ', trim(bgc_data_dir) write(nu_diag,1031) ' ocn_data_type = ', trim(ocn_data_type) if (trim(bgc_data_type) /= 'default' .or. & @@ -2134,6 +2309,29 @@ subroutine input_data abort_list = trim(abort_list)//":20" endif + if (grid_ice /= 'B' .and. & + grid_ice /= 'C' .and. & + grid_ice /= 'CD' ) then + if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown grid_ice=',trim(grid_ice) + abort_list = trim(abort_list)//":26" + endif + + if (kmt_type /= 'file' .and. & + kmt_type /= 'channel' .and. & + kmt_type /= 'wall' .and. & + kmt_type /= 'default' .and. 
& + kmt_type /= 'boxislands') then + if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown kmt_type=',trim(kmt_type) + abort_list = trim(abort_list)//":27" + endif + + if (grid_type /= 'column' .and. & + grid_type /= 'rectangular' .and. & + kmt_type /= 'file') then + if (my_task == master_task) write(nu_diag,*) subname//' ERROR: need kmt file, kmt_type=',trim(kmt_type) + abort_list = trim(abort_list)//":28" + endif + if (kdyn == 1 .and. & evp_algorithm /= 'standard_2d' .and. & evp_algorithm /= 'shared_mem_1d') then @@ -2188,6 +2386,7 @@ subroutine input_data 1000 format (a20,1x,f13.6,1x,a) ! float 1002 format (a20,5x,f9.2,1x,a) + 1003 format (a20,1x,G13.4,1x,a) 1009 format (a20,1x,d13.6,1x,a) 1010 format (a20,8x,l6,1x,a) ! logical 1011 format (a20,1x,l6) @@ -2212,13 +2411,16 @@ end subroutine input_data subroutine init_state use ice_blocks, only: block, get_block, nx_block, ny_block - use ice_domain, only: nblocks, blocks_ice + use ice_domain, only: nblocks, blocks_ice, halo_info use ice_domain_size, only: ncat, nilyr, nslyr, n_iso, n_aero, nfsd use ice_flux, only: sst, Tf, Tair, salinz, Tmltz - use ice_grid, only: tmask, ULON, TLAT + use ice_grid, only: tmask, ULON, TLAT, grid_ice, grid_average_X2Y + use ice_boundary, only: ice_HaloUpdate + use ice_constants, only: field_loc_Nface, field_loc_Eface, field_type_scalar use ice_state, only: trcr_depend, aicen, trcrn, vicen, vsnon, & aice0, aice, vice, vsno, trcr, aice_init, bound_state, & - n_trcr_strata, nt_strata, trcr_base, uvel, vvel + n_trcr_strata, nt_strata, trcr_base, uvel, vvel, & + uvelN, vvelN, uvelE, vvelE integer (kind=int_kind) :: & ilo, ihi , & ! physical domain indices @@ -2449,6 +2651,27 @@ subroutine init_state vicen, vsnon, & ntrcr, trcrn) + if (grid_ice == 'CD' .or. grid_ice == 'C') then + + call grid_average_X2Y('S',uvel,'U',uvelN,'N') + call grid_average_X2Y('S',vvel,'U',vvelN,'N') + call grid_average_X2Y('S',uvel,'U',uvelE,'E') + call grid_average_X2Y('S',vvel,'U',vvelE,'E') + + ! Halo update on North, East faces + call ice_HaloUpdate(uvelN, halo_info, & + field_loc_Nface, field_type_scalar) + call ice_HaloUpdate(vvelN, halo_info, & + field_loc_Nface, field_type_scalar) + + call ice_HaloUpdate(uvelE, halo_info, & + field_loc_Eface, field_type_scalar) + call ice_HaloUpdate(vvelE, halo_info, & + field_loc_Eface, field_type_scalar) + + endif + + !----------------------------------------------------------------- ! compute aggregate ice state and open water area !----------------------------------------------------------------- @@ -2516,10 +2739,11 @@ subroutine set_state_var (nx_block, ny_block, & vicen, vsnon, & uvel, vvel) + use ice_arrays_column, only: hin_max use ice_domain_size, only: nilyr, nslyr, nx_global, ny_global, ncat - use ice_grid, only: grid_type - use ice_forcing, only: ice_data_type + use ice_grid, only: grid_type, dxrect, dyrect + use ice_forcing, only: ice_data_type, ice_data_conc, ice_data_dist integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -2557,24 +2781,36 @@ subroutine set_state_var (nx_block, ny_block, & ! 1: surface temperature of ice/snow (C) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - uvel , & ! ice velocity + uvel , & ! ice velocity B grid vvel ! ! local variables - integer (kind=int_kind) :: & i, j , & ! horizontal indices ij , & ! horizontal index, combines i and j loops k , & ! ice layer index n , & ! thickness category index it , & ! tracer index + iedge , & ! edge around big block + jedge , & ! 
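! Aside (illustrative, not part of the patch): in the init_state hunk above,
! grid_average_X2Y('S',...) maps the B-grid corner velocity to the C/CD face
! points. Assuming the 'S' (simple, unmasked) average, the interior stencil
! reduces to a two-point mean of the adjacent U corners; the real routine
! also handles masks, weights and boundaries:
      do j = jlo, jhi
      do i = ilo, ihi
         uvelE(i,j) = p5*(uvel(i,j-1) + uvel(i,j))   ! E point: middle of east face
         uvelN(i,j) = p5*(uvel(i-1,j) + uvel(i,j))   ! N point: middle of north face
      enddo
      enddo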
edge around big block icells ! number of cells initialized with ice + logical (kind=log_kind) :: & + in_slot, in_cyl ! boxslotcyl flags + + real (kind=dbl_kind) :: & ! boxslotcyl parameters + diam , & ! cylinder diameter + radius , & ! cylinder radius + center_x, & ! cylinder center + center_y, & + width , & ! slot width + length ! slot height + integer (kind=int_kind), dimension(nx_block*ny_block) :: & indxi, indxj ! compressed indices for cells with aicen > puny real (kind=dbl_kind) :: & - Tsfc, sum, hbar, puny, rhos, Lfresh, rad_to_deg, rsnw_fall + Tsfc, sum, hbar, abar, puny, rhos, Lfresh, rad_to_deg, rsnw_fall, dist_ratio real (kind=dbl_kind), dimension(ncat) :: & ainit, hinit ! initial area, thickness @@ -2657,99 +2893,255 @@ subroutine set_state_var (nx_block, ny_block, & enddo enddo - if (trim(ice_ic) == 'default') then - - !----------------------------------------------------------------- - ! Place ice where ocean surface is cold. - ! Note: If SST is not read from a file, then the ocean is assumed - ! to be at its freezing point everywhere, and ice will - ! extend to the prescribed edges. - !----------------------------------------------------------------- - - if (trim(ice_data_type) == 'box2001') then + if (trim(ice_ic) == 'internal') then + + !--------------------------------------------------------- + ! ice concentration/thickness + !--------------------------------------------------------- + + if (trim(ice_data_conc) == 'p5' .or. & + trim(ice_data_conc) == 'p8' .or. & + trim(ice_data_conc) == 'p9' .or. & + trim(ice_data_conc) == 'c1') then + + if (trim(ice_data_conc) == 'p5') then + hbar = c2 ! initial ice thickness + abar = p5 ! initial ice concentration + elseif (trim(ice_data_conc) == 'p8') then + hbar = c1 ! initial ice thickness + abar = 0.8_dbl_kind ! initial ice concentration + elseif (trim(ice_data_conc) == 'p9') then + hbar = c1 ! initial ice thickness + abar = 0.9_dbl_kind ! initial ice concentration + elseif (trim(ice_data_conc) == 'c1') then + hbar = c1 ! initial ice thickness + abar = c1 ! initial ice concentration + endif - hbar = c2 ! initial ice thickness do n = 1, ncat hinit(n) = c0 ainit(n) = c0 - if (hbar > hin_max(n-1) .and. hbar < hin_max(n)) then + if (hbar > hin_max(n-1) .and. hbar <= hin_max(n)) then hinit(n) = hbar - ainit(n) = p5 !echmod symm + ainit(n) = abar endif enddo - elseif (trim(ice_data_type) == 'boxslotcyl') then - - hbar = c1 ! initial ice thickness (1 m) + elseif (trim(ice_data_conc) == 'parabolic') then + + ! initial category areas in cells with ice + hbar = c3 ! initial ice thickness with greatest area + ! Note: the resulting average ice thickness + ! tends to be less than hbar due to the + ! nonlinear distribution of ice thicknesses + sum = c0 do n = 1, ncat - hinit(n) = c0 - ainit(n) = c0 - if (hbar > hin_max(n-1) .and. hbar < hin_max(n)) then - hinit(n) = hbar - ainit(n) = c1 !echmod symm + if (n < ncat) then + hinit(n) = p5*(hin_max(n-1) + hin_max(n)) ! m + else ! n=ncat + hinit(n) = (hin_max(n-1) + c1) ! m endif + ! parabola, max at h=hbar, zero at h=0, 2*hbar + ainit(n) = max(c0, (c2*hbar*hinit(n) - hinit(n)**2)) + sum = sum + ainit(n) enddo - + do n = 1, ncat + ainit(n) = ainit(n) / (sum + puny/ncat) ! normalize + enddo + else - ! initial category areas in cells with ice - hbar = c3 ! initial ice thickness with greatest area - ! Note: the resulting average ice thickness - ! tends to be less than hbar due to the - ! 
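! Aside (illustrative, not part of the patch): the 'parabolic' profile above
! weights category n by a(h) = max(0, 2*hbar*h - h**2), a parabola peaking at
! h = hbar = 3 m and vanishing at h = 0 and h = 2*hbar, then normalizes the
! areas. Self-contained sketch with illustrative 5-category bounds (the
! model takes hin_max from ice_arrays_column):
      program parabolic_itd
         implicit none
         integer, parameter :: ncat = 5
         real(8), parameter :: hbar = 3.0d0
         real(8) :: hin_max(0:ncat), hinit(ncat), ainit(ncat), s
         integer :: n
         hin_max = (/ 0.0d0, 0.64d0, 1.39d0, 2.47d0, 4.57d0, 9.33d0 /)
         s = 0.0d0
         do n = 1, ncat
            if (n < ncat) then
               hinit(n) = 0.5d0*(hin_max(n-1) + hin_max(n))   ! mid-category thickness
            else
               hinit(n) = hin_max(n-1) + 1.0d0                ! open-ended top category
            endif
            ainit(n) = max(0.0d0, 2.0d0*hbar*hinit(n) - hinit(n)**2)
            s = s + ainit(n)
         enddo
         ainit = ainit / s                                    ! normalize to sum ~1
         print '(a,5f8.4)', ' ainit = ', ainit
      end program parabolic_itd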
nonlinear distribution of ice thicknesses - sum = c0 - do n = 1, ncat - if (n < ncat) then - hinit(n) = p5*(hin_max(n-1) + hin_max(n)) ! m - else ! n=ncat - hinit(n) = (hin_max(n-1) + c1) ! m - endif - ! parabola, max at h=hbar, zero at h=0, 2*hbar - ainit(n) = max(c0, (c2*hbar*hinit(n) - hinit(n)**2)) - sum = sum + ainit(n) - enddo - do n = 1, ncat - ainit(n) = ainit(n) / (sum + puny/ncat) ! normalize - enddo + call abort_ice(subname//'ERROR: ice_data_conc setting = '//trim(ice_data_conc), & + file=__FILE__, line=__LINE__) - endif ! ice_data_type + endif ! ice_data_conc - if (trim(grid_type) == 'rectangular') then + !--------------------------------------------------------- + ! location of ice + !--------------------------------------------------------- - ! place ice on left side of domain - icells = 0 - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j)) then - if (ULON(i,j) < -50./rad_to_deg) then + if (trim(ice_data_type) == 'box2001') then + + ! place ice on left side of domain + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j)) then + if (ULON(i,j) < -50./rad_to_deg) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif ! ULON + endif ! tmask + enddo ! i + enddo ! j + + elseif (trim(ice_data_type) == 'boxslotcyl') then + + ! Geometric configuration of the slotted cylinder + diam = p3 *dxrect*(nx_global-1) + center_x = p5 *dxrect*(nx_global-1) + center_y = p75*dyrect*(ny_global-1) + radius = p5*diam + width = p166*diam + length = c5*p166*diam + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j)) then + ! check if grid point is inside slotted cylinder + in_slot = (dxrect*real(iglob(i)-1, kind=dbl_kind) >= center_x - width/c2) .and. & + (dxrect*real(iglob(i)-1, kind=dbl_kind) <= center_x + width/c2) .and. & + (dyrect*real(jglob(j)-1, kind=dbl_kind) >= center_y - radius) .and. & + (dyrect*real(jglob(j)-1, kind=dbl_kind) <= center_y + (length - radius)) + + in_cyl = sqrt((dxrect*real(iglob(i)-1, kind=dbl_kind) - center_x)**c2 + & + (dyrect*real(jglob(j)-1, kind=dbl_kind) - center_y)**c2) <= radius + + if (in_cyl .and. .not. in_slot) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + endif + enddo + enddo + + elseif (trim(ice_data_type) == 'uniform') then + ! all cells that are not land are ice + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j)) then icells = icells + 1 indxi(icells) = i indxj(icells) = j - endif ! ULON - endif ! tmask - enddo ! i - enddo ! j + endif + enddo + enddo - else + elseif (trim(ice_data_type) == 'channel') then + ! channel of ice in the center of the domain, running in the i direction + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (jglob(j) > ny_global/4 .and. jglob(j) < 3*ny_global/4) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo + enddo - ! place ice at high latitudes where ocean sfc is cold - icells = 0 - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j)) then - ! place ice in high latitudes where ocean sfc is cold - if ( (sst (i,j) <= Tf(i,j)+p2) .and. & - (TLAT(i,j) < edge_init_sh/rad_to_deg .or. & - TLAT(i,j) > edge_init_nh/rad_to_deg) ) then + elseif (trim(ice_data_type) == 'smallblock') then + ! 2x2 ice in center of domain + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if ((iglob(i) == nx_global/2 .or. iglob(i) == nx_global/2+1) .and. & + (jglob(j) == ny_global/2 .or. jglob(j) == ny_global/2+1)) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo + enddo + + elseif (trim(ice_data_type) == 'block') then + ! 
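! Aside (illustrative, not part of the patch): the boxslotcyl case above is
! the classic slotted-cylinder shape; the inlined predicate factors into two
! tests, shown here as a standalone function (hypothetical name, physical
! coordinates in metres):
      logical function in_slotted_cyl(x, y, cx, cy, radius, width, length)
         implicit none
         real(8), intent(in) :: x, y                   ! point being tested
         real(8), intent(in) :: cx, cy                 ! cylinder centre
         real(8), intent(in) :: radius, width, length  ! cylinder and slot geometry
         logical :: in_slot, in_cyl
         in_slot = (x >= cx - width/2.0d0) .and. (x <= cx + width/2.0d0) .and. &
                   (y >= cy - radius)      .and. (y <= cy + (length - radius))
         in_cyl  = sqrt((x - cx)**2 + (y - cy)**2) <= radius
         in_slotted_cyl = in_cyl .and. .not. in_slot   ! ice inside cylinder, outside slot
      end function in_slotted_cyl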
ice in 50% of domain, not at edges + icells = 0 + iedge = int(real(nx_global,kind=dbl_kind) * 0.25) + 1 + jedge = int(real(ny_global,kind=dbl_kind) * 0.25) + 1 + do j = jlo, jhi + do i = ilo, ihi + if ((iglob(i) > iedge .and. iglob(i) < nx_global-iedge+1) .and. & + (jglob(j) > jedge .and. jglob(j) < ny_global-jedge+1)) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo + enddo + + elseif (trim(ice_data_type) == 'bigblock') then + ! ice in 90% of domain, not at edges + icells = 0 + iedge = int(real(nx_global,kind=dbl_kind) * 0.05) + 1 + jedge = int(real(ny_global,kind=dbl_kind) * 0.05) + 1 + do j = jlo, jhi + do i = ilo, ihi + if ((iglob(i) > iedge .and. iglob(i) < nx_global-iedge+1) .and. & + (jglob(j) > jedge .and. jglob(j) < ny_global-jedge+1)) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo + enddo + + elseif (trim(ice_data_type) == 'easthalf') then + ! block on east half of domain + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (iglob(i) >= nx_global/2) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo + enddo + + elseif (trim(ice_data_type) == 'eastblock') then + ! block on east half of domain in center of domain + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (jglob(j) > ny_global/4 .and. jglob(j) < 3*ny_global/4 .and. & + iglob(i) >= nx_global/2) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo + enddo + + elseif (trim(ice_data_type) == 'latsst') then + + !----------------------------------------------------------------- + ! Place ice where ocean surface is cold. + ! Note: If SST is not read from a file, then the ocean is assumed + ! to be at its freezing point everywhere, and ice will + ! extend to the prescribed edges. + !----------------------------------------------------------------- + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j)) then + ! place ice in high latitudes where ocean sfc is cold + if ( (sst (i,j) <= Tf(i,j)+p2) .and. & + (TLAT(i,j) < edge_init_sh/rad_to_deg .or. & + TLAT(i,j) > edge_init_nh/rad_to_deg) ) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif ! cold surface + endif ! tmask + enddo ! i + enddo ! j + + else + + call abort_ice(subname//'ERROR: ice_data_type setting = '//trim(ice_data_type), & + file=__FILE__, line=__LINE__) + + endif ! ice_data_type - endif ! rectgrid + !--------------------------------------------------------- + ! ice distribution + !--------------------------------------------------------- do n = 1, ncat @@ -2760,7 +3152,7 @@ subroutine set_state_var (nx_block, ny_block, & aicen(i,j,n) = ainit(n) - if (trim(ice_data_type) == 'box2001') then + if (trim(ice_data_dist) == 'box2001') then if (hinit(n) > c0) then ! ! constant slope from 0 to 1 in x direction aicen(i,j,n) = (real(iglob(i), kind=dbl_kind)-p5) & / (real(nx_global,kind=dbl_kind)) * hinit(n) ! ! constant slope from 0 to 1 in y direction @@ -2780,19 +3172,29 @@ ! - real(jglob(j), kind=dbl_kind)-p5) & ! / (real(ny_global,kind=dbl_kind)) * p5) endif - vicen(i,j,n) = hinit(n) * aicen(i,j,n) ! m - elseif (trim(ice_data_type) == 'boxslotcyl') then + + elseif (trim(ice_data_dist) == 'gauss') then if (hinit(n) > c0) then - ! 
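! Aside (illustrative, not part of the patch): margin arithmetic for the
! 'block'/'bigblock' cases above, worked for a concrete size:
      program block_margins
         implicit none
         integer :: nx_global, iedge
         nx_global = 100
         ! 'block': 25% margin each side -> ice in iglob = iedge+1 .. nx_global-iedge
         iedge = int(real(nx_global,8) * 0.25d0) + 1                   ! = 26
         print *, 'block    columns with ice:', nx_global - 2*iedge   ! 48, ~50%
         ! 'bigblock': 5% margin each side
         iedge = int(real(nx_global,8) * 0.05d0) + 1                   ! = 6
         print *, 'bigblock columns with ice:', nx_global - 2*iedge   ! 88, ~90%
      end program block_margins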
slotted cylinder - call boxslotcyl_data_aice(aicen, i, j, & - nx_block, ny_block, & - n, ainit, & - iglob, jglob) + dist_ratio = 8._dbl_kind * & + sqrt((real(iglob(i),kind=dbl_kind)-real(nx_global+1,kind=dbl_kind)/c2)**2 + & + (real(jglob(j),kind=dbl_kind)-real(ny_global+1,kind=dbl_kind)/c2)**2) / & + sqrt((real(nx_global,kind=dbl_kind))**2 + & + (real(ny_global,kind=dbl_kind))**2) + aicen(i,j,n) = ainit(n) * exp(-dist_ratio) endif - vicen(i,j,n) = hinit(n) * aicen(i,j,n) ! m + + elseif (trim(ice_data_dist) == 'uniform') then + + ! nothing extra to do + else - vicen(i,j,n) = hinit(n) * ainit(n) ! m - endif + + call abort_ice(subname//'ERROR: ice_data_dist setting = '//trim(ice_data_dist), & + file=__FILE__, line=__LINE__) + + endif ! ice_data_dist + + vicen(i,j,n) = hinit(n) * aicen(i,j,n) ! m vsnon(i,j,n) = min(aicen(i,j,n)*hsno_init,p2*vicen(i,j,n)) call icepack_init_trcr(Tair = Tair(i,j), Tf = Tf(i,j), & @@ -2819,7 +3221,11 @@ subroutine set_state_var (nx_block, ny_block, & enddo ! ij enddo ! ncat - ! velocity initialization for special tests + !--------------------------------------------------------- + ! ice velocity + ! these velocities are defined on the B grid + !--------------------------------------------------------- + if (trim(ice_data_type) == 'boxslotcyl') then do j = 1, ny_block do i = 1, nx_block @@ -2829,7 +3235,11 @@ subroutine set_state_var (nx_block, ny_block, & uvel, vvel) enddo ! i enddo ! j + else + uvel = c0 + vvel = c0 endif + endif ! ice_ic call icepack_warnings_flush(nu_diag) @@ -2840,85 +3250,6 @@ end subroutine set_state_var !======================================================================= -! Set ice concentration for slotted cylinder advection test -! -! author: Philippe Blain (ECCC) - - subroutine boxslotcyl_data_aice(aicen, i, j, & - nx_block, ny_block, & - n, ainit, & - iglob, jglob) - - use ice_constants, only: c0, c2, c5, p3, p166, p75, p5 - use ice_domain_size, only: nx_global, ny_global, ncat - use ice_grid, only: dxrect, dyrect - - integer (kind=int_kind), intent(in) :: & - i, j , & ! local indices - nx_block, ny_block, & ! block dimensions - iglob(nx_block) , & ! global indices - jglob(ny_block) , & - n ! thickness category index - - real (kind=dbl_kind), dimension(ncat) :: & - ainit ! initial area - - real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), intent(out) :: & - aicen ! concentration of ice - - ! local variables - - logical :: in_slot, in_cyl, in_slotted_cyl - - real (kind=dbl_kind), dimension (2) :: & - slot_x, & ! geometric limits of the slot - slot_y - - real (kind=dbl_kind) :: & - diam , & ! cylinder diameter - radius , & ! cylinder radius - center_x, & ! cylinder center - center_y, & - width , & ! slot width - length ! slot height - - character(len=*), parameter :: subname = '(boxslotcyl_data_aice)' - - ! Geometric configuration of the slotted cylinder - diam = p3 *dxrect*(nx_global-1) - center_x = p5 *dxrect*(nx_global-1) - center_y = p75*dyrect*(ny_global-1) - radius = p5*diam - width = p166*diam - length = c5*p166*diam - - slot_x(1) = center_x - width/c2 - slot_x(2) = center_x + width/c2 - slot_y(1) = center_y - radius - slot_y(2) = center_y + (length - radius) - - ! check if grid point is inside slotted cylinder - in_slot = (dxrect*real(iglob(i)-1, kind=dbl_kind) >= slot_x(1)) .and. & - (dxrect*real(iglob(i)-1, kind=dbl_kind) <= slot_x(2)) .and. & - (dyrect*real(jglob(j)-1, kind=dbl_kind) >= slot_y(1)) .and. 
& - (dyrect*real(jglob(j)-1, kind=dbl_kind) <= slot_y(2)) - - in_cyl = sqrt((dxrect*real(iglob(i)-1, kind=dbl_kind) - center_x)**c2 + & - (dyrect*real(jglob(j)-1, kind=dbl_kind) - center_y)**c2) <= radius - - in_slotted_cyl = in_cyl .and. .not. in_slot - - if (in_slotted_cyl) then - aicen(i,j,n) = ainit(n) - else - aicen(i,j,n) = c0 - endif - - - end subroutine boxslotcyl_data_aice - -!======================================================================= - ! Set ice velocity for slotted cylinder advection test ! ! author: Philippe Blain (ECCC) @@ -2942,7 +3273,6 @@ subroutine boxslotcyl_data_vel(i, j, & uvel, vvel ! ice velocity ! local variables - real (kind=dbl_kind) :: & pi , & ! pi secday , & ! seconds per day diff --git a/cicecore/cicedynB/general/ice_state.F90 b/cicecore/cicedynB/general/ice_state.F90 index 362fd1413..e07eca209 100644 --- a/cicecore/cicedynB/general/ice_state.F90 +++ b/cicecore/cicedynB/general/ice_state.F90 @@ -107,8 +107,12 @@ module ice_state real (kind=dbl_kind), dimension(:,:,:), allocatable, & public :: & - uvel , & ! x-component of velocity (m/s) - vvel , & ! y-component of velocity (m/s) + uvel , & ! x-component of velocity on U grid (m/s) + vvel , & ! y-component of velocity on U grid (m/s) + uvelE , & ! x-component of velocity on E grid (m/s) + vvelE , & ! y-component of velocity on E grid (m/s) + uvelN , & ! x-component of velocity on N grid (m/s) + vvelN , & ! y-component of velocity on N grid (m/s) divu , & ! strain rate I component, velocity divergence (1/s) shear , & ! strain rate II component (1/s) strength ! ice strength (N/m) @@ -149,8 +153,12 @@ subroutine alloc_state vice (nx_block,ny_block,max_blocks) , & ! volume per unit area of ice (m) vsno (nx_block,ny_block,max_blocks) , & ! volume per unit area of snow (m) aice0 (nx_block,ny_block,max_blocks) , & ! concentration of open water - uvel (nx_block,ny_block,max_blocks) , & ! x-component of velocity (m/s) - vvel (nx_block,ny_block,max_blocks) , & ! y-component of velocity (m/s) + uvel (nx_block,ny_block,max_blocks) , & ! x-component of velocity on U grid (m/s) + vvel (nx_block,ny_block,max_blocks) , & ! y-component of velocity on U grid (m/s) + uvelE (nx_block,ny_block,max_blocks) , & ! x-component of velocity on E grid (m/s) + vvelE (nx_block,ny_block,max_blocks) , & ! y-component of velocity on E grid (m/s) + uvelN (nx_block,ny_block,max_blocks) , & ! x-component of velocity on N grid (m/s) + vvelN (nx_block,ny_block,max_blocks) , & ! y-component of velocity on N grid (m/s) divu (nx_block,ny_block,max_blocks) , & ! strain rate I component, velocity divergence (1/s) shear (nx_block,ny_block,max_blocks) , & ! strain rate II component (1/s) strength (nx_block,ny_block,max_blocks) , & ! 
ice strength (N/m) diff --git a/cicecore/cicedynB/general/ice_step_mod.F90 b/cicecore/cicedynB/general/ice_step_mod.F90 index bfe29efcd..3b0201cbf 100644 --- a/cicecore/cicedynB/general/ice_step_mod.F90 +++ b/cicecore/cicedynB/general/ice_step_mod.F90 @@ -37,7 +37,7 @@ module ice_step_mod public :: step_therm1, step_therm2, step_dyn_horiz, step_dyn_ridge, & step_snow, prep_radiation, step_radiation, ocean_mixed_layer, & - update_state, biogeochemistry, save_init, step_dyn_wave + update_state, biogeochemistry, step_dyn_wave, step_prep !======================================================================= @@ -51,6 +51,8 @@ subroutine save_init use ice_state, only: aice, aicen, aice_init, aicen_init, & vicen, vicen_init, vsnon, vsnon_init + character(len=*), parameter :: subname = '(save_init)' + !----------------------------------------------------------------- ! Save the ice area passed to the coupler (so that history fields ! can be made consistent with coupler fields). @@ -64,6 +66,27 @@ subroutine save_init end subroutine save_init +!======================================================================= + + subroutine step_prep +! prep for step, called outside nblock loop + + use ice_flux, only: uatm, vatm, uatmT, vatmT + use ice_grid, only: grid_atm_dynu, grid_atm_dynv, grid_average_X2Y + + character(len=*), parameter :: subname = '(step_prep)' + + ! Save initial state + + call save_init + + ! Compute uatmT, vatmT + + call grid_average_X2Y('S',uatm,grid_atm_dynu,uatmT,'T') + call grid_average_X2Y('S',vatm,grid_atm_dynv,vatmT,'T') + + end subroutine step_prep + !======================================================================= ! ! Scales radiation fields computed on the previous time step. @@ -170,7 +193,7 @@ subroutine step_therm1 (dt, iblk) use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, n_iso, n_aero use ice_flux, only: frzmlt, sst, Tf, strocnxT, strocnyT, rside, fbot, Tbot, Tsnice, & - meltsn, melttn, meltbn, congeln, snoicen, uatm, vatm, fside, & + meltsn, melttn, meltbn, congeln, snoicen, uatmT, vatmT, fside, & wind, rhoa, potT, Qa, zlvl, zlvs, strax, stray, flatn, fsensn, fsurfn, fcondtopn, & flw, fsnow, fpond, sss, mlt_onset, frz_onset, fcondbotn, fcondbot, fsloss, & frain, Tair, strairxT, strairyT, fsurf, fcondtop, fsens, & @@ -374,8 +397,8 @@ subroutine step_therm1 (dt, iblk) aeroice = aeroice (:,:,:), & isosno = isosno (:,:), & isoice = isoice (:,:), & - uatm = uatm (i,j, iblk), & - vatm = vatm (i,j, iblk), & + uatm = uatmT (i,j, iblk), & + vatm = vatmT (i,j, iblk), & wind = wind (i,j, iblk), & zlvl = zlvl (i,j, iblk), & zlvs = zlvs (i,j, iblk), & @@ -1383,7 +1406,7 @@ subroutine ocean_mixed_layer (dt, iblk) use ice_arrays_column, only: Cdn_atm, Cdn_atm_ratio use ice_blocks, only: nx_block, ny_block - use ice_flux, only: sst, Tf, Qa, uatm, vatm, wind, potT, rhoa, zlvl, & + use ice_flux, only: sst, Tf, Qa, uatmT, vatmT, wind, potT, rhoa, zlvl, & frzmlt, fhocn, fswthru, flw, flwout_ocn, fsens_ocn, flat_ocn, evap_ocn, & alvdr_ocn, alidr_ocn, alvdf_ocn, alidf_ocn, swidf, swvdf, swidr, swvdr, & qdp, hmix, strairx_ocn, strairy_ocn, Tref_ocn, Qref_ocn @@ -1466,8 +1489,8 @@ subroutine ocean_mixed_layer (dt, iblk) call icepack_atm_boundary(sfctype = 'ocn', & Tsf = sst (i,j,iblk), & potT = potT (i,j,iblk), & - uatm = uatm (i,j,iblk), & - vatm = vatm (i,j,iblk), & + uatm = uatmT (i,j,iblk), & + vatm = vatmT (i,j,iblk), & wind = wind (i,j,iblk), & zlvl = zlvl (i,j,iblk), & Qa = Qa (i,j,iblk), & diff --git 
a/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 index 3959f12cf..5c9a28f10 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 @@ -225,8 +225,8 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & !*** store some block info to fill haloes properly call ice_distributionGet(dist, numLocalBlocks=halo%numLocalBlocks) + allocate(halo%blockGlobalID(halo%numLocalBlocks)) if (halo%numLocalBlocks > 0) then - allocate(halo%blockGlobalID(halo%numLocalBlocks)) call ice_distributionGet(dist, blockGlobalID=halo%blockGlobalID) endif diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 index bc14e30d3..abec3758f 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 @@ -62,6 +62,7 @@ module ice_timers timer_sndrcv, &! time between send to receive #endif timer_bound, &! boundary updates + timer_bundbound, &! boundary updates bundling timer_bgc, &! biogeochemistry timer_forcing, &! forcing timer_evp_1d, &! timer only loop @@ -192,6 +193,7 @@ subroutine init_ice_timers call get_ice_timer(timer_diags, 'Diags ',nblocks,distrb_info%nprocs) call get_ice_timer(timer_hist, 'History ',nblocks,distrb_info%nprocs) call get_ice_timer(timer_bound, 'Bound', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_bundbound,'Bundbound',nblocks,distrb_info%nprocs) call get_ice_timer(timer_bgc, 'BGC', nblocks,distrb_info%nprocs) call get_ice_timer(timer_forcing, 'Forcing', nblocks,distrb_info%nprocs) #if (defined CESMCOUPLED) diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90 index 1e4307535..13ff6fcb8 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90 @@ -101,10 +101,10 @@ MODULE ice_reprosum !----------------------------------------------------------------------- subroutine ice_reprosum_setopts(repro_sum_use_ddpdd_in, & - repro_sum_rel_diff_max_in, & - repro_sum_recompute_in, & - repro_sum_master, & - repro_sum_logunit ) + repro_sum_rel_diff_max_in, & + repro_sum_recompute_in, & + repro_sum_master, & + repro_sum_logunit ) !------------------------------Arguments-------------------------------- logical, intent(in), optional :: repro_sum_use_ddpdd_in @@ -261,12 +261,12 @@ end subroutine ice_reprosum_setopts !---------------------------------------------------------------------- subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & - nflds, ddpdd_sum, & - arr_gbl_max, arr_gbl_max_out, & - arr_max_levels, arr_max_levels_out, & - gbl_max_nsummands, gbl_max_nsummands_out,& - gbl_count, repro_sum_validate, & - repro_sum_stats, rel_diff, commid ) + nflds, ddpdd_sum, & + arr_gbl_max, arr_gbl_max_out, & + arr_max_levels, arr_max_levels_out, & + gbl_max_nsummands, gbl_max_nsummands_out,& + gbl_count, repro_sum_validate, & + repro_sum_stats, rel_diff, commid ) !---------------------------------------------------------------------- ! Arguments @@ -435,7 +435,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & ! if (detailed_timing) call xicex_timer_start('ice_reprosum_ddpdd') call ice_reprosum_ddpdd(arr, arr_gsum, nsummands, dsummands, & - nflds, mpi_comm) + nflds, mpi_comm) repro_sum_fast = 1 ! 
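! Aside (illustrative, not part of the patch): the ice_boundary hunk above
! hoists the allocate out of the if-test. Zero-sized allocations are legal
! Fortran, so halo%blockGlobalID ends up allocated on every rank, including
! ranks that own no blocks. Minimal demonstration:
      program zero_size_alloc
         implicit none
         integer, allocatable :: ids(:)
         allocate(ids(0))                       ! legal: allocated with size 0
         print *, allocated(ids), size(ids)     ! prints: T 0
      end program zero_size_alloc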
if (detailed_timing) call xicex_timer_stop('ice_reprosum_ddpdd') @@ -775,9 +775,9 @@ end subroutine ice_reprosum_calc !---------------------------------------------------------------------- subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & - arr_max_shift, arr_gmax_exp, max_levels, & - max_level, validate, recompute, & - omp_nthreads, mpi_comm ) + arr_max_shift, arr_gmax_exp, max_levels, & + max_level, validate, recompute, & + omp_nthreads, mpi_comm ) !---------------------------------------------------------------------- @@ -1225,7 +1225,7 @@ end subroutine ice_reprosum_int !---------------------------------------------------------------------- logical function ice_reprosum_tolExceeded (name, nflds, master, & - logunit, rel_diff ) + logunit, rel_diff ) !---------------------------------------------------------------------- ! Arguments @@ -1311,7 +1311,7 @@ end function ice_reprosum_tolExceeded !---------------------------------------------------------------------- subroutine ice_reprosum_ddpdd (arr, arr_gsum, nsummands, dsummands, & - nflds, mpi_comm ) + nflds, mpi_comm ) !---------------------------------------------------------------------- ! Arguments diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 index b18c35040..be6e12253 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 @@ -54,6 +54,7 @@ module ice_timers timer_diags, &! diagnostics/history timer_hist, &! diagnostics/history timer_bound, &! boundary updates + timer_bundbound, &! boundary updates timer_bgc, &! biogeochemistry timer_forcing, &! forcing timer_evp_1d, &! timer only loop @@ -206,6 +207,7 @@ subroutine init_ice_timers call get_ice_timer(timer_diags, 'Diags ',nblocks,distrb_info%nprocs) call get_ice_timer(timer_hist, 'History ',nblocks,distrb_info%nprocs) call get_ice_timer(timer_bound, 'Bound', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_bundbound,'Bundbound',nblocks,distrb_info%nprocs) call get_ice_timer(timer_bgc, 'BGC', nblocks,distrb_info%nprocs) call get_ice_timer(timer_forcing, 'Forcing', nblocks,distrb_info%nprocs) call get_ice_timer(timer_evp_1d, '1d-evp', nblocks,distrb_info%nprocs) diff --git a/cicecore/cicedynB/infrastructure/ice_domain.F90 b/cicecore/cicedynB/infrastructure/ice_domain.F90 index c44e896ac..79f5bcb9a 100644 --- a/cicecore/cicedynB/infrastructure/ice_domain.F90 +++ b/cicecore/cicedynB/infrastructure/ice_domain.F90 @@ -63,6 +63,8 @@ module ice_domain maskhalo_dyn , & ! if true, use masked halo updates for dynamics maskhalo_remap , & ! if true, use masked halo updates for transport maskhalo_bound , & ! if true, use masked halo updates for bound_state + halo_dynbundle , & ! if true, bundle halo update in dynamics + landblockelim , & ! if true, land block elimination is on orca_halogrid ! if true, input fields are haloed as defined by orca grid !----------------------------------------------------------------------- @@ -79,6 +81,7 @@ module ice_domain ! 'rake', 'spacecurve', etc distribution_wght ! method for weighting work per block ! 'block' = POP default configuration + ! 'blockall' = no land block elimination ! 'latitude' = no. ocean points * |lat| ! 'file' = read distribution_wgth_file character (char_len_long) :: & @@ -153,13 +156,15 @@ subroutine init_domain_blocks maskhalo_dyn = .false. ! if true, use masked halos for dynamics maskhalo_remap = .false. ! 
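! Aside (illustrative, not part of the patch): timer_bundbound and
! halo_dynbundle relate to bundled halo exchanges, which trade message count
! for message size: several 2-d fields are stacked into one array with an
! extra dimension and exchanged in a single call. Commented sketch, assuming
! fields dimensioned (nx_block,ny_block,max_blocks) and a scratch array
! bundle(nx_block,ny_block,2,max_blocks); the field names are placeholders:
      !   ! unbundled: one exchange per field
      !   call ice_HaloUpdate(fld1, halo_info, field_loc_center, field_type_scalar)
      !   call ice_HaloUpdate(fld2, halo_info, field_loc_center, field_type_scalar)
      !   ! bundled: a single exchange
      !   bundle(:,:,1,:) = fld1(:,:,:)
      !   bundle(:,:,2,:) = fld2(:,:,:)
      !   call ice_HaloUpdate(bundle, halo_info, field_loc_center, field_type_scalar)
      !   fld1(:,:,:) = bundle(:,:,1,:)
      !   fld2(:,:,:) = bundle(:,:,2,:)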
if true, use masked halos for transport maskhalo_bound = .false. ! if true, use masked halos for bound_state + halo_dynbundle = .true. ! if true, bundle halo updates in dynamics add_mpi_barriers = .false. ! if true, throttle communication debug_blocks = .false. ! if true, print verbose block information - max_blocks = -1 ! max number of blocks per processor + max_blocks = -1 ! max number of blocks per processor block_size_x = -1 ! size of block in first horiz dimension block_size_y = -1 ! size of block in second horiz dimension nx_global = -1 ! NXGLOB, i-axis size ny_global = -1 ! NYGLOB, j-axis size + landblockelim = .true. ! on by default if (my_task == master_task) then write(nu_diag,*) subname,' Reading domain_nml' @@ -284,7 +289,7 @@ end subroutine init_domain_blocks !*********************************************************************** - subroutine init_domain_distribution(KMTG,ULATG) + subroutine init_domain_distribution(KMTG,ULATG,grid_ice) ! This routine calls appropriate setup routines to distribute blocks ! across processors and defines arrays with block ids for any local @@ -299,6 +304,9 @@ subroutine init_domain_distribution(KMTG,ULATG) KMTG ,&! global topography ULATG ! global latitude field (radians) + character(len=*), intent(in) :: & + grid_ice ! grid_ice, B, C, CD, etc + !---------------------------------------------------------------------- ! ! local variables @@ -316,6 +324,7 @@ subroutine init_domain_distribution(KMTG,ULATG) integer (int_kind) :: & i,j,n ,&! dummy loop indices ig,jg ,&! global indices + igm1,igp1,jgm1,jgp1,&! global indices ninfo ,&! ice_distributionGet check work_unit ,&! size of quantized work unit #ifdef USE_NETCDF @@ -449,6 +458,8 @@ subroutine init_domain_distribution(KMTG,ULATG) flat = 1 endif + if (distribution_wght == 'blockall') landblockelim = .false. + allocate(nocn(nblocks_tot)) if (distribution_wght == 'file') then @@ -504,10 +515,25 @@ subroutine init_domain_distribution(KMTG,ULATG) if (this_block%i_glob(i) > 0) then ig = this_block%i_glob(i) jg = this_block%j_glob(j) - if (KMTG(ig,jg) > puny .and. & - (ULATG(ig,jg) < shlat/rad_to_deg .or. & - ULATG(ig,jg) > nhlat/rad_to_deg) ) & - nocn(n) = nocn(n) + flat(ig,jg) + if (grid_ice == 'C' .or. grid_ice == 'CD') then + ! Have to be careful about block elimination with C/CD + ! Use a bigger stencil + igm1 = mod(ig-2+nx_global,nx_global)+1 + igp1 = mod(ig,nx_global)+1 + jgm1 = max(jg-1,1) + jgp1 = min(jg+1,ny_global) + if ((KMTG(ig ,jg ) > puny .or. & + KMTG(igm1,jg ) > puny .or. KMTG(igp1,jg ) > puny .or. & + KMTG(ig ,jgp1) > puny .or. KMTG(ig ,jgm1) > puny) .and. & + (ULATG(ig,jg) < shlat/rad_to_deg .or. & + ULATG(ig,jg) > nhlat/rad_to_deg) ) & + nocn(n) = nocn(n) + flat(ig,jg) + else + if (KMTG(ig,jg) > puny .and. & + (ULATG(ig,jg) < shlat/rad_to_deg .or. & + ULATG(ig,jg) > nhlat/rad_to_deg) ) & + nocn(n) = nocn(n) + flat(ig,jg) + endif endif end do endif @@ -524,8 +550,8 @@ subroutine init_domain_distribution(KMTG,ULATG) ! Keep all blocks even the ones only containing land points if (distribution_wght == 'block') nocn(n) = nx_block*ny_block #else - if (distribution_wght == 'block' .and. & ! POP style - nocn(n) > 0) nocn(n) = nx_block*ny_block + if (distribution_wght == 'block' .and. nocn(n) > 0) nocn(n) = nx_block*ny_block + if (.not. landblockelim) nocn(n) = max(nocn(n),1) #endif end do endif ! 
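! Aside (illustrative, not part of the patch): the mod arithmetic in the
! C/CD block-elimination stencil above yields cyclic east-west neighbours in
! 1-based indexing. Worked values:
      program cyclic_neighbours
         implicit none
         integer :: ig, igm1, igp1, nx_global
         nx_global = 100
         do ig = 1, nx_global, nx_global-1          ! test ig = 1 and ig = nx_global
            igm1 = mod(ig-2+nx_global, nx_global) + 1
            igp1 = mod(ig,             nx_global) + 1
            print *, ig, igm1, igp1                 ! 1 -> 100, 2 ; 100 -> 99, 1
         enddo
      end program cyclic_neighbours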
distribution_wght = file diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 2cf7775ab..1892a396e 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -14,6 +14,12 @@ ! 2006: Converted to free source form (F90) by Elizabeth Hunke ! 2007: Option to read from netcdf files (A. Keen, Met Office) ! Grid reading routines reworked by E. Hunke for boundary values +! 2021: Add N (center of north face) and E (center of east face) grids +! to support CD solvers. Defining T at center of cells, U at +! NE corner, N at center of top face, E at center of right face. +! All cells are quadrilaterals with NE, E, and N associated with +! directions relative to logical grid. E is in the increasing i (x) +! direction and N in the increasing j (y) direction. module ice_grid @@ -39,18 +45,30 @@ module ice_grid implicit none private - public :: init_grid1, init_grid2, & - t2ugrid_vector, u2tgrid_vector, & - to_ugrid, to_tgrid, alloc_grid, makemask + public :: init_grid1, init_grid2, grid_average_X2Y, & + alloc_grid, makemask, grid_neighbor_min, grid_neighbor_max character (len=char_len_long), public :: & grid_format , & ! file format ('bin'=binary or 'nc'=netcdf) gridcpl_file , & ! input file for POP coupling grid info grid_file , & ! input file for POP grid info kmt_file , & ! input file for POP grid info + kmt_type , & ! options are file, default, channel, wall, boxislands bathymetry_file, & ! input bathymetry for seabed stress bathymetry_format, & ! bathymetry file format (default or pop) grid_spacing , & ! default of 30.e3m or set by user in namelist + grid_ice , & ! Underlying model grid structure (A, B, C, CD) + grid_ice_thrm, & ! ice grid for thermo fields (T, U, N, E) + grid_ice_dynu, & ! ice grid for dyn U fields (T, U, N, E) + grid_ice_dynv, & ! ice grid for dyn V fields (T, U, N, E) + grid_atm , & ! atmos forcing grid structure (A, B, C, CD) + grid_atm_thrm, & ! atmos forcing grid for thermo fields (T, U, N, E) + grid_atm_dynu, & ! atmos forcing grid for dyn U fields (T, U, N, E) + grid_atm_dynv, & ! atmos forcing grid for dyn V fields (T, U, N, E) + grid_ocn , & ! ocean forcing grid structure (A, B, C, CD) + grid_ocn_thrm, & ! ocean forcing grid for thermo fields (T, U, N, E) + grid_ocn_dynu, & ! ocean forcing grid for dyn U fields (T, U, N, E) + grid_ocn_dynv, & ! ocean forcing grid for dyn V fields (T, U, N, E) grid_type ! current options are rectangular (default), ! displaced_pole, tripole, regional @@ -59,19 +77,30 @@ module ice_grid dyt , & ! height of T-cell through the middle (m) dxu , & ! width of U-cell through the middle (m) dyu , & ! height of U-cell through the middle (m) + dxn , & ! width of N-cell through the middle (m) + dyn , & ! height of N-cell through the middle (m) + dxe , & ! width of E-cell through the middle (m) + dye , & ! height of E-cell through the middle (m) HTE , & ! length of eastern edge of T-cell (m) HTN , & ! length of northern edge of T-cell (m) tarea , & ! area of T-cell (m^2) uarea , & ! area of U-cell (m^2) + narea , & ! area of N-cell (m^2) + earea , & ! area of E-cell (m^2) tarear , & ! 1/tarea uarear , & ! 1/uarea - tinyarea,& ! puny*tarea + narear , & ! 1/narea + earear , & ! 1/earea tarean , & ! area of NH T-cells tareas , & ! area of SH T-cells - ULON , & ! longitude of velocity pts (radians) - ULAT , & ! latitude of velocity pts (radians) - TLON , & ! longitude of temp pts (radians) - TLAT , & ! latitude of temp pts (radians) + ULON , & ! 
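! Aside (not part of the patch): staggering of the grids introduced above,
! sketched for one quadrilateral T cell on the logical grid:
!
!      U(i-1,j  ) ---- N(i,j  ) ---- U(i,j  )
!          |                            |
!      E(i-1,j  )       T(i,j)       E(i,j  )
!          |                            |
!      U(i-1,j-1) ---- N(i,j-1) ---- U(i,j-1)
!
! The A grid collocates velocities with T, the B grid carries both velocity
! components at U, the C grid carries u at E and v at N, and the CD grid
! carries both components at both N and E points.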
longitude of velocity pts, NE corner of T pts (radians) + ULAT , & ! latitude of velocity pts, NE corner of T pts (radians) + TLON , & ! longitude of temp (T) pts (radians) + TLAT , & ! latitude of temp (T) pts (radians) + NLON , & ! longitude of center of north face of T pts (radians) + NLAT , & ! latitude of center of north face of T pts (radians) + ELON , & ! longitude of center of east face of T pts (radians) + ELAT , & ! latitude of center of east face of T pts (radians) ANGLE , & ! for conversions between POP grid and lat/lon ANGLET , & ! ANGLE converted to T-cells bathymetry , & ! ocean depth, for grounding keels and bergs (m) @@ -90,6 +119,12 @@ module ice_grid dxhy , & ! 0.5*(HTE(i,j) - HTW(i,j)) = 0.5*(HTE(i,j) - HTE(i-1,j)) dyhx ! 0.5*(HTN(i,j) - HTS(i,j)) = 0.5*(HTN(i,j) - HTN(i,j-1)) + real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & + ratiodxN , & ! - dxn(i+1,j) / dxn(i,j) + ratiodyE , & ! - dye(i ,j+1) / dye(i,j) + ratiodxNr , & ! 1 / ratiodxN + ratiodyEr ! 1 / ratiodyE + ! grid dimensions for rectangular grid real (kind=dbl_kind), public :: & dxrect, & ! user_specified spacing (cm) in x-direction (uniform HTN) @@ -100,7 +135,11 @@ module ice_grid lont_bounds, & ! longitude of gridbox corners for T point latt_bounds, & ! latitude of gridbox corners for T point lonu_bounds, & ! longitude of gridbox corners for U point - latu_bounds ! latitude of gridbox corners for U point + latu_bounds, & ! latitude of gridbox corners for U point + lonn_bounds, & ! longitude of gridbox corners for N point + latn_bounds, & ! latitude of gridbox corners for N point + lone_bounds, & ! longitude of gridbox corners for E point + late_bounds ! latitude of gridbox corners for E point ! geometric quantities used for remapping transport real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & @@ -126,7 +165,9 @@ module ice_grid real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & hm , & ! land/boundary mask, thickness (T-cell) bm , & ! task/block id - uvm , & ! land/boundary mask, velocity (U-cell) + uvm , & ! land/boundary mask (U-cell) + npm , & ! land/boundary mask (N-cell) + epm , & ! land/boundary mask (E-cell) kmt ! ocean topography mask for bathymetry (T-cell) logical (kind=log_kind), public :: & @@ -136,7 +177,10 @@ module ice_grid logical (kind=log_kind), & dimension (:,:,:), allocatable, public :: & tmask , & ! land/boundary mask, thickness (T-cell) - umask , & ! land/boundary mask, velocity (U-cell) + umask , & ! land/boundary mask (U-cell) (1 if all surrounding T cells are ocean) + umaskCD, & ! land/boundary mask (U-cell) (1 if at least two surrounding T cells are ocean) + nmask , & ! land/boundary mask, (N-cell) + emask , & ! land/boundary mask, (E-cell) lmask_n, & ! northern hemisphere mask lmask_s ! southern hemisphere mask @@ -146,6 +190,11 @@ module ice_grid logical (kind=log_kind), private :: & l_readCenter ! If anglet exist in grid file read it otherwise calculate it + interface grid_average_X2Y + module procedure grid_average_X2Y_base , & + grid_average_X2Y_userwghts, & + grid_average_X2Y_NEversion + end interface !======================================================================= @@ -166,19 +215,30 @@ subroutine alloc_grid dyt (nx_block,ny_block,max_blocks), & ! height of T-cell through the middle (m) dxu (nx_block,ny_block,max_blocks), & ! width of U-cell through the middle (m) dyu (nx_block,ny_block,max_blocks), & ! height of U-cell through the middle (m) + dxn (nx_block,ny_block,max_blocks), & ! 
width of N-cell through the middle (m) + dyn (nx_block,ny_block,max_blocks), & ! height of N-cell through the middle (m) + dxe (nx_block,ny_block,max_blocks), & ! width of E-cell through the middle (m) + dye (nx_block,ny_block,max_blocks), & ! height of E-cell through the middle (m) HTE (nx_block,ny_block,max_blocks), & ! length of eastern edge of T-cell (m) HTN (nx_block,ny_block,max_blocks), & ! length of northern edge of T-cell (m) tarea (nx_block,ny_block,max_blocks), & ! area of T-cell (m^2) uarea (nx_block,ny_block,max_blocks), & ! area of U-cell (m^2) + narea (nx_block,ny_block,max_blocks), & ! area of N-cell (m^2) + earea (nx_block,ny_block,max_blocks), & ! area of E-cell (m^2) tarear (nx_block,ny_block,max_blocks), & ! 1/tarea uarear (nx_block,ny_block,max_blocks), & ! 1/uarea - tinyarea (nx_block,ny_block,max_blocks), & ! puny*tarea + narear (nx_block,ny_block,max_blocks), & ! 1/narea + earear (nx_block,ny_block,max_blocks), & ! 1/earea tarean (nx_block,ny_block,max_blocks), & ! area of NH T-cells tareas (nx_block,ny_block,max_blocks), & ! area of SH T-cells - ULON (nx_block,ny_block,max_blocks), & ! longitude of velocity pts (radians) - ULAT (nx_block,ny_block,max_blocks), & ! latitude of velocity pts (radians) - TLON (nx_block,ny_block,max_blocks), & ! longitude of temp pts (radians) - TLAT (nx_block,ny_block,max_blocks), & ! latitude of temp pts (radians) + ULON (nx_block,ny_block,max_blocks), & ! longitude of U pts, NE corner (radians) + ULAT (nx_block,ny_block,max_blocks), & ! latitude of U pts, NE corner (radians) + TLON (nx_block,ny_block,max_blocks), & ! longitude of T pts (radians) + TLAT (nx_block,ny_block,max_blocks), & ! latitude of T pts (radians) + NLON (nx_block,ny_block,max_blocks), & ! longitude of N pts, N face (radians) + NLAT (nx_block,ny_block,max_blocks), & ! latitude of N pts, N face (radians) + ELON (nx_block,ny_block,max_blocks), & ! longitude of E pts, E face (radians) + ELAT (nx_block,ny_block,max_blocks), & ! latitude of E pts, E face (radians) ANGLE (nx_block,ny_block,max_blocks), & ! for conversions between POP grid and lat/lon ANGLET (nx_block,ny_block,max_blocks), & ! ANGLE converted to T-cells bathymetry(nx_block,ny_block,max_blocks),& ! ocean depth, for grounding keels and bergs (m) @@ -195,17 +255,26 @@ subroutine alloc_grid yyav (nx_block,ny_block,max_blocks), & ! mean T-cell value of yy hm (nx_block,ny_block,max_blocks), & ! land/boundary mask, thickness (T-cell) bm (nx_block,ny_block,max_blocks), & ! task/block id - uvm (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) + uvm (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) (1 if all surrounding T cells are ocean) + npm (nx_block,ny_block,max_blocks), & ! land/boundary mask (N-cell) + epm (nx_block,ny_block,max_blocks), & ! land/boundary mask (E-cell) kmt (nx_block,ny_block,max_blocks), & ! ocean topography mask for bathymetry (T-cell) tmask (nx_block,ny_block,max_blocks), & ! land/boundary mask, thickness (T-cell) umask (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) + umaskCD (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) + nmask (nx_block,ny_block,max_blocks), & ! land/boundary mask (N-cell) + emask (nx_block,ny_block,max_blocks), & ! land/boundary mask (E-cell) lmask_n (nx_block,ny_block,max_blocks), & ! northern hemisphere mask lmask_s (nx_block,ny_block,max_blocks), & ! southern hemisphere mask rndex_global(nx_block,ny_block,max_blocks), & ! 
global index for local subdomain (dbl) lont_bounds(4,nx_block,ny_block,max_blocks), & ! longitude of gridbox corners for T point latt_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for T point lonu_bounds(4,nx_block,ny_block,max_blocks), & ! longitude of gridbox corners for U point - latu_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for U point + latu_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for U point + lonn_bounds(4,nx_block,ny_block,max_blocks), & ! longitude of gridbox corners for N point + latn_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for N point + lone_bounds(4,nx_block,ny_block,max_blocks), & ! longitude of gridbox corners for E point + late_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for E point mne (2,2,nx_block,ny_block,max_blocks), & ! matrices used for coordinate transformations in remapping mnw (2,2,nx_block,ny_block,max_blocks), & ! ne = northeast corner, nw = northwest, etc. mse (2,2,nx_block,ny_block,max_blocks), & @@ -213,6 +282,16 @@ subroutine alloc_grid stat=ierr) if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory') + if (grid_ice == 'CD' .or. grid_ice == 'C') then + allocate( & + ratiodxN (nx_block,ny_block,max_blocks), & + ratiodyE (nx_block,ny_block,max_blocks), & + ratiodxNr(nx_block,ny_block,max_blocks), & + ratiodyEr(nx_block,ny_block,max_blocks), & + stat=ierr) + if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory') + endif + if (pgl_global_ext) then allocate( & G_HTE(nx_global+2*nghost, ny_global+2*nghost), & ! length of eastern edge of T-cell (global ext.) @@ -321,7 +400,7 @@ subroutine init_grid1 ! distribute blocks among processors !----------------------------------------------------------------- - call init_domain_distribution(work_g2, work_g1) ! KMT, ULAT + call init_domain_distribution(work_g2, work_g1, grid_ice) ! KMT, ULAT deallocate(work_g1) deallocate(work_g2) @@ -355,7 +434,7 @@ subroutine init_grid2 use ice_blocks, only: get_block, block, nx_block, ny_block use ice_constants, only: c0, c1, c2, p5, p25, c1p5, & - field_loc_center, field_loc_NEcorner, & + field_loc_center, field_loc_NEcorner, field_loc_Nface, field_loc_Eface, & field_type_scalar, field_type_vector, field_type_angle use ice_domain_size, only: max_blocks #if defined (_OPENMP) @@ -439,6 +518,10 @@ subroutine init_grid2 !----------------------------------------------------------------- ! T-grid cell and U-grid cell quantities + ! Fill halo data locally where possible to avoid missing + ! data associated with land block elimination + ! Note: HTN, HTE, dx*, dy* are all defined from global arrays + ! at halos. !----------------------------------------------------------------- !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) @@ -449,10 +532,13 @@ subroutine init_grid2 jlo = this_block%jlo jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi + do j = 1,ny_block + do i = 1,nx_block tarea(i,j,iblk) = dxt(i,j,iblk)*dyt(i,j,iblk) uarea(i,j,iblk) = dxu(i,j,iblk)*dyu(i,j,iblk) + narea(i,j,iblk) = dxn(i,j,iblk)*dyn(i,j,iblk) + earea(i,j,iblk) = dxe(i,j,iblk)*dye(i,j,iblk) + if (tarea(i,j,iblk) > c0) then tarear(i,j,iblk) = c1/tarea(i,j,iblk) else @@ -463,8 +549,22 @@ subroutine init_grid2 else uarear(i,j,iblk) = c0 ! possible on boundaries endif - tinyarea(i,j,iblk) = puny*tarea(i,j,iblk) + if (narea(i,j,iblk) > c0) then + narear(i,j,iblk) = c1/narea(i,j,iblk) + else + narear(i,j,iblk) = c0 ! 
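! Aside (illustrative, not part of the patch): the guarded reciprocals above
! avoid dividing by the zero-area cells that can occur on boundaries. The
! same idiom as a reusable elemental helper (hypothetical name):
      elemental function safe_recip(a) result(r)
         use ice_kinds_mod, only: dbl_kind
         real (kind=dbl_kind), intent(in) :: a
         real (kind=dbl_kind) :: r
         r = 0.0_dbl_kind
         if (a > 0.0_dbl_kind) r = 1.0_dbl_kind/a   ! c0 where the area vanishes
      end function safe_recip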
possible on boundaries + endif + if (earea(i,j,iblk) > c0) then + earear(i,j,iblk) = c1/earea(i,j,iblk) + else + earear(i,j,iblk) = c0 ! possible on boundaries + endif + + enddo + enddo + do j = jlo, jhi + do i = ilo, ihi dxhy(i,j,iblk) = p5*(HTE(i,j,iblk) - HTE(i-1,j,iblk)) dyhx(i,j,iblk) = p5*(HTN(i,j,iblk) - HTN(i,j-1,iblk)) enddo @@ -480,6 +580,17 @@ subroutine init_grid2 enddo enddo + if (grid_ice == 'CD' .or. grid_ice == 'C') then + do j = jlo, jhi + do i = ilo, ihi + ratiodxN (i,j,iblk) = - dxn(i+1,j ,iblk) / dxn(i,j,iblk) + ratiodyE (i,j,iblk) = - dye(i ,j+1,iblk) / dye(i,j,iblk) + ratiodxNr(i,j,iblk) = c1 / ratiodxn(i,j,iblk) + ratiodyEr(i,j,iblk) = c1 / ratiodye(i,j,iblk) + enddo + enddo + endif + enddo ! iblk !$OMP END PARALLEL DO @@ -494,21 +605,6 @@ subroutine init_grid2 !----------------------------------------------------------------- call ice_timer_start(timer_bound) - call ice_HaloUpdate (tarea, halo_info, & - field_loc_center, field_type_scalar, & - fillValue=c1) - call ice_HaloUpdate (uarea, halo_info, & - field_loc_NEcorner, field_type_scalar, & - fillValue=c1) - call ice_HaloUpdate (tarear, halo_info, & - field_loc_center, field_type_scalar, & - fillValue=c1) - call ice_HaloUpdate (uarear, halo_info, & - field_loc_NEcorner, field_type_scalar, & - fillValue=c1) - call ice_HaloUpdate (tinyarea, halo_info, & - field_loc_center, field_type_scalar, & - fillValue=c1) call ice_HaloUpdate (dxhy, halo_info, & field_loc_center, field_type_vector, & fillValue=c1) @@ -615,6 +711,7 @@ subroutine init_grid2 !---------------------------------------------------------------- call gridbox_corners + call gridbox_edges !----------------------------------------------------------------- ! Compute global index (used for unpacking messages from coupler) @@ -658,7 +755,7 @@ end subroutine init_grid2 subroutine popgrid use ice_blocks, only: nx_block, ny_block - use ice_constants, only: c0, c1, & + use ice_constants, only: c0, c1, p5, & field_loc_center, field_loc_NEcorner, & field_type_scalar, field_type_angle use ice_domain_size, only: max_blocks @@ -706,7 +803,7 @@ subroutine popgrid do j = jlo, jhi do i = ilo, ihi kmt(i,j,iblk) = work1(i,j,iblk) - if (kmt(i,j,iblk) >= c1) hm(i,j,iblk) = c1 + if (kmt(i,j,iblk) >= p5) hm(i,j,iblk) = c1 enddo enddo enddo @@ -743,10 +840,10 @@ subroutine popgrid !----------------------------------------------------------------- call ice_read_global(nu_grid,3,work_g1,'rda8',.true.) ! HTN - call primary_grid_lengths_HTN(work_g1) ! dxu, dxt + call primary_grid_lengths_HTN(work_g1) ! dxu, dxt, dxn, dxe call ice_read_global(nu_grid,4,work_g1,'rda8',.true.) ! HTE - call primary_grid_lengths_HTE(work_g1) ! dyu, dyt + call primary_grid_lengths_HTE(work_g1) ! dyu, dyt, dyn, dye deallocate(work_g1) @@ -920,10 +1017,10 @@ subroutine popgrid_nc fieldname='htn' call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! HTN - call primary_grid_lengths_HTN(work_g1) ! dxu, dxt + call primary_grid_lengths_HTN(work_g1) ! dxu, dxt, dxn, dxe fieldname='hte' call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! HTE - call primary_grid_lengths_HTE(work_g1) ! dyu, dyt + call primary_grid_lengths_HTE(work_g1) ! 
dyu, dyt, dyn, dye deallocate(work_g1) @@ -1177,7 +1274,6 @@ subroutine latlongrid endif tarear(i,j,iblk) = c1/tarea(i,j,iblk) uarear(i,j,iblk) = c1/uarea(i,j,iblk) - tinyarea(i,j,iblk) = puny*tarea(i,j,iblk) if (single_column) then ULAT (i,j,iblk) = TLAT(i,j,iblk)+(pi/nj) @@ -1189,6 +1285,10 @@ subroutine latlongrid endif endif ULON (i,j,iblk) = c0 + NLON (i,j,iblk) = c0 + NLAT (i,j,iblk) = c0 + ELON (i,j,iblk) = c0 + ELAT (i,j,iblk) = c0 ANGLE (i,j,iblk) = c0 ANGLET(i,j,iblk) = c0 @@ -1198,6 +1298,10 @@ subroutine latlongrid dyt (i,j,iblk) = 1.e36_dbl_kind dxu (i,j,iblk) = 1.e36_dbl_kind dyu (i,j,iblk) = 1.e36_dbl_kind + dxn (i,j,iblk) = 1.e36_dbl_kind + dyn (i,j,iblk) = 1.e36_dbl_kind + dxe (i,j,iblk) = 1.e36_dbl_kind + dye (i,j,iblk) = 1.e36_dbl_kind dxhy (i,j,iblk) = 1.e36_dbl_kind dyhx (i,j,iblk) = 1.e36_dbl_kind cyp (i,j,iblk) = 1.e36_dbl_kind @@ -1309,7 +1413,7 @@ subroutine rectgrid enddo enddo endif - call primary_grid_lengths_HTN(work_g1) ! dxu, dxt + call primary_grid_lengths_HTN(work_g1) ! dxu, dxt, dxn, dxe if (my_task == master_task) then do j = 1, ny_global @@ -1318,7 +1422,7 @@ subroutine rectgrid enddo enddo endif - call primary_grid_lengths_HTE(work_g1) ! dyu, dyt + call primary_grid_lengths_HTE(work_g1) ! dyu, dyt, dyn, dye !----------------------------------------------------------------- ! Construct T-cell land mask @@ -1328,15 +1432,27 @@ subroutine rectgrid if (my_task == master_task) then work_g1(:,:) = c0 ! initialize hm as land - if (trim(ew_boundary_type) == 'cyclic') then + if (trim(kmt_type) == 'boxislands') then + + call grid_boxislands_kmt(work_g1) - do j = 3,ny_global-2 ! closed top and bottom - do i = 1,nx_global ! open sides - work_g1(i,j) = c1 ! NOTE nx_global > 5 + elseif (trim(kmt_type) == 'channel') then + + do j = 3,ny_global-2 ! closed top and bottom + do i = 1,nx_global ! open sides + work_g1(i,j) = c1 ! NOTE nx_global > 5 + enddo + enddo + + elseif (trim(kmt_type) == 'wall') then + + do j = 1,ny_global ! open except + do i = 1,nx_global-2 ! closed east edge + work_g1(i,j) = c1 enddo enddo - elseif (trim(ew_boundary_type) == 'open') then + elseif (trim(kmt_type) == 'default') then ! land in the upper left and lower right corners, ! otherwise open boundaries @@ -1351,32 +1467,33 @@ subroutine rectgrid if (nx_global > 5 .and. ny_global > 5) then - do j = 1, jmid+2 - do i = 1, imid+2 - work_g1(i,j) = c1 ! open lower left corner - enddo - enddo + do j = 1, jmid+2 + do i = 1, imid+2 + work_g1(i,j) = c1 ! open lower left corner + enddo + enddo - do j = max(jmid-2,1), ny_global - do i = max(imid-2,1), nx_global - work_g1(i,j) = c1 ! open upper right corner - enddo - enddo + do j = max(jmid-2,1), ny_global + do i = max(imid-2,1), nx_global + work_g1(i,j) = c1 ! open upper right corner + enddo + enddo - endif + endif ! > 5x5 grid - if (close_boundaries) then - work_g1(:, 1:2) = c0 - work_g1(:, ny_global-1:ny_global) = c0 - work_g1(1:2, :) = c0 - work_g1(nx_global-1:nx_global, :) = c0 - endif + else - elseif (trim(ew_boundary_type) == 'closed') then + call abort_ice(subname//'ERROR: unknown kmt_type '//trim(kmt_type)) - call abort_ice(subname//'ERROR: closed boundaries not available') + endif ! 
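!-----------------------------------------------------------------
! Illustrative sketch (not part of this patch) of two of the kmt_type
! land-mask options selected above: 'channel' closes two rows at the
! top and bottom and leaves the (cyclic) sides open, while 'wall'
! closes the two easternmost columns. The 10x8 grid is hypothetical.
!-----------------------------------------------------------------
      program kmt_sketch
      implicit none
      integer, parameter :: nx = 10, ny = 8
      real :: work(nx,ny)
      integer :: i, j
      character (len=8) :: kmt_type
      kmt_type = 'channel'
      work(:,:) = 0.0                 ! initialize as land
      if (trim(kmt_type) == 'channel') then
         do j = 3, ny-2               ! closed top and bottom
         do i = 1, nx                 ! open sides
            work(i,j) = 1.0
         enddo
         enddo
      elseif (trim(kmt_type) == 'wall') then
         do j = 1, ny                 ! open except
         do i = 1, nx-2               ! closed east edge
            work(i,j) = 1.0
         enddo
         enddo
      endif
      do j = ny, 1, -1
         print '(10f4.0)', work(:,j)  ! 1 = ocean, 0 = land
      enddo
      end program kmt_sketch
!-----------------------------------------------------------------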
kmt_type + if (close_boundaries) then + work_g1(:, 1:2) = c0 + work_g1(:, ny_global-1:ny_global) = c0 + work_g1(1:2, :) = c0 + work_g1(nx_global-1:nx_global, :) = c0 endif + endif call scatter_global(hm, work_g1, master_task, distrb_info, & @@ -1386,6 +1503,134 @@ subroutine rectgrid end subroutine rectgrid +!======================================================================= + + ! Complex land mask for testing box cases + ! Requires nx_global, ny_global > 20 + ! Assumes work array has been initialized to 1 (ocean) and north and + ! south land boundaries have been applied (ew_boundary_type='cyclic') + + subroutine grid_boxislands_kmt (work) + + use ice_constants, only: c0, c1, c20 + + real (kind=dbl_kind), dimension(:,:), intent(inout) :: work + + integer (kind=int_kind) :: & + i, j, k, & ! indices + nxb, nyb ! convenient cell-block sizes for building the mask + + character(len=*), parameter :: subname = '(grid_boxislands_kmt)' + + ! number of cells in 5% of global grid x and y lengths + nxb = int(real(nx_global, dbl_kind) / c20, int_kind) + nyb = int(real(ny_global, dbl_kind) / c20, int_kind) + + if (nxb < 1 .or. nyb < 1) & + call abort_ice(subname//'ERROR: requires larger grid size') + + ! initialize work area as all ocean (c1). + work(:,:) = c1 + + ! now add land points (c0) + ! northeast triangle + k = 0 + do j = ny_global, ny_global-3*nyb, -1 + k = k+1 + do i = nx_global-3*nxb+k, nx_global + work(i,j) = c0 + enddo + enddo + + ! northwest docks + do j = ny_global-3*nyb, ny_global + do i = 1, 1 + work(i,j) = c0 + enddo + enddo + do i = 1, 2*nxb + do j = ny_global-3*nyb, ny_global-nyb-2 + work(i,j) = c0 + enddo + do j = ny_global-nyb, ny_global-nyb+1 + work(i,j) = c0 + enddo + enddo + + ! southwest docks + do j = 2*nyb, 3*nyb + do i = 1, 1 + work(i,j) = c0 + enddo + enddo + do j = 1, 2*nyb + do i = 2, nxb + work(i,j) = c0 + enddo + do i = 2*nxb-1, 2*nxb + work(i,j) = c0 + enddo + do i = 2*nxb+2,4*nxb + work(i,j) = c0 + enddo + enddo + + ! tiny island + do j = 14*nyb, 14*nyb+1 + do i = 14*nxb, 14*nxb+1 + work(i,j) = c0 + enddo + enddo + + ! X islands + ! left triangle + k = 0 + do i = 2*nxb, 4*nxb + k=k+1 + do j = 10*nyb+k, 14*nyb-k + work(i,j) = c0 + enddo + enddo + ! upper triangle + k = 0 + do j = 14*nyb, 12*nyb, -1 + k=k+1 + do i = 2*nxb+2+k, 6*nxb-2-k + work(i,j) = c0 + enddo + enddo + ! diagonal + k = 0 + do j = 10*nyb, 14*nyb + k=k+1 + do i = 2*nxb+4+k, 2*nxb+6+k + work(i,j) = c0 + enddo + enddo + ! lower right triangle + k = 0 + do j = 12*nyb, 10*nyb, -1 + k=k+1 + do i = 5*nxb+k, 8*nxb + work(i,j) = c0 + enddo + enddo + + ! bar islands + do i = 10*nxb, 16*nxb + do j = 4*nyb, 5*nyb + work(i,j) = c0 + enddo + do j = 6*nyb+2, 8*nyb + work(i,j) = c0 + enddo + do j = 8*nyb+2, 8*nyb+3 + work(i,j) = c0 + enddo + enddo + + end subroutine grid_boxislands_kmt + !======================================================================= ! CPOM displaced pole grid and land mask. \\ @@ -1472,11 +1717,11 @@ subroutine cpomgrid call ice_read_global(nu_grid,3,work_g1, 'rda8',diag) work_g1 = work_g1 * m_to_cm - call primary_grid_lengths_HTN(work_g1) ! dxu, dxt + call primary_grid_lengths_HTN(work_g1) ! dxu, dxt, dxn, dxe call ice_read_global(nu_grid,4,work_g1, 'rda8',diag) work_g1 = work_g1 * m_to_cm - call primary_grid_lengths_HTE(work_g1) ! dyu, dyt + call primary_grid_lengths_HTE(work_g1) ! 
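!-----------------------------------------------------------------
! Illustrative sketch (not part of this patch) of the 5% block sizing
! used by grid_boxislands_kmt above: nxb and nyb are one-twentieth of
! the global dimensions, and both must be at least 1, which is why the
! mask requires nx_global, ny_global > 20. Grid size is hypothetical.
!-----------------------------------------------------------------
      program boxislands_size_sketch
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      integer :: nx_global, ny_global, nxb, nyb
      nx_global = 100                ! hypothetical global x dimension
      ny_global = 80                 ! hypothetical global y dimension
      nxb = int(real(nx_global,dbl)/20.0_dbl)
      nyb = int(real(ny_global,dbl)/20.0_dbl)
      if (nxb < 1 .or. nyb < 1) stop 'requires larger grid size'
      print *, 'nxb =', nxb, ' nyb =', nyb   ! 5 and 4
      end program boxislands_size_sketch
!-----------------------------------------------------------------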
dyu, dyt, dyn, dye

      call ice_read_global(nu_grid,7,work_g1,'rda8',diag)
      call scatter_global(ANGLE, work_g1, master_task, distrb_info, &
@@ -1508,7 +1753,7 @@ end subroutine cpomgrid

 subroutine primary_grid_lengths_HTN(work_g)

-      use ice_constants, only: p5, c2, cm_to_m, &
+      use ice_constants, only: p25, p5, c2, cm_to_m, &
          field_loc_center, field_loc_NEcorner, &
          field_loc_Nface, field_type_scalar

@@ -1531,20 +1776,22 @@ subroutine primary_grid_lengths_HTN(work_g)
         allocate(work_g2(1,1))
      endif

+      ! HTN, dxu = average of 2 neighbor HTNs in i
+
      if (my_task == master_task) then
-      do j = 1, ny_global
-      do i = 1, nx_global
-         work_g(i,j) = work_g(i,j) * cm_to_m ! HTN
-      enddo
-      enddo
-      do j = 1, ny_global
-      do i = 1, nx_global
-         ! assume cyclic; noncyclic will be handled during scatter
-         ip1 = i+1
-         if (i == nx_global) ip1 = 1
-         work_g2(i,j) = p5*(work_g(i,j) + work_g(ip1,j)) ! dxu
-      enddo
-      enddo
+         do j = 1, ny_global
+         do i = 1, nx_global
+            work_g(i,j) = work_g(i,j) * cm_to_m ! HTN
+         enddo
+         enddo
+         do j = 1, ny_global
+         do i = 1, nx_global
+            ! assume cyclic; noncyclic will be handled during scatter
+            ip1 = i+1
+            if (i == nx_global) ip1 = 1
+            work_g2(i,j) = p5*(work_g(i,j) + work_g(ip1,j)) ! dxu
+         enddo
+         enddo
      endif
      if (pgl_global_ext) then
         call primary_grid_lengths_global_ext( &
@@ -1555,20 +1802,49 @@ subroutine primary_grid_lengths_HTN(work_g)
      call scatter_global(dxu, work_g2, master_task, distrb_info, &
                          field_loc_NEcorner, field_type_scalar)

+      ! dxt = average of 2 neighbor HTNs in j
+
      if (my_task == master_task) then
-      do j = 2, ny_global
+         do j = 2, ny_global
         do i = 1, nx_global
            work_g2(i,j) = p5*(work_g(i,j) + work_g(i,j-1)) ! dxt
         enddo
-      enddo
-      ! extrapolate to obtain dxt along j=1
-      do i = 1, nx_global
-         work_g2(i,1) = c2*work_g(i,2) - work_g(i,3) ! dxt
-      enddo
+         enddo
+         ! extrapolate to obtain dxt along j=1
+         do i = 1, nx_global
+            work_g2(i,1) = c2*work_g(i,2) - work_g(i,3) ! dxt
+         enddo
      endif
      call scatter_global(dxt, work_g2, master_task, distrb_info, &
                          field_loc_center, field_type_scalar)

+      ! dxn = HTN
+
+      dxn(:,:,:) = HTN(:,:,:) ! dxn
+
+      ! dxe = average of 4 surrounding HTNs
+
+      if (my_task == master_task) then
+         do j = 2, ny_global
+         do i = 1, nx_global
+            ! assume cyclic; noncyclic will be handled during scatter
+            ip1 = i+1
+            if (i == nx_global) ip1 = 1
+            work_g2(i,j) = p25*(work_g(i,j)+work_g(ip1,j)+work_g(i,j-1)+work_g(ip1,j-1)) ! dxe
+         enddo
+         enddo
+         ! extrapolate to obtain dxe along j=1
+         do i = 1, nx_global
+            ! assume cyclic; noncyclic will be handled during scatter
+            ip1 = i+1
+            if (i == nx_global) ip1 = 1
+            work_g2(i,1) = p5*(c2*work_g(i ,2) - work_g(i ,3) + &
+                               c2*work_g(ip1,2) - work_g(ip1,3)) ! dxe
+         enddo
+      endif
+      call scatter_global(dxe, work_g2, master_task, distrb_info, &
+                          field_loc_center, field_type_scalar)
+
      deallocate(work_g2)

 end subroutine primary_grid_lengths_HTN

@@ -1582,7 +1858,7 @@ end subroutine primary_grid_lengths_HTN

 subroutine primary_grid_lengths_HTE(work_g)

-      use ice_constants, only: p5, c2, cm_to_m, &
+      use ice_constants, only: p25, p5, c2, cm_to_m, &
          field_loc_center, field_loc_NEcorner, &
          field_loc_Eface, field_type_scalar

@@ -1605,6 +1881,8 @@ subroutine primary_grid_lengths_HTE(work_g)
         allocate(work_g2(1,1))
      endif

+      ! HTE, dyu = average of 2 neighbor HTE in j
+
      if (my_task == master_task) then
         do j = 1, ny_global
         do i = 1, nx_global
@@ -1619,8 +1897,7 @@ ! 
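!-----------------------------------------------------------------
! Illustrative sketch (not part of this patch) of the four x-lengths
! derived from HTN above, on a tiny cyclic pair of rows: dxu averages
! two HTNs in i, dxt averages two in j, dxn copies HTN, and dxe
! averages the four surrounding HTNs. Values are hypothetical; dbl
! stands in for dbl_kind.
!-----------------------------------------------------------------
      program htn_lengths_sketch
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      integer, parameter :: nx = 4
      real (kind=dbl) :: htn(nx,2), dxu(nx), dxt(nx), dxe(nx)
      integer :: i, ip1
      htn(:,1) = (/ 1.0_dbl, 2.0_dbl, 3.0_dbl, 4.0_dbl /) ! row j-1
      htn(:,2) = (/ 1.5_dbl, 2.5_dbl, 3.5_dbl, 4.5_dbl /) ! row j
      do i = 1, nx
         ip1 = i+1
         if (i == nx) ip1 = 1                  ! assume cyclic in i
         dxu(i) = 0.5_dbl *( htn(i,2) + htn(ip1,2) )
         dxt(i) = 0.5_dbl *( htn(i,2) + htn(i,1) )
         dxe(i) = 0.25_dbl*( htn(i,2) + htn(ip1,2) &
                           + htn(i,1) + htn(ip1,1) )
      enddo
      print *, 'dxu =', dxu
      print *, 'dxt =', dxt
      print *, 'dxn =', htn(:,2)               ! dxn is HTN itself
      print *, 'dxe =', dxe
      end program htn_lengths_sketch
!-----------------------------------------------------------------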
extrapolate to obtain dyu along j=ny_global if (ny_global > 1) then do i = 1, nx_global - work_g2(i,ny_global) = c2*work_g(i,ny_global-1) & - - work_g(i,ny_global-2) ! dyu + work_g2(i,ny_global) = c2*work_g(i,ny_global-1) - work_g(i,ny_global-2) ! dyu enddo endif endif @@ -1633,19 +1910,50 @@ subroutine primary_grid_lengths_HTE(work_g) call scatter_global(dyu, work_g2, master_task, distrb_info, & field_loc_NEcorner, field_type_scalar) + ! dyt = average of 2 neighbor HTE in i + if (my_task == master_task) then - do j = 1, ny_global - do i = 1, nx_global - ! assume cyclic; noncyclic will be handled during scatter - im1 = i-1 - if (i == 1) im1 = nx_global - work_g2(i,j) = p5*(work_g(i,j) + work_g(im1,j)) ! dyt - enddo - enddo + do j = 1, ny_global + do i = 1, nx_global + ! assume cyclic; noncyclic will be handled during scatter + im1 = i-1 + if (i == 1) im1 = nx_global + work_g2(i,j) = p5*(work_g(i,j) + work_g(im1,j)) ! dyt + enddo + enddo endif call scatter_global(dyt, work_g2, master_task, distrb_info, & field_loc_center, field_type_scalar) + ! dyn = average of 4 neighbor HTEs + + if (my_task == master_task) then + do j = 1, ny_global-1 + do i = 1, nx_global + ! assume cyclic; noncyclic will be handled during scatter + im1 = i-1 + if (i == 1) im1 = nx_global + work_g2(i,j) = p25*(work_g(i,j) + work_g(im1,j) + work_g(i,j+1) + work_g(im1,j+1)) ! dyn + enddo + enddo + ! extrapolate to obtain dyn along j=ny_global + if (ny_global > 1) then + do i = 1, nx_global + ! assume cyclic; noncyclic will be handled during scatter + im1 = i-1 + if (i == 1) im1 = nx_global + work_g2(i,ny_global) = p5*(c2*work_g(i ,ny_global-1) - work_g(i ,ny_global-2) + & + c2*work_g(im1,ny_global-1) - work_g(im1,ny_global-2)) ! dyn + enddo + endif + endif + call scatter_global(dyn, work_g2, master_task, distrb_info, & + field_loc_center, field_type_scalar) + + ! dye = HTE + + dye(:,:,:) = HTE(:,:,:) + deallocate(work_g2) end subroutine primary_grid_lengths_HTE @@ -1653,15 +1961,17 @@ end subroutine primary_grid_lengths_HTE !======================================================================= ! Sets the boundary values for the T cell land mask (hm) and -! makes the logical land masks for T and U cells (tmask, umask). +! makes the logical land masks for T and U cells (tmask, umask) +! and N and E cells (nmask, emask). ! Also creates hemisphere masks (mask-n northern, mask-s southern) ! ! author: Elizabeth C. Hunke, LANL subroutine makemask - use ice_constants, only: c0, p5, & - field_loc_center, field_loc_NEcorner, field_type_scalar + use ice_constants, only: c0, p5, c1p5, & + field_loc_center, field_loc_NEcorner, field_type_scalar, & + field_loc_Nface, field_loc_Eface integer (kind=int_kind) :: & i, j, iblk, & @@ -1670,6 +1980,9 @@ subroutine makemask real (kind=dbl_kind) :: & puny + real (kind=dbl_kind), dimension(:,:,:), allocatable :: & + uvmCD + type (block) :: & this_block ! 
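!-----------------------------------------------------------------
! Illustrative sketch (not part of this patch) of the linear edge
! extrapolation used above for dyu and dyn along j=ny_global:
! the boundary value continues the trend of the two interior values,
! w(n) = c2*w(n-1) - w(n-2). Values are hypothetical; dbl stands in
! for dbl_kind.
!-----------------------------------------------------------------
      program edge_extrap_sketch
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      real (kind=dbl) :: w(3)
      w(1) = 10.0_dbl                 ! hypothetical interior length
      w(2) = 12.0_dbl                 ! hypothetical interior length
      w(3) = 2.0_dbl*w(2) - w(1)      ! continue the linear trend: 14
      print *, 'extrapolated edge value:', w(3)
      end program edge_extrap_sketch
!-----------------------------------------------------------------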
block information for current block @@ -1681,7 +1994,7 @@ subroutine makemask file=__FILE__, line=__LINE__) call ice_timer_start(timer_bound) - call ice_HaloUpdate (kmt, halo_info, & + call ice_HaloUpdate (kmt, halo_info, & field_loc_center, field_type_scalar) call ice_HaloUpdate (hm, halo_info, & field_loc_center, field_type_scalar) @@ -1692,6 +2005,7 @@ subroutine makemask !----------------------------------------------------------------- bm = c0 + allocate(uvmCD(nx_block,ny_block,max_blocks)) !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks @@ -1705,7 +2019,11 @@ subroutine makemask do i = ilo, ihi uvm(i,j,iblk) = min (hm(i,j, iblk), hm(i+1,j, iblk), & hm(i,j+1,iblk), hm(i+1,j+1,iblk)) + npm(i,j,iblk) = min (hm(i,j, iblk), hm(i,j+1,iblk)) + epm(i,j,iblk) = min (hm(i,j, iblk), hm(i+1,j,iblk)) bm(i,j,iblk) = my_task + iblk/100.0_dbl_kind + uvmCD(i,j,iblk) = (hm(i,j, iblk)+hm(i+1,j, iblk) & + + hm(i,j+1,iblk)+hm(i+1,j+1,iblk)) enddo enddo enddo @@ -1714,8 +2032,14 @@ subroutine makemask call ice_timer_start(timer_bound) call ice_HaloUpdate (uvm, halo_info, & field_loc_NEcorner, field_type_scalar) - call ice_HaloUpdate (bm, halo_info, & - field_loc_center, field_type_scalar) + call ice_HaloUpdate (uvmCD, halo_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloUpdate (npm, halo_info, & + field_loc_Nface, field_type_scalar) + call ice_HaloUpdate (epm, halo_info, & + field_loc_Eface, field_type_scalar) + call ice_HaloUpdate (bm, halo_info, & + field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) @@ -1727,15 +2051,32 @@ subroutine makemask jhi = this_block%jhi ! needs to cover halo (no halo update for logicals) - tmask(:,:,iblk) = .false. - umask(:,:,iblk) = .false. + tmask(:,:,iblk) = .false. + umask(:,:,iblk) = .false. + umaskCD(:,:,iblk) = .false. + nmask(:,:,iblk) = .false. + emask(:,:,iblk) = .false. do j = jlo-nghost, jhi+nghost do i = ilo-nghost, ihi+nghost - if ( hm(i,j,iblk) > p5) tmask(i,j,iblk) = .true. - if (uvm(i,j,iblk) > p5) umask(i,j,iblk) = .true. + if ( hm(i,j,iblk) > p5 ) tmask (i,j,iblk) = .true. + if (uvm(i,j,iblk) > p5 ) umask (i,j,iblk) = .true. + if (uvmCD(i,j,iblk) > c1p5) umaskCD(i,j,iblk) = .true. + if (npm(i,j,iblk) > p5 ) nmask (i,j,iblk) = .true. + if (epm(i,j,iblk) > p5 ) emask (i,j,iblk) = .true. enddo enddo + enddo + !$OMP END PARALLEL DO + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + !----------------------------------------------------------------- ! create hemisphere masks !----------------------------------------------------------------- @@ -1769,6 +2110,8 @@ subroutine makemask enddo ! iblk !$OMP END PARALLEL DO + deallocate(uvmCD) + end subroutine makemask !======================================================================= @@ -1780,8 +2123,9 @@ end subroutine makemask subroutine Tlatlon - use ice_constants, only: c0, c1, c2, c4, & - field_loc_center, field_type_scalar + use ice_constants, only: c0, c1, c1p5, c2, c4, p5, & + field_loc_center, field_loc_Nface, field_loc_Eface, & + field_type_scalar integer (kind=int_kind) :: & i, j, iblk , & ! 
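!-----------------------------------------------------------------
! Illustrative sketch (not part of this patch) of the mask logic in
! makemask above: uvm/npm/epm take the minimum of the neighboring
! T-cell masks, while uvmCD sums the four corners so umaskCD can use
! the looser c1p5 threshold (at least two of four ocean corners).
! The 2x2 hm values are hypothetical; dbl stands in for dbl_kind.
!-----------------------------------------------------------------
      program makemask_sketch
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      real (kind=dbl) :: hm(2,2), uvm, npm, epm, uvmCD
      logical :: umask, umaskCD
      hm = reshape( (/ 1.0_dbl, 1.0_dbl, 0.0_dbl, 1.0_dbl /), (/2,2/) )
      uvm   = min(hm(1,1), hm(2,1), hm(1,2), hm(2,2))
      npm   = min(hm(1,1), hm(1,2))
      epm   = min(hm(1,1), hm(2,1))
      uvmCD = hm(1,1) + hm(2,1) + hm(1,2) + hm(2,2)
      umask   = uvm   > 0.5_dbl    ! strict: all four corners ocean
      umaskCD = uvmCD > 1.5_dbl    ! CD grid: two or more corners ocean
      print *, umask, umaskCD      ! F T for this 3-of-4 ocean example
      end program makemask_sketch
!-----------------------------------------------------------------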
horizontal indices @@ -1803,6 +2147,10 @@ subroutine Tlatlon TLAT(:,:,:) = c0 TLON(:,:,:) = c0 + NLAT(:,:,:) = c0 + NLON(:,:,:) = c0 + ELAT(:,:,:) = c0 + ELON(:,:,:) = c0 !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & !$OMP x1,y1,z1,x2,y2,z2,x3,y3,z3,x4,y4,z4, & @@ -1837,6 +2185,10 @@ subroutine Tlatlon y4 = sin(ULON(i,j,iblk))*z4 z4 = sin(ULAT(i,j,iblk)) + ! --------- + ! TLON/TLAT 4 pt computation (pts 1, 2, 3, 4) + ! --------- + tx = (x1+x2+x3+x4)/c4 ty = (y1+y2+y3+y4)/c4 tz = (z1+z2+z3+z4)/c4 @@ -1850,11 +2202,90 @@ subroutine Tlatlon ! TLAT in radians North TLAT(i,j,iblk) = asin(tz) + +! these two loops should be merged to save cos/sin calculations, +! but atan2 is not bit-for-bit. This suggests the result for atan2 depends on +! the prior atan2 call ??? not sure what's going on. +#if (1 == 1) + enddo ! i + enddo ! j + enddo ! iblk + !$OMP END PARALLEL DO + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & + !$OMP x1,y1,z1,x2,y2,z2,x3,y3,z3,x4,y4,z4, & + !$OMP tx,ty,tz,da) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + + z1 = cos(ULAT(i-1,j-1,iblk)) + x1 = cos(ULON(i-1,j-1,iblk))*z1 + y1 = sin(ULON(i-1,j-1,iblk))*z1 + z1 = sin(ULAT(i-1,j-1,iblk)) + + z2 = cos(ULAT(i,j-1,iblk)) + x2 = cos(ULON(i,j-1,iblk))*z2 + y2 = sin(ULON(i,j-1,iblk))*z2 + z2 = sin(ULAT(i,j-1,iblk)) + + z3 = cos(ULAT(i-1,j,iblk)) + x3 = cos(ULON(i-1,j,iblk))*z3 + y3 = sin(ULON(i-1,j,iblk))*z3 + z3 = sin(ULAT(i-1,j,iblk)) + + z4 = cos(ULAT(i,j,iblk)) + x4 = cos(ULON(i,j,iblk))*z4 + y4 = sin(ULON(i,j,iblk))*z4 + z4 = sin(ULAT(i,j,iblk)) +#endif + ! --------- + ! NLON/NLAT 2 pt computation (pts 3, 4) + ! --------- + + tx = (x3+x4)/c2 + ty = (y3+y4)/c2 + tz = (z3+z4)/c2 + da = sqrt(tx**2+ty**2+tz**2) + + tz = tz/da + + ! NLON in radians East + NLON(i,j,iblk) = c0 + if (tx /= c0 .or. ty /= c0) NLON(i,j,iblk) = atan2(ty,tx) + + ! NLAT in radians North + NLAT(i,j,iblk) = asin(tz) + + ! --------- + ! ELON/ELAT 2 pt computation (pts 2, 4) + ! --------- + + tx = (x2+x4)/c2 + ty = (y2+y4)/c2 + tz = (z2+z4)/c2 + da = sqrt(tx**2+ty**2+tz**2) + + tz = tz/da + + ! ELON in radians East + ELON(i,j,iblk) = c0 + if (tx /= c0 .or. ty /= c0) ELON(i,j,iblk) = atan2(ty,tx) + + ! ELAT in radians North + ELAT(i,j,iblk) = asin(tz) enddo ! i enddo ! j enddo ! iblk !$OMP END PARALLEL DO + if (trim(grid_type) == 'regional') then ! 
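!-----------------------------------------------------------------
! Illustrative sketch (not part of this patch) of the midpoint
! computation used in Tlatlon above: corner lon/lat pairs become unit
! 3-vectors, are averaged and renormalized, and are converted back
! with atan2/asin. This is the 2-point (N-face) form; the T-point form
! averages 4 corners. Inputs are hypothetical; dbl stands in for
! dbl_kind.
!-----------------------------------------------------------------
      program midpoint_sketch
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      real (kind=dbl) :: lon(2), lat(2), x(2), y(2), z(2)
      real (kind=dbl) :: tx, ty, tz, da, nlon, nlat
      integer :: k
      lon = (/ 0.10_dbl, 0.12_dbl /)  ! hypothetical ULONs (radians)
      lat = (/ 1.20_dbl, 1.21_dbl /)  ! hypothetical ULATs (radians)
      do k = 1, 2
         x(k) = cos(lon(k))*cos(lat(k))
         y(k) = sin(lon(k))*cos(lat(k))
         z(k) = sin(lat(k))
      enddo
      tx = sum(x)/2.0_dbl
      ty = sum(y)/2.0_dbl
      tz = sum(z)/2.0_dbl
      da = sqrt(tx**2 + ty**2 + tz**2) ! chord mean lies inside the sphere
      tz = tz/da                       ! renormalize onto the unit sphere
      nlon = 0.0_dbl
      if (tx /= 0.0_dbl .or. ty /= 0.0_dbl) nlon = atan2(ty,tx)
      nlat = asin(tz)
      print *, 'NLON, NLAT (rad):', nlon, nlat
      end program midpoint_sketch
!-----------------------------------------------------------------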
for W boundary extrapolate from interior !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) @@ -1872,6 +2303,10 @@ subroutine Tlatlon TLON(i+2,j,iblk) TLAT(i,j,iblk) = c2*TLAT(i+1,j,iblk) - & TLAT(i+2,j,iblk) + NLON(i,j,iblk) = c1p5*TLON(i+1,j,iblk) - & + p5*TLON(i+2,j,iblk) + NLAT(i,j,iblk) = c1p5*TLAT(i+1,j,iblk) - & + p5*TLAT(i+2,j,iblk) enddo endif enddo @@ -1885,10 +2320,30 @@ subroutine Tlatlon call ice_HaloUpdate (TLAT, halo_info, & field_loc_center, field_type_scalar, & fillValue=c1) + call ice_HaloUpdate (NLON, halo_info, & + field_loc_Nface, field_type_scalar, & + fillValue=c1) + call ice_HaloUpdate (NLAT, halo_info, & + field_loc_Nface, field_type_scalar, & + fillValue=c1) + call ice_HaloUpdate (ELON, halo_info, & + field_loc_Eface, field_type_scalar, & + fillValue=c1) + call ice_HaloUpdate (ELAT, halo_info, & + field_loc_Eface, field_type_scalar, & + fillValue=c1) call ice_HaloExtrapolate(TLON, distrb_info, & ew_boundary_type, ns_boundary_type) call ice_HaloExtrapolate(TLAT, distrb_info, & ew_boundary_type, ns_boundary_type) + call ice_HaloExtrapolate(NLON, distrb_info, & + ew_boundary_type, ns_boundary_type) + call ice_HaloExtrapolate(NLAT, distrb_info, & + ew_boundary_type, ns_boundary_type) + call ice_HaloExtrapolate(ELON, distrb_info, & + ew_boundary_type, ns_boundary_type) + call ice_HaloExtrapolate(ELAT, distrb_info, & + ew_boundary_type, ns_boundary_type) call ice_timer_stop(timer_bound) x1 = global_minval(TLON, distrb_info, tmask) @@ -1903,154 +2358,1090 @@ subroutine Tlatlon if (my_task==master_task) then write(nu_diag,*) ' ' - if (nx_block > 5+2*nghost .and. ny_block > 5+2*nghost) then +! if (nx_block > 5+2*nghost .and. ny_block > 5+2*nghost) then write(nu_diag,*) 'min/max ULON:', y1*rad_to_deg, y2*rad_to_deg write(nu_diag,*) 'min/max ULAT:', y3*rad_to_deg, y4*rad_to_deg - endif +! endif write(nu_diag,*) 'min/max TLON:', x1*rad_to_deg, x2*rad_to_deg write(nu_diag,*) 'min/max TLAT:', x3*rad_to_deg, x4*rad_to_deg endif ! my_task + x1 = global_minval(NLON, distrb_info, nmask) + x2 = global_maxval(NLON, distrb_info, nmask) + x3 = global_minval(NLAT, distrb_info, nmask) + x4 = global_maxval(NLAT, distrb_info, nmask) + + y1 = global_minval(ELON, distrb_info, emask) + y2 = global_maxval(ELON, distrb_info, emask) + y3 = global_minval(ELAT, distrb_info, emask) + y4 = global_maxval(ELAT, distrb_info, emask) + + if (my_task==master_task) then + write(nu_diag,*) ' ' +! if (nx_block > 5+2*nghost .and. ny_block > 5+2*nghost) then + write(nu_diag,*) 'min/max NLON:', x1*rad_to_deg, x2*rad_to_deg + write(nu_diag,*) 'min/max NLAT:', x3*rad_to_deg, x4*rad_to_deg + write(nu_diag,*) 'min/max ELON:', y1*rad_to_deg, y2*rad_to_deg + write(nu_diag,*) 'min/max ELAT:', y3*rad_to_deg, y4*rad_to_deg +! endif + endif ! my_task + end subroutine Tlatlon !======================================================================= -! Transfer vector component from T-cell centers to U-cell centers. +! Shifts quantities from one grid to another +! Constructs the shift based on the grid +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. ! -! author: Elizabeth C. Hunke, LANL +! author: T. 
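!-----------------------------------------------------------------
! Illustrative sketch (not part of this patch) of the half-cell
! extrapolation used above for the regional western boundary:
! NLON = c1p5*TLON(i+1) - p5*TLON(i+2) places the N point half a cell
! outside the first interior T point. Values are hypothetical; dbl
! stands in for dbl_kind.
!-----------------------------------------------------------------
      program halfcell_extrap_sketch
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      real (kind=dbl) :: t1, t2, n0
      t1 = 100.0_dbl                  ! hypothetical TLON(i+1,j)
      t2 = 102.0_dbl                  ! hypothetical TLON(i+2,j)
      n0 = 1.5_dbl*t1 - 0.5_dbl*t2    ! c1p5*t1 - p5*t2 = 99.0
      print *, 'extrapolated boundary NLON:', n0
      end program halfcell_extrap_sketch
!-----------------------------------------------------------------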
Craig - subroutine t2ugrid_vector (work) - - use ice_blocks, only: nx_block, ny_block - use ice_constants, only: field_loc_center, field_type_vector - use ice_domain_size, only: max_blocks + subroutine grid_average_X2Y_base(type,work1,grid1,work2,grid2) - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(inout) :: & - work + character(len=*) , intent(in) :: & + type, grid1, grid2 - ! local variables + real (kind=dbl_kind), intent(in) :: & + work1(:,:,:) - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1 + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) - character(len=*), parameter :: subname = '(t2ugrid_vector)' + ! local variables - work1(:,:,:) = work(:,:,:) + character(len=16) :: X2Y - call ice_timer_start(timer_bound) - call ice_HaloUpdate (work1, halo_info, & - field_loc_center, field_type_vector) - call ice_timer_stop(timer_bound) + character(len=*), parameter :: subname = '(grid_average_X2Y_base)' - call to_ugrid(work1,work) + if (trim(grid1) == trim(grid2)) then + work2 = work1 + else + X2Y = trim(grid1)//'2'//trim(grid2)//trim(type) + call grid_average_X2Y_1(X2Y,work1,work2) + endif - end subroutine t2ugrid_vector + end subroutine grid_average_X2Y_base !======================================================================= -! Shifts quantities from the T-cell midpoint (work1) to the U-cell -! midpoint (work2) +! Shifts quantities from one grid to another ! NOTE: Input array includes ghost cells that must be updated before ! calling this routine. ! -! author: Elizabeth C. Hunke, LANL +! author: T. Craig - subroutine to_ugrid(work1,work2) + subroutine grid_average_X2Y_userwghts(type,work1,grid1,wght1,mask1,work2,grid2) - use ice_constants, only: c0, p25 + character(len=*) , intent(in) :: & + type, grid1, grid2 real (kind=dbl_kind), intent(in) :: & - work1(nx_block,ny_block,max_blocks) + work1(:,:,:), & + wght1(:,:,:), & + mask1(:,:,:) - real (kind=dbl_kind), intent(out) :: & - work2(nx_block,ny_block,max_blocks) + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) - type (block) :: & - this_block ! block information for current block + ! local variables - character(len=*), parameter :: subname = '(to_ugrid)' + character(len=16) :: X2Y - ! local variables + character(len=*), parameter :: subname = '(grid_average_X2Y_userwghts)' + + if (trim(grid1) == trim(grid2)) then + work2 = work1 + else + X2Y = trim(grid1)//'2'//trim(grid2)//trim(type) + call grid_average_X2Y_1f(X2Y,work1,wght1,mask1,work2) + endif + + end subroutine grid_average_X2Y_userwghts + +!======================================================================= + +! Shifts quantities from one grid to another +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2Y_NEversion(type,work1a,grid1a,work1b,grid1b,work2,grid2) + + character(len=*) , intent(in) :: & + type, grid1a, grid1b, grid2 + + real (kind=dbl_kind), intent(in) :: & + work1a(:,:,:), work1b(:,:,:) + + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) + + ! local variables + + character(len=16) :: X2Y + + character(len=*), parameter :: subname = '(grid_average_X2Y_NEversion)' + + X2Y = trim(grid1a)//trim(grid1b)//'2'//trim(grid2)//trim(type) + + select case (trim(X2Y)) + + ! 
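!-----------------------------------------------------------------
! Illustrative sketch (not part of this patch) of the dispatch key
! built by grid_average_X2Y_base above: source grid, '2', destination
! grid, and the mapping type ('F' flux, 'S' state masked, 'A' state
! unmasked) are concatenated and matched in a select case. The
! NE-pair version prepends both source grids, e.g. 'NE2US'. Inputs
! here are hypothetical.
!-----------------------------------------------------------------
      program x2y_dispatch_sketch
      implicit none
      character (len=16) :: X2Y
      character (len=1) :: grid1, grid2, type
      grid1 = 'T'
      grid2 = 'U'
      type  = 'S'
      if (grid1 == grid2) then
         print *, 'same grid: copy work1 to work2'
      else
         X2Y = trim(grid1)//'2'//trim(grid2)//trim(type)
         print *, 'dispatch key: ', trim(X2Y)   ! T2US
      endif
      end program x2y_dispatch_sketch
!-----------------------------------------------------------------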
state masked + case('NE2US') + call grid_average_X2YS_2('NE2US',work1a,narea,npm,work1b,earea,epm,work2) + case('EN2US') + call grid_average_X2YS_2('NE2US',work1b,narea,npm,work1a,earea,epm,work2) + case('NE2TS') + call grid_average_X2YS_2('NE2TS',work1a,narea,npm,work1b,earea,epm,work2) + case('EN2TS') + call grid_average_X2YS_2('NE2TS',work1b,narea,npm,work1a,earea,epm,work2) + + case default + call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) + end select + + end subroutine grid_average_X2Y_NEversion + +!======================================================================= + +! Shifts quantities from one grid to another +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2Y_1(X2Y,work1,work2) + + character(len=*) , intent(in) :: & + X2Y + + real (kind=dbl_kind), intent(in) :: & + work1(:,:,:) + + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) + + ! local variables + + character(len=*), parameter :: subname = '(grid_average_X2Y_1)' + + select case (trim(X2Y)) + + ! flux unmasked + case('T2UF') + call grid_average_X2YF('NE',work1,tarea,work2,uarea) + case('T2EF') + call grid_average_X2YF('E' ,work1,tarea,work2,earea) + case('T2NF') + call grid_average_X2YF('N' ,work1,tarea,work2,narea) + case('U2TF') + call grid_average_X2YF('SW',work1,uarea,work2,tarea) + case('U2EF') + call grid_average_X2YF('S' ,work1,uarea,work2,earea) + case('U2NF') + call grid_average_X2YF('W' ,work1,uarea,work2,narea) + case('E2TF') + call grid_average_X2YF('W' ,work1,earea,work2,tarea) + case('E2UF') + call grid_average_X2YF('N' ,work1,earea,work2,uarea) + case('E2NF') + call grid_average_X2YF('NW',work1,earea,work2,narea) + case('N2TF') + call grid_average_X2YF('S' ,work1,narea,work2,tarea) + case('N2UF') + call grid_average_X2YF('E' ,work1,narea,work2,uarea) + case('N2EF') + call grid_average_X2YF('SE',work1,narea,work2,earea) + + ! state masked + case('T2US') + call grid_average_X2YS('NE',work1,tarea,hm ,work2) + case('T2ES') + call grid_average_X2YS('E' ,work1,tarea,hm ,work2) + case('T2NS') + call grid_average_X2YS('N' ,work1,tarea,hm ,work2) + case('U2TS') + call grid_average_X2YS('SW',work1,uarea,uvm,work2) + case('U2ES') + call grid_average_X2YS('S' ,work1,uarea,uvm,work2) + case('U2NS') + call grid_average_X2YS('W' ,work1,uarea,uvm,work2) + case('E2TS') + call grid_average_X2YS('W' ,work1,earea,epm,work2) + case('E2US') + call grid_average_X2YS('N' ,work1,earea,epm,work2) + case('E2NS') + call grid_average_X2YS('NW',work1,earea,epm,work2) + case('N2TS') + call grid_average_X2YS('S' ,work1,narea,npm,work2) + case('N2US') + call grid_average_X2YS('E' ,work1,narea,npm,work2) + case('N2ES') + call grid_average_X2YS('SE',work1,narea,npm,work2) + + ! 
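!-----------------------------------------------------------------
! Illustrative sketch (not part of this patch) of the stencil codes
! used in the case tables above: each X2Y pair maps to a direction
! that names the neighbor offsets to average, e.g. T2U uses 'NE'
! (the four corners northeast of the T point) and N2T uses 'S'.
! Offsets printed here are relative (di,dj) pairs.
!-----------------------------------------------------------------
      program stencil_sketch
      implicit none
      character (len=2) :: dir
      dir = 'NE'
      select case (trim(dir))
      case('NE')
         print *, 'offsets: (0,0) (1,0) (0,1) (1,1)'     ! T -> U
      case('SW')
         print *, 'offsets: (0,0) (-1,0) (0,-1) (-1,-1)' ! U -> T
      case('E')
         print *, 'offsets: (0,0) (1,0)'                 ! T -> E
      case('N')
         print *, 'offsets: (0,0) (0,1)'                 ! T -> N
      case('S')
         print *, 'offsets: (0,-1) (0,0)'                ! N -> T
      case('W')
         print *, 'offsets: (-1,0) (0,0)'                ! E -> T
      end select
      end program stencil_sketch
!-----------------------------------------------------------------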
state unmasked + case('T2UA') + call grid_average_X2YA('NE',work1,tarea,work2) + case('T2EA') + call grid_average_X2YA('E' ,work1,tarea,work2) + case('T2NA') + call grid_average_X2YA('N' ,work1,tarea,work2) + case('U2TA') + call grid_average_X2YA('SW',work1,uarea,work2) + case('U2EA') + call grid_average_X2YA('S' ,work1,uarea,work2) + case('U2NA') + call grid_average_X2YA('W' ,work1,uarea,work2) + case('E2TA') + call grid_average_X2YA('W' ,work1,earea,work2) + case('E2UA') + call grid_average_X2YA('N' ,work1,earea,work2) + case('E2NA') + call grid_average_X2YA('NW',work1,earea,work2) + case('N2TA') + call grid_average_X2YA('S' ,work1,narea,work2) + case('N2UA') + call grid_average_X2YA('E' ,work1,narea,work2) + case('N2EA') + call grid_average_X2YA('SE',work1,narea,work2) + + case default + call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) + end select + + end subroutine grid_average_X2Y_1 + +!======================================================================= + +! Shifts quantities from one grid to another +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2Y_1f(X2Y,work1,wght1,mask1,work2) + + character(len=*) , intent(in) :: & + X2Y + + real (kind=dbl_kind), intent(in) :: & + work1(:,:,:), & + wght1(:,:,:), & + mask1(:,:,:) + + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) + + ! local variables + + character(len=*), parameter :: subname = '(grid_average_X2Y_1f)' + + select case (trim(X2Y)) + +! don't support these for now, requires extra destination wght +! ! flux unmasked +! case('T2UF') +! call grid_average_X2YF('NE',work1,tarea,work2,uarea) +! case('T2EF') +! call grid_average_X2YF('E' ,work1,tarea,work2,earea) +! case('T2NF') +! call grid_average_X2YF('N' ,work1,tarea,work2,narea) +! case('U2TF') +! call grid_average_X2YF('SW',work1,uarea,work2,tarea) +! case('U2EF') +! call grid_average_X2YF('S' ,work1,uarea,work2,earea) +! case('U2NF') +! call grid_average_X2YF('W' ,work1,uarea,work2,narea) +! case('E2TF') +! call grid_average_X2YF('W' ,work1,earea,work2,tarea) +! case('E2UF') +! call grid_average_X2YF('N' ,work1,earea,work2,uarea) +! case('E2NF') +! call grid_average_X2YF('NW',work1,earea,work2,narea) +! case('N2TF') +! call grid_average_X2YF('S' ,work1,narea,work2,tarea) +! case('N2UF') +! call grid_average_X2YF('E' ,work1,narea,work2,uarea) +! case('N2EF') +! call grid_average_X2YF('SE',work1,narea,work2,earea) + + ! state masked + case('T2US') + call grid_average_X2YS('NE',work1,wght1,mask1,work2) + case('T2ES') + call grid_average_X2YS('E' ,work1,wght1,mask1,work2) + case('T2NS') + call grid_average_X2YS('N' ,work1,wght1,mask1,work2) + case('U2TS') + call grid_average_X2YS('SW',work1,wght1,mask1,work2) + case('U2ES') + call grid_average_X2YS('S' ,work1,wght1,mask1,work2) + case('U2NS') + call grid_average_X2YS('W' ,work1,wght1,mask1,work2) + case('E2TS') + call grid_average_X2YS('W' ,work1,wght1,mask1,work2) + case('E2US') + call grid_average_X2YS('N' ,work1,wght1,mask1,work2) + case('E2NS') + call grid_average_X2YS('NW',work1,wght1,mask1,work2) + case('N2TS') + call grid_average_X2YS('S' ,work1,wght1,mask1,work2) + case('N2US') + call grid_average_X2YS('E' ,work1,wght1,mask1,work2) + case('N2ES') + call grid_average_X2YS('SE',work1,wght1,mask1,work2) + + ! 
state unmasked + case('T2UA') + call grid_average_X2YA('NE',work1,wght1,work2) + case('T2EA') + call grid_average_X2YA('E' ,work1,wght1,work2) + case('T2NA') + call grid_average_X2YA('N' ,work1,wght1,work2) + case('U2TA') + call grid_average_X2YA('SW',work1,wght1,work2) + case('U2EA') + call grid_average_X2YA('S' ,work1,wght1,work2) + case('U2NA') + call grid_average_X2YA('W' ,work1,wght1,work2) + case('E2TA') + call grid_average_X2YA('W' ,work1,wght1,work2) + case('E2UA') + call grid_average_X2YA('N' ,work1,wght1,work2) + case('E2NA') + call grid_average_X2YA('NW',work1,wght1,work2) + case('N2TA') + call grid_average_X2YA('S' ,work1,wght1,work2) + case('N2UA') + call grid_average_X2YA('E' ,work1,wght1,work2) + case('N2EA') + call grid_average_X2YA('SE',work1,wght1,work2) + + case default + call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) + end select + + end subroutine grid_average_X2Y_1f + +!======================================================================= +! Shifts quantities from one grid to another +! State masked version, simple area weighted averager +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2YS(dir,work1,wght1,mask1,work2) + + use ice_constants, only: c0 + + character(len=*) , intent(in) :: & + dir + + real (kind=dbl_kind), intent(in) :: & + work1(:,:,:), & + wght1(:,:,:), & + mask1(:,:,:) + + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) + + ! local variables integer (kind=int_kind) :: & i, j, iblk, & ilo,ihi,jlo,jhi ! beginning and end of physical domain + real (kind=dbl_kind) :: & + wtmp + + type (block) :: & + this_block ! block information for current block + + character(len=*), parameter :: subname = '(grid_average_X2YS)' + work2(:,:,:) = c0 - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi + select case (trim(dir)) - do j = jlo, jhi - do i = ilo, ihi - work2(i,j,iblk) = p25 * & - (work1(i, j, iblk)*tarea(i, j, iblk) & - + work1(i+1,j, iblk)*tarea(i+1,j, iblk) & - + work1(i, j+1,iblk)*tarea(i, j+1,iblk) & - + work1(i+1,j+1,iblk)*tarea(i+1,j+1,iblk)) & - / uarea(i, j, iblk) - enddo - enddo - enddo - !$OMP END PARALLEL DO + case('NE') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i+1,j ,iblk)*wght1(i+1,j ,iblk) & + + mask1(i ,j+1,iblk)*wght1(i ,j+1,iblk) & + + mask1(i+1,j+1,iblk)*wght1(i+1,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1(i ,j ,iblk)*work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i+1,j ,iblk)*work1(i+1,j ,iblk)*wght1(i+1,j ,iblk) & + + mask1(i ,j+1,iblk)*work1(i ,j+1,iblk)*wght1(i ,j+1,iblk) & + + mask1(i+1,j+1,iblk)*work1(i+1,j+1,iblk)*wght1(i+1,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('SW') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i-1,j ,iblk)*wght1(i-1,j 
,iblk) & + + mask1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + mask1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1(i ,j ,iblk)*work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i-1,j ,iblk)*work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + mask1(i ,j-1,iblk)*work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + mask1(i-1,j-1,iblk)*work1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('NW') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + mask1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i-1,j+1,iblk)*wght1(i-1,j+1,iblk) & + + mask1(i ,j+1,iblk)*wght1(i ,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1(i-1,j ,iblk)*work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + mask1(i ,j ,iblk)*work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i-1,j+1,iblk)*work1(i-1,j+1,iblk)*wght1(i-1,j+1,iblk) & + + mask1(i ,j+1,iblk)*work1(i ,j+1,iblk)*wght1(i ,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('SE') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + mask1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + mask1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1(i ,j-1,iblk)*work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + mask1(i+1,j-1,iblk)*work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + mask1(i ,j ,iblk)*work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i+1,j ,iblk)*work1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('E') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1(i ,j,iblk)*wght1(i ,j,iblk) & + + mask1(i+1,j,iblk)*wght1(i+1,j,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1(i ,j,iblk)*work1(i ,j,iblk)*wght1(i ,j,iblk) & + + mask1(i+1,j,iblk)*work1(i+1,j,iblk)*wght1(i+1,j,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('W') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1(i-1,j,iblk)*wght1(i-1,j,iblk) & + + mask1(i ,j,iblk)*wght1(i ,j,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1(i-1,j,iblk)*work1(i-1,j,iblk)*wght1(i-1,j,iblk) & + + mask1(i ,j,iblk)*work1(i ,j,iblk)*wght1(i ,j,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('N') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1(i,j ,iblk)*wght1(i,j 
,iblk) & + + mask1(i,j+1,iblk)*wght1(i,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1(i,j ,iblk)*work1(i,j ,iblk)*wght1(i,j ,iblk) & + + mask1(i,j+1,iblk)*work1(i,j+1,iblk)*wght1(i,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('S') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1(i,j-1,iblk)*wght1(i,j-1,iblk) & + + mask1(i,j ,iblk)*wght1(i,j ,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1(i,j-1,iblk)*work1(i,j-1,iblk)*wght1(i,j-1,iblk) & + + mask1(i,j ,iblk)*work1(i,j ,iblk)*wght1(i,j ,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case default + call abort_ice(subname//'ERROR: unknown option '//trim(dir)) + end select - end subroutine to_ugrid + end subroutine grid_average_X2YS !======================================================================= +! Shifts quantities from one grid to another +! State unmasked version, simple weighted averager +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2YA(dir,work1,wght1,work2) + + use ice_constants, only: c0 + + character(len=*) , intent(in) :: & + dir + + real (kind=dbl_kind), intent(in) :: & + work1(:,:,:), & + wght1(:,:,:) + + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) -! Transfer from U-cell centers to T-cell centers. Writes work into -! another array that has ghost cells -! NOTE: Input array is dimensioned only over physical cells. + ! local variables + + integer (kind=int_kind) :: & + i, j, iblk, & + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind) :: & + wtmp + + type (block) :: & + this_block ! 
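!-----------------------------------------------------------------
! Illustrative sketch (not part of this patch) of the masked,
! area-weighted average computed by grid_average_X2YS above, here for
! a two-point 'N' stencil: the result is the mask- and weight-weighted
! mean, left at zero where no source point contributes. Values are
! hypothetical; dbl stands in for dbl_kind.
!-----------------------------------------------------------------
      program masked_avg_sketch
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      real (kind=dbl) :: work(2), wght(2), mask(2), wtmp, out
      work = (/ 2.0_dbl, 4.0_dbl /)  ! field at (i,j) and (i,j+1)
      wght = (/ 1.0_dbl, 3.0_dbl /)  ! source cell areas
      mask = (/ 1.0_dbl, 0.0_dbl /)  ! second point is land
      out  = 0.0_dbl
      wtmp = mask(1)*wght(1) + mask(2)*wght(2)
      if (wtmp /= 0.0_dbl) &
         out = (mask(1)*work(1)*wght(1) &
              + mask(2)*work(2)*wght(2)) / wtmp
      print *, out    ! 2.0: only the ocean point contributes
      end program masked_avg_sketch
!-----------------------------------------------------------------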
block information for current block + + character(len=*), parameter :: subname = '(grid_average_X2YA)' + + work2(:,:,:) = c0 + + select case (trim(dir)) + + case('NE') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i ,j ,iblk) & + + wght1(i+1,j ,iblk) & + + wght1(i ,j+1,iblk) & + + wght1(i+1,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk) & + + work1(i ,j+1,iblk)*wght1(i ,j+1,iblk) & + + work1(i+1,j+1,iblk)*wght1(i+1,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('SW') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i ,j ,iblk) & + + wght1(i-1,j ,iblk) & + + wght1(i ,j-1,iblk) & + + wght1(i-1,j-1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + work1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('NW') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i-1,j ,iblk) & + + wght1(i ,j ,iblk) & + + wght1(i-1,j+1,iblk) & + + wght1(i ,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i-1,j+1,iblk)*wght1(i-1,j+1,iblk) & + + work1(i ,j+1,iblk)*wght1(i ,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('SE') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i ,j-1,iblk) & + + wght1(i+1,j-1,iblk) & + + wght1(i ,j ,iblk) & + + wght1(i+1,j ,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('E') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i ,j,iblk) & + + wght1(i+1,j,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i ,j,iblk)*wght1(i ,j,iblk) & + + work1(i+1,j,iblk)*wght1(i+1,j,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('W') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + 
do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i-1,j,iblk) & + + wght1(i ,j,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i-1,j,iblk)*wght1(i-1,j,iblk) & + + work1(i ,j,iblk)*wght1(i ,j,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('N') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i,j ,iblk) & + + wght1(i,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i,j ,iblk)*wght1(i,j ,iblk) & + + work1(i,j+1,iblk)*wght1(i,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('S') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i,j-1,iblk) & + + wght1(i,j ,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i,j-1,iblk)*wght1(i,j-1,iblk) & + + work1(i,j ,iblk)*wght1(i,j ,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case default + call abort_ice(subname//'ERROR: unknown option '//trim(dir)) + end select + + end subroutine grid_average_X2YA + +!======================================================================= +! Shifts quantities from one grid to another +! Flux masked, original implementation based on earlier t2u and u2t versions +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. ! -! author: Elizabeth C. Hunke, LANL +! author: T. Craig - subroutine u2tgrid_vector (work) + subroutine grid_average_X2YF(dir,work1,wght1,work2,wght2) - use ice_blocks, only: nx_block, ny_block - use ice_constants, only: field_loc_NEcorner, field_type_vector - use ice_domain_size, only: max_blocks + use ice_constants, only: c0, p25, p5 - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & - work + character(len=*) , intent(in) :: & + dir + + real (kind=dbl_kind), intent(in) :: & + work1(:,:,:), & + wght1(:,:,:), & + wght2(:,:,:) + + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) ! local variables - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1 + integer (kind=int_kind) :: & + i, j, iblk, & + ilo,ihi,jlo,jhi ! beginning and end of physical domain - character(len=*), parameter :: subname = '(u2tgrid_vector)' + type (block) :: & + this_block ! 
block information for current block - work1(:,:,:) = work(:,:,:) + character(len=*), parameter :: subname = '(grid_average_X2YF)' - call ice_timer_start(timer_bound) - call ice_HaloUpdate (work1, halo_info, & - field_loc_NEcorner, field_type_vector) - call ice_timer_stop(timer_bound) + work2(:,:,:) = c0 + + select case (trim(dir)) + + case('NE') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p25 * & + (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk) & + + work1(i ,j+1,iblk)*wght1(i ,j+1,iblk) & + + work1(i+1,j+1,iblk)*wght1(i+1,j+1,iblk)) & + / wght2(i ,j ,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('SW') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p25 * & + (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + work1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) & + / wght2(i ,j ,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('NW') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p25 * & + (work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i-1,j+1,iblk)*wght1(i-1,j+1,iblk) & + + work1(i ,j+1,iblk)*wght1(i ,j+1,iblk)) & + / wght2(i ,j ,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('SE') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p25 * & + (work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) & + / wght2(i ,j ,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('E') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p5 * & + (work1(i ,j,iblk)*wght1(i ,j,iblk) & + + work1(i+1,j,iblk)*wght1(i+1,j,iblk)) & + / wght2(i ,j,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('W') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p5 * & + (work1(i-1,j,iblk)*wght1(i-1,j,iblk) & + + work1(i ,j,iblk)*wght1(i ,j,iblk)) & + / wght2(i ,j,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('N') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, 
nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p5 * & + (work1(i,j ,iblk)*wght1(i,j ,iblk) & + + work1(i,j+1,iblk)*wght1(i,j+1,iblk)) & + / wght2(i ,j,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('S') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p5 * & + (work1(i,j-1,iblk)*wght1(i,j-1,iblk) & + + work1(i,j ,iblk)*wght1(i,j ,iblk)) & + / wght2(i ,j,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO - call to_tgrid(work1,work) + case default + call abort_ice(subname//'ERROR: unknown option '//trim(dir)) + end select - end subroutine u2tgrid_vector + end subroutine grid_average_X2YF !======================================================================= +! Compute the minimum of adjacent values of a field at specific indices, +! depending on the grid location (U, E, N) +! + real(kind=dbl_kind) function grid_neighbor_min(field, i, j, grid_location) result(mini) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + field ! field defined at T point + + integer (kind=int_kind), intent(in) :: & + i, j + + character(len=*), intent(in) :: & + grid_location ! grid location at which to compute the minumum (U, E, N) -! Shifts quantities from the U-cell midpoint (work1) to the T-cell -! midpoint (work2) + character(len=*), parameter :: subname = '(grid_neighbor_min)' + + select case (trim(grid_location)) + case('U') + mini = min(field(i,j), field(i+1,j), field(i,j+1), field(i+1,j+1)) + case('E') + mini = min(field(i,j), field(i+1,j)) + case('N') + mini = min(field(i,j), field(i,j+1)) + case default + call abort_ice(subname // ' unknown grid_location: ' // grid_location) + end select + + end function grid_neighbor_min + +!======================================================================= +! Shifts quantities from one grid to another +! State masked version, simple weighted averager ! NOTE: Input array includes ghost cells that must be updated before ! calling this routine. ! -! author: Elizabeth C. Hunke, LANL +! author: T. Craig + + subroutine grid_average_X2YS_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,work2) - subroutine to_tgrid(work1, work2) + use ice_constants, only: c0 - use ice_constants, only: p25 + character(len=*) , intent(in) :: & + dir + + real (kind=dbl_kind), intent(in) :: & + work1a(:,:,:), work1b(:,:,:), & + wght1a(:,:,:), wght1b(:,:,:), & + mask1a(:,:,:), mask1b(:,:,:) - real (kind=dbl_kind) :: work1(nx_block,ny_block,max_blocks), & - work2(nx_block,ny_block,max_blocks) + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) ! local variables @@ -2058,33 +3449,104 @@ subroutine to_tgrid(work1, work2) i, j, iblk, & ilo,ihi,jlo,jhi ! beginning and end of physical domain + real (kind=dbl_kind) :: & + wtmp + type (block) :: & this_block ! 
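!-----------------------------------------------------------------
! Illustrative sketch (not part of this patch) of the flux-style
! average in grid_average_X2YF above, contrasted with the state
! version: the flux form uses fixed p25/p5 factors and divides by the
! destination-cell weight wght2 rather than the summed source weights,
! and applies no land mask, so the area integral of the flux is
! preserved. Values are hypothetical; dbl stands in for dbl_kind.
!-----------------------------------------------------------------
      program flux_avg_sketch
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      real (kind=dbl) :: work(2), wght1(2), wght2, out
      work  = (/ 2.0_dbl, 4.0_dbl /)  ! flux at (i,j) and (i,j+1)
      wght1 = (/ 1.0_dbl, 3.0_dbl /)  ! source cell areas
      wght2 = 2.0_dbl                 ! destination cell area
      out = 0.5_dbl*(work(1)*wght1(1) + work(2)*wght1(2)) / wght2
      print *, out   ! 3.5 for this two-point 'N' stencil
      end program flux_avg_sketch
!-----------------------------------------------------------------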
block information for current block - - character(len=*), parameter :: subname = '(to_tgrid)' - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi + character(len=*), parameter :: subname = '(grid_average_X2YS_2)' - do j = jlo, jhi - do i = ilo, ihi - work2(i,j,iblk) = p25 * & - (work1(i, j ,iblk) * uarea(i, j, iblk) & - + work1(i-1,j ,iblk) * uarea(i-1,j, iblk) & - + work1(i, j-1,iblk) * uarea(i, j-1,iblk) & - + work1(i-1,j-1,iblk) * uarea(i-1,j-1,iblk)) & - / tarea(i, j, iblk) - enddo - enddo - enddo - !$OMP END PARALLEL DO + work2(:,:,:) = c0 + + select case (trim(dir)) + + case('NE2US') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + mask1a(i+1,j ,iblk)*wght1a(i+1,j ,iblk) & + + mask1b(i ,j ,iblk)*wght1b(i ,j ,iblk) & + + mask1b(i ,j+1,iblk)*wght1b(i ,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1a(i ,j ,iblk)*work1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + mask1a(i+1,j ,iblk)*work1a(i+1,j ,iblk)*wght1a(i+1,j ,iblk) & + + mask1b(i ,j ,iblk)*work1b(i ,j ,iblk)*wght1b(i ,j ,iblk) & + + mask1b(i ,j+1,iblk)*work1b(i ,j+1,iblk)*wght1b(i ,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('NE2TS') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1a(i ,j-1,iblk)*wght1a(i ,j-1,iblk) & + + mask1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + mask1b(i-1,j ,iblk)*wght1b(i-1,j ,iblk) & + + mask1b(i ,j ,iblk)*wght1b(i ,j ,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1a(i ,j-1,iblk)*work1a(i ,j-1,iblk)*wght1a(i ,j-1,iblk) & + + mask1a(i ,j ,iblk)*work1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + mask1b(i-1,j ,iblk)*work1b(i-1,j ,iblk)*wght1b(i-1,j ,iblk) & + + mask1b(i ,j ,iblk)*work1b(i ,j ,iblk)*wght1b(i ,j ,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO - end subroutine to_tgrid + case default + call abort_ice(subname//'ERROR: unknown option '//trim(dir)) + end select + + end subroutine grid_average_X2YS_2 + +!======================================================================= +! Compute the maximum of adjacent values of a field at specific indices, +! depending on the grid location (U, E, N) +! + real(kind=dbl_kind) function grid_neighbor_max(field, i, j, grid_location) result(maxi) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + field ! field defined at T point + + integer (kind=int_kind), intent(in) :: & + i, j + + character(len=*), intent(in) :: & + grid_location ! 
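!-----------------------------------------------------------------
! Illustrative sketch (not part of this patch) of the neighbor
! selections made by grid_neighbor_min and grid_neighbor_max above,
! evaluated at i=j=1 on a hypothetical 2x2 T-point field: 'U' spans
! all four surrounding values, 'E' the pair adjacent in i, and 'N'
! the pair adjacent in j. dbl stands in for dbl_kind.
!-----------------------------------------------------------------
      program neighbor_minmax_sketch
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      real (kind=dbl) :: field(2,2)
      field = reshape( (/ 1.0_dbl, 3.0_dbl, 2.0_dbl, 5.0_dbl /), (/2,2/) )
      print *, 'U:', minval(field), maxval(field)           ! 1 and 5
      print *, 'E:', min(field(1,1),field(2,1)), &
                     max(field(1,1),field(2,1))             ! 1 and 3
      print *, 'N:', min(field(1,1),field(1,2)), &
                     max(field(1,1),field(1,2))             ! 1 and 2
      end program neighbor_minmax_sketch
!-----------------------------------------------------------------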
grid location at which to compute the maximum (U, E, N) + + + character(len=*), parameter :: subname = '(grid_neighbor_max)' + + select case (trim(grid_location)) + case('U') + maxi = max(field(i,j), field(i+1,j), field(i,j+1), field(i+1,j+1)) + case('E') + maxi = max(field(i,j), field(i+1,j)) + case('N') + maxi = max(field(i,j), field(i,j+1)) + case default + call abort_ice(subname // ' unknown grid_location: ' // grid_location) + end select + + end function grid_neighbor_max !======================================================================= ! The following code is used for obtaining the coordinates of the grid @@ -2150,12 +3612,12 @@ subroutine gridbox_corners latu_bounds(1,i,j,iblk)=TLAT(i ,j ,iblk)*rad_to_deg latu_bounds(2,i,j,iblk)=TLAT(i+1,j ,iblk)*rad_to_deg latu_bounds(3,i,j,iblk)=TLAT(i+1,j+1,iblk)*rad_to_deg - latu_bounds(4,i,j,iblk)=TLAT(i ,j+1,iblk)*rad_to_deg + latu_bounds(4,i,j,iblk)=TLAT(i ,j+1,iblk)*rad_to_deg lonu_bounds(1,i,j,iblk)=TLON(i ,j ,iblk)*rad_to_deg lonu_bounds(2,i,j,iblk)=TLON(i+1,j ,iblk)*rad_to_deg lonu_bounds(3,i,j,iblk)=TLON(i+1,j+1,iblk)*rad_to_deg - lonu_bounds(4,i,j,iblk)=TLON(i ,j+1,iblk)*rad_to_deg + lonu_bounds(4,i,j,iblk)=TLON(i ,j+1,iblk)*rad_to_deg enddo enddo @@ -2284,6 +3746,307 @@ subroutine gridbox_corners end subroutine gridbox_corners +!======================================================================= +! The following code is used for obtaining the coordinates of the grid +! vertices for CF-compliant netCDF history output. Approximate! +!======================================================================= + +! These fields are only used for netcdf history output, and the +! ghost cell values are not needed. +! NOTE: Extrapolations were used: these fields are approximate! +! + + subroutine gridbox_edges + + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, c2, c360, & + field_loc_NEcorner, field_type_scalar + use ice_domain_size, only: max_blocks + + integer (kind=int_kind) :: & + i,j,iblk,icorner,& ! index counters + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g2 + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + real (kind=dbl_kind) :: & + rad_to_deg + + type (block) :: & + this_block ! block information for current block + + character(len=*), parameter :: subname = '(gridbox_edges)' + + call icepack_query_parameters(rad_to_deg_out=rad_to_deg) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + !------------------------------------------------------------- + ! Get coordinates of grid boxes for each block as follows: + ! for N pt: (1) W edge, (2) E edge, (3) E edge j+1, (4) W edge j+1 + ! 
for E pt: (1) S edge, (2) S edge i+1, (3) N edge, i+1 (4) N edge + !------------------------------------------------------------- + + latn_bounds(:,:,:,:) = c0 + lonn_bounds(:,:,:,:) = c0 + late_bounds(:,:,:,:) = c0 + lone_bounds(:,:,:,:) = c0 + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + + latn_bounds(1,i,j,iblk)=ELAT(i-1,j ,iblk)*rad_to_deg + latn_bounds(2,i,j,iblk)=ELAT(i ,j ,iblk)*rad_to_deg + latn_bounds(3,i,j,iblk)=ELAT(i ,j+1,iblk)*rad_to_deg + latn_bounds(4,i,j,iblk)=ELAT(i-1,j+1,iblk)*rad_to_deg + + lonn_bounds(1,i,j,iblk)=ELON(i-1,j ,iblk)*rad_to_deg + lonn_bounds(2,i,j,iblk)=ELON(i ,j ,iblk)*rad_to_deg + lonn_bounds(3,i,j,iblk)=ELON(i ,j+1,iblk)*rad_to_deg + lonn_bounds(4,i,j,iblk)=ELON(i-1,j+1,iblk)*rad_to_deg + + late_bounds(1,i,j,iblk)=NLAT(i ,j-1,iblk)*rad_to_deg + late_bounds(2,i,j,iblk)=NLAT(i+1,j-1,iblk)*rad_to_deg + late_bounds(3,i,j,iblk)=NLAT(i+1,j ,iblk)*rad_to_deg + late_bounds(4,i,j,iblk)=NLAT(i ,j ,iblk)*rad_to_deg + + lone_bounds(1,i,j,iblk)=NLON(i ,j-1,iblk)*rad_to_deg + lone_bounds(2,i,j,iblk)=NLON(i+1,j-1,iblk)*rad_to_deg + lone_bounds(3,i,j,iblk)=NLON(i+1,j ,iblk)*rad_to_deg + lone_bounds(4,i,j,iblk)=NLON(i ,j ,iblk)*rad_to_deg + + enddo + enddo + enddo + !$OMP END PARALLEL DO + + !---------------------------------------------------------------- + ! extrapolate on global grid to get edge values + !---------------------------------------------------------------- + + if (my_task == master_task) then + allocate(work_g2(nx_global,ny_global)) + else + allocate(work_g2(1,1)) + endif + + ! latn_bounds + + work1(:,:,:) = latn_bounds(1,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do j = 1, ny_global + work_g2(1,j) = c2*work_g2(2,j) & + - work_g2(3,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + latn_bounds(1,:,:,:) = work1(:,:,:) + + work1(:,:,:) = latn_bounds(3,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & + - work_g2(i,ny_global-2) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + latn_bounds(3,:,:,:) = work1(:,:,:) + + work1(:,:,:) = latn_bounds(4,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & + - work_g2(i,ny_global-2) + enddo + do j = 1, ny_global + work_g2(1,j) = c2*work_g2(2,j) & + - work_g2(3,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + latn_bounds(4,:,:,:) = work1(:,:,:) + + ! 
lonn_bounds + + work1(:,:,:) = lonn_bounds(1,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do j = 1, ny_global + work_g2(1,j) = c2*work_g2(2,j) & + - work_g2(3,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + lonn_bounds(1,:,:,:) = work1(:,:,:) + + work1(:,:,:) = lonn_bounds(3,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & + - work_g2(i,ny_global-2) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + lonn_bounds(3,:,:,:) = work1(:,:,:) + + work1(:,:,:) = lonn_bounds(4,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & + - work_g2(i,ny_global-2) + enddo + do j = 1, ny_global + work_g2(1,j) = c2*work_g2(2,j) & + - work_g2(3,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + lonn_bounds(4,:,:,:) = work1(:,:,:) + + ! late_bounds + + work1(:,:,:) = late_bounds(1,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,1) = c2*work_g2(i,2) & + - work_g2(i,3) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + late_bounds(1,:,:,:) = work1(:,:,:) + + work1(:,:,:) = late_bounds(2,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,1) = c2*work_g2(i,2) & + - work_g2(i,3) + enddo + do j = 1, ny_global + work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & + - work_g2(nx_global-2,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + late_bounds(2,:,:,:) = work1(:,:,:) + + work1(:,:,:) = late_bounds(3,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do j = 1, ny_global + work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & + - work_g2(nx_global-2,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + late_bounds(3,:,:,:) = work1(:,:,:) + + ! 
lone_bounds + + work1(:,:,:) = lone_bounds(1,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,1) = c2*work_g2(i,2) & + - work_g2(i,3) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + lone_bounds(1,:,:,:) = work1(:,:,:) + + work1(:,:,:) = lone_bounds(2,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,1) = c2*work_g2(i,2) & + - work_g2(i,3) + enddo + do j = 1, ny_global + work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & + - work_g2(nx_global-2,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + lone_bounds(2,:,:,:) = work1(:,:,:) + + work1(:,:,:) = lone_bounds(3,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do j = 1, ny_global + work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & + - work_g2(nx_global-2,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + lone_bounds(3,:,:,:) = work1(:,:,:) + + deallocate(work_g2) + + !---------------------------------------------------------------- + ! Convert longitude to Degrees East >0 for history output + !---------------------------------------------------------------- + + allocate(work_g2(nx_block,ny_block)) ! not used as global here + !OMP fails in this loop + do iblk = 1, nblocks + do icorner = 1, 4 + work_g2(:,:) = lonn_bounds(icorner,:,:,iblk) + c360 + where (work_g2 > c360) work_g2 = work_g2 - c360 + where (work_g2 < c0 ) work_g2 = work_g2 + c360 + lonn_bounds(icorner,:,:,iblk) = work_g2(:,:) + work_g2(:,:) = lone_bounds(icorner,:,:,iblk) + c360 + where (work_g2 > c360) work_g2 = work_g2 - c360 + where (work_g2 < c0 ) work_g2 = work_g2 + c360 + lone_bounds(icorner,:,:,iblk) = work_g2(:,:) + enddo + enddo + deallocate(work_g2) + + end subroutine gridbox_edges + !======================================================================= ! NOTE: Boundary conditions for fields on NW, SW, SE corners @@ -2414,6 +4177,8 @@ end subroutine gridbox_verts subroutine get_bathymetry + use ice_constants, only: c0 + integer (kind=int_kind) :: & i, j, k, iblk ! loop indices @@ -2465,6 +4230,7 @@ subroutine get_bathymetry depth(k) = depth(k-1) + thick(k) enddo + bathymetry = c0 do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block diff --git a/cicecore/cicedynB/infrastructure/ice_memusage.F90 b/cicecore/cicedynB/infrastructure/ice_memusage.F90 new file mode 100644 index 000000000..19e7dfb15 --- /dev/null +++ b/cicecore/cicedynB/infrastructure/ice_memusage.F90 @@ -0,0 +1,151 @@ +! Provides methods for querying memory use + +MODULE ice_memusage + +!------------------------------------------------------------------------------- +! PURPOSE: memory use query methods +! Should call ice_memusage_init once before calling other interfaces +!------------------------------------------------------------------------------- + + use ice_kinds_mod, only : dbl_kind, log_kind + + implicit none + private + +! PUBLIC: Public interfaces + + public :: ice_memusage_getusage, & + ice_memusage_init, & + ice_memusage_print + + logical(log_kind), public :: memory_stats + +! PRIVATE DATA: + + real(dbl_kind) :: mb_blk = 1.0_dbl_kind + logical :: initset = .false. 
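+
+! A minimal usage sketch (illustrative only; the call site and the
+! nu_diag unit shown here are assumptions, not part of this module):
+!
+!   use ice_memusage, only: ice_memusage_init, ice_memusage_print, memory_stats
+!   memory_stats = .true.                          ! normally set via namelist
+!   call ice_memusage_init(nu_diag)                ! calibrate block-to-MB factor
+!   call ice_memusage_print(nu_diag,'after init')  ! report msize/mrss in MB
+!
+! The calibration below allocates a known 8 MB array and measures the change
+! in resident size: if rss grows by 2048 units, mb_blk = 8/2048 MB per unit,
+! i.e. one 4096-byte page.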
+ +!=============================================================================== + +contains + +!=============================================================================== +! Initialize memory conversion to MB + +subroutine ice_memusage_init(iunit) + + implicit none + + !----- arguments ----- + + integer, optional :: iunit !< output unit number for optional writes + + !----- local ----- + + ! --- Memory stats --- + integer :: msize ! memory size (high water) + integer :: mrss ! resident size (current memory use) + integer :: msize0,msize1 ! temporary size + integer :: mrss0,mrss1,mrss2 ! temporary rss + integer :: mshare,mtext,mdatastack + integer :: ierr + + integer :: ice_memusage_gptl + + real(dbl_kind),allocatable :: mem_tmp(:) + character(*),parameter :: subname = '(ice_memusage_init)' + + !--------------------------------------------------- + + ! return if memory_stats are off + if (.not. memory_stats) return + + ierr = ice_memusage_gptl (msize, mrss0, mshare, mtext, mdatastack) + allocate(mem_tmp(1024*1024)) ! 1 MWord, 8 MB + mem_tmp = -1.0 + ierr = ice_memusage_gptl (msize, mrss1, mshare, mtext, mdatastack) + deallocate(mem_tmp) + ierr = ice_memusage_gptl (msize, mrss2, mshare, mtext, mdatastack) + mb_blk = 1.0_dbl_kind + if (mrss1 - mrss0 > 0) then + mb_blk = (8.0_dbl_kind)/((mrss1-mrss0)*1.0_dbl_kind) + initset = .true. + endif + + if (present(iunit)) then + write(iunit,'(A,l4)') subname//' Initset conversion flag is ',initset + write(iunit,'(A,f16.2)') subname//' 8 MB memory alloc in MB is ',(mrss1-mrss0)*mb_blk + write(iunit,'(A,f16.2)') subname//' 8 MB memory dealloc in MB is ',(mrss1-mrss2)*mb_blk + write(iunit,'(A,f16.2)') subname//' Memory block size conversion in bytes is ',mb_blk*1024_dbl_kind*1024.0_dbl_kind + endif + +end subroutine ice_memusage_init + +!=============================================================================== +! Determine memory use + +subroutine ice_memusage_getusage(r_msize,r_mrss) + + implicit none + + !----- arguments --- + real(dbl_kind),intent(out) :: r_msize !< memory usage value + real(dbl_kind),intent(out) :: r_mrss !< memory usage value + + !----- local --- + integer :: msize,mrss + integer :: mshare,mtext,mdatastack + integer :: ierr + integer :: ice_memusage_gptl + character(*),parameter :: subname = '(ice_memusage_getusage)' + + !--------------------------------------------------- + + ! return if memory_stats are off + if (.not. memory_stats) return + + ierr = ice_memusage_gptl (msize, mrss, mshare, mtext, mdatastack) + r_msize = msize*mb_blk + r_mrss = mrss*mb_blk + +end subroutine ice_memusage_getusage + +!=============================================================================== +! Print memory use + +subroutine ice_memusage_print(iunit,string) + + implicit none + + !----- arguments --- + integer, intent(in) :: iunit !< unit number to write to + character(len=*),optional, intent(in) :: string !< optional string + + !----- local --- + real(dbl_kind) :: msize,mrss + character(len=128) :: lstring + character(*),parameter :: subname = '(ice_memusage_print)' + + !--------------------------------------------------- + + ! return if memory_stats are off + if (.not. memory_stats) return + + lstring = ' ' + if (present(string)) then + lstring = string + endif + + call ice_memusage_getusage(msize,mrss) + + if (initset) then + write(iunit,'(2a,2f14.4,1x,a)') subname,' memory use (MB) = ',msize,mrss,trim(lstring) + else + write(iunit,'(2a,2f14.4,1x,a)') subname,' memory use (??) 
= ',msize,mrss,trim(lstring) + endif + +end subroutine ice_memusage_print + +!=============================================================================== + +END MODULE ice_memusage diff --git a/cicecore/cicedynB/infrastructure/ice_memusage_gptl.c b/cicecore/cicedynB/infrastructure/ice_memusage_gptl.c new file mode 100644 index 000000000..ec9c2c1d8 --- /dev/null +++ b/cicecore/cicedynB/infrastructure/ice_memusage_gptl.c @@ -0,0 +1,239 @@ +/* +** This file was downloaded and modified from https://github.com/jmrosinski/GPTL +** with the following Copyright permission, +** +** Permission is hereby granted, free of charge, to any person obtaining a copy +** of this software and associated documentation files (the “Software”), to deal +** in the Software for any noncommercial purposes without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Software, and to permit +** persons to whom the Software is furnished to do so, subject to the following +** conditions: The above copyright notice and this permission notice shall be +** included in all copies or substantial portions of the Software. Any +** commercial use (including sale) of the software, and derivative development +** towards commercial use, requires written permission of the copyright +** holder. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO +** EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES +** OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +** ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +** DEALINGS IN THE SOFTWARE. +** +*/ + +/* +** $Id: get_memusage.c,v 1.10 2010-11-09 19:08:53 rosinski Exp $ +** +** Author: Jim Rosinski +** Credit to Chuck Bardeen for MACOS section (__APPLE__ ifdef) +** +** get_memusage: +** +** Designed to be called from Fortran, returns information about memory +** usage in each of 5 input int* args. On Linux read from the /proc +** filesystem because getrusage() returns placebos (zeros). Return -1 for +** values which are unavailable or ambiguous on a particular architecture. 
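+**
+** Note on Fortran binding: this routine is connected to the Fortran side by
+** compile-time name mangling (the FORTRANUNDERSCORE / FORTRANCAPS macros
+** below) rather than ISO_C_BINDING; the Fortran callers simply declare
+** "integer :: ice_memusage_gptl" as an external function.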
+**
+** Return value: 0 = success
+**              -1 = failure
+*/
+
+#define _NO_CHANGE 0
+#define _UPPER_CASE 1
+#define _ADD_UNDERSCORE 2
+#define _ADD_TWO_UNDERSCORES 3
+
+#ifdef FORTRANUNDERSCORE
+#define NAMING _ADD_UNDERSCORE
+#endif
+
+#ifdef FORTRANDOUBLEUNDERSCORE
+#define NAMING _ADD_TWO_UNDERSCORES
+#endif
+
+#ifdef FORTRANCAPS
+#define NAMING _UPPER_CASE
+#endif
+
+#ifndef NAMING
+#define NAMING _NO_CHANGE
+#endif
+
+#if (NAMING == _ADD_UNDERSCORE)
+#define ice_memusage_gptl ice_memusage_gptl_
+#endif
+
+#if (NAMING == _ADD_TWO_UNDERSCORES)
+#define ice_memusage_gptl ice_memusage_gptl__
+#endif
+
+#if (NAMING == _UPPER_CASE)
+#define ice_memusage_gptl ICE_MEMUSAGE_GPTL
+#endif
+
+
+#include <sys/resource.h>
+
+/*#include "gptl.h" */ /* additional cpp defs and function prototypes */
+/* extern int ice_memusage_gptl (int *, int *, int *, int *, int *); */
+
+/* _AIX is automatically defined when using the AIX C compilers */
+#ifdef _AIX
+#include <sys/times.h>
+#endif
+
+#ifdef IRIX64
+#include <sys/time.h>
+#endif
+
+#ifdef HAVE_SLASHPROC
+
+#include <sys/time.h>
+#include <sys/types.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#elif (defined __APPLE__)
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#endif
+
+#ifdef BGP
+
+#include <spi/kernel_interface.h>
+#include <common/bgp_personality.h>
+#include <common/bgp_personality_inlines.h>
+#include <malloc.h>
+#define Personality _BGP_Personality_t
+
+#endif
+
+#ifdef BGQ
+
+#include <malloc.h>
+#include <spi/include/kernel/memory.h>
+
+#endif
+
+int ice_memusage_gptl (int *size, int *rss, int *share, int *text, int *datastack)
+{
+#if defined (BGP) || defined(BGQ)
+
+  long long alloc;
+  struct mallinfo m;
+#if defined (BGP)
+  Personality pers;
+#endif
+#if defined (BGQ)
+  uint64_t shared_mem_count;
+#endif
+  long long total;
+  int node_config;
+
+  /* memory available */
+#if defined(BGP)
+  Kernel_GetPersonality(&pers, sizeof(pers));
+  total = BGP_Personality_DDRSizeMB(&pers);
+
+  node_config = BGP_Personality_processConfig(&pers);
+  if (node_config == _BGP_PERS_PROCESSCONFIG_VNM) total /= 4;
+  else if (node_config == _BGP_PERS_PROCESSCONFIG_2x2) total /= 2;
+  total *= 1024*1024;
+
+  *size = total;
+#endif
+
+#if defined(BGQ)
+  Kernel_GetMemorySize(KERNEL_MEMSIZE_SHARED, &shared_mem_count);
+
+  shared_mem_count *= 1024*1024;
+  *size = shared_mem_count;
+
+#endif
+  /* total memory used - heap only (not static memory)*/
+
+  m = mallinfo();
+  alloc = m.hblkhd + m.uordblks;
+
+  *rss = alloc;
+  *share = -1;
+  *text = -1;
+  *datastack = -1;
+
+
+#elif (defined HAVE_SLASHPROC)
+  FILE *fd;                     /* file descriptor for fopen */
+  int pid;                      /* process id */
+  static char *head = "/proc/"; /* part of path */
+  static char *tail = "/statm"; /* part of path */
+  char file[19];                /* full path to file in /proc */
+  int dum;                      /* placeholder for unused return arguments */
+  int ret;                      /* function return value */
+
+  /*
+  ** The file we want to open is /proc/<pid>/statm
+  */
+
+  pid = (int) getpid ();
+  if (pid > 999999) {
+    fprintf (stderr, "get_memusage: pid %d is too large\n", pid);
+    return -1;
+  }
+
+  sprintf (file, "%s%d%s", head, pid, tail);
+  if ((fd = fopen (file, "r")) == NULL) { /* fopen returns NULL on failure, never a negative value */
+    fprintf (stderr, "get_memusage: bad attempt to open %s\n", file);
+    return -1;
+  }
+
+  /*
+  ** Read the desired data from the /proc filesystem directly into the output
+  ** arguments, close the file and return.
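+  ** Format note (per proc(5)): statm holds seven space-separated integers,
+  ** size resident shared text lib data dt, all counted in pages; this is
+  ** why the Fortran side calibrates a block-to-MB conversion factor rather
+  ** than assuming a page size.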
+ */ + + ret = fscanf (fd, "%d %d %d %d %d %d %d", + size, rss, share, text, datastack, &dum, &dum); + ret = fclose (fd); + return 0; + +#elif (defined __APPLE__) + + FILE *fd; + char cmd[60]; + int pid = (int) getpid (); + + sprintf (cmd, "ps -o vsz -o rss -o tsiz -p %d | grep -v RSS", pid); + fd = popen (cmd, "r"); + + if (fd) { + fscanf (fd, "%d %d %d", size, rss, text); + *share = -1; + *datastack = -1; + (void) pclose (fd); + } + + return 0; + +#else + + struct rusage usage; /* structure filled in by getrusage */ + + if (getrusage (RUSAGE_SELF, &usage) < 0) + return -1; + + *size = -1; + *rss = usage.ru_maxrss; + *share = -1; + *text = -1; + *datastack = -1; +#ifdef IRIX64 + *datastack = usage.ru_idrss + usage.ru_isrss; +#endif + return 0; + +#endif +} diff --git a/cicecore/cicedynB/infrastructure/ice_read_write.F90 b/cicecore/cicedynB/infrastructure/ice_read_write.F90 index bf0361cf1..d5cbe1768 100644 --- a/cicecore/cicedynB/infrastructure/ice_read_write.F90 +++ b/cicecore/cicedynB/infrastructure/ice_read_write.F90 @@ -1058,6 +1058,7 @@ subroutine ice_open_nc(filename, fid) status = nf90_open(filename, NF90_NOWRITE, fid) if (status /= nf90_noerr) then + !write(nu_diag,*) subname,' NF90_STRERROR = ',trim(nf90_strerror(status)) call abort_ice(subname//' ERROR: Cannot open '//trim(filename), & file=__FILE__, line=__LINE__) endif @@ -1670,7 +1671,7 @@ subroutine ice_read_nc_xyf(fid, nrec, varname, work, diag, & amin = minval(work_g1(:,:,n)) amax = maxval(work_g1(:,:,n), mask = work_g1(:,:,n) /= missingvalue) asum = sum (work_g1(:,:,n), mask = work_g1(:,:,n) /= missingvalue) - write(nu_diag,*) subname,' min, max, sum =', amin, amax, asum + write(nu_diag,*) subname,' min, max, sum =', amin, amax, asum, trim(varname) enddo endif diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index ebbef60e0..2e236b62a 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -19,8 +19,10 @@ module ice_restart_driver use ice_kinds_mod use ice_arrays_column, only: oceanmixed_ice + use ice_communicate, only: my_task, master_task use ice_constants, only: c0, c1, p5, & field_loc_center, field_loc_NEcorner, & + field_loc_Eface, field_loc_Nface, & field_type_scalar, field_type_vector use ice_restart_shared, only: restart_dir, pointer_file, & runid, use_restart_time, lenstr, restart_coszen @@ -54,12 +56,16 @@ subroutine dumpfile(filename_spec) use ice_domain, only: nblocks use ice_domain_size, only: nilyr, nslyr, ncat, max_blocks use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & - strocnxT, strocnyT, sst, frzmlt, iceumask, & + strocnxT, strocnyT, sst, frzmlt, iceumask, iceemask, icenmask, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & - stress12_1, stress12_2, stress12_3, stress12_4 + stress12_1, stress12_2, stress12_3, stress12_4, & + stresspT, stressmT, stress12T, & + stresspU, stressmU, stress12U use ice_flux, only: coszen - use ice_state, only: aicen, vicen, vsnon, trcrn, uvel, vvel + use ice_grid, only: grid_ice, tmask + use ice_state, only: aicen, vicen, vsnon, trcrn, uvel, vvel, & + uvelE, vvelE, uvelN, vvelN character(len=char_len_long), intent(in), optional :: filename_spec @@ -92,6 +98,20 @@ subroutine dumpfile(filename_spec) diag = .true. + !----------------------------------------------------------------- + ! 
Zero out tracers over land + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (.not. tmask(i,j,iblk)) trcrn(i,j,:,:,iblk) = c0 + enddo + enddo + enddo + !$OMP END PARALLEL DO + !----------------------------------------------------------------- ! state variables ! Tsfc is the only tracer written to binary files. All other @@ -127,6 +147,18 @@ subroutine dumpfile(filename_spec) call write_restart_field(nu_dump,0,uvel,'ruf8','uvel',1,diag) call write_restart_field(nu_dump,0,vvel,'ruf8','vvel',1,diag) + if (grid_ice == 'CD') then + call write_restart_field(nu_dump,0,uvelE,'ruf8','uvelE',1,diag) + call write_restart_field(nu_dump,0,vvelE,'ruf8','vvelE',1,diag) + call write_restart_field(nu_dump,0,uvelN,'ruf8','uvelN',1,diag) + call write_restart_field(nu_dump,0,vvelN,'ruf8','vvelN',1,diag) + endif + + if (grid_ice == 'C') then + call write_restart_field(nu_dump,0,uvelE,'ruf8','uvelE',1,diag) + call write_restart_field(nu_dump,0,vvelN,'ruf8','vvelN',1,diag) + endif + !----------------------------------------------------------------- ! radiation fields !----------------------------------------------------------------- @@ -164,6 +196,21 @@ subroutine dumpfile(filename_spec) call write_restart_field(nu_dump,0,stress12_2,'ruf8','stress12_2',1,diag) call write_restart_field(nu_dump,0,stress12_4,'ruf8','stress12_4',1,diag) + if (grid_ice == 'CD') then + call write_restart_field(nu_dump,0,stresspT ,'ruf8','stresspT' ,1,diag) + call write_restart_field(nu_dump,0,stressmT ,'ruf8','stressmT' ,1,diag) + call write_restart_field(nu_dump,0,stress12T,'ruf8','stress12T',1,diag) + call write_restart_field(nu_dump,0,stresspU ,'ruf8','stresspU' ,1,diag) + call write_restart_field(nu_dump,0,stressmU ,'ruf8','stressmU' ,1,diag) + call write_restart_field(nu_dump,0,stress12U,'ruf8','stress12U',1,diag) + endif + + if (grid_ice == 'C') then + call write_restart_field(nu_dump,0,stresspT ,'ruf8','stresspT' ,1,diag) + call write_restart_field(nu_dump,0,stressmT ,'ruf8','stressmT' ,1,diag) + call write_restart_field(nu_dump,0,stress12U,'ruf8','stress12U',1,diag) + endif + !----------------------------------------------------------------- ! ice mask for dynamics !----------------------------------------------------------------- @@ -180,6 +227,34 @@ subroutine dumpfile(filename_spec) !$OMP END PARALLEL DO call write_restart_field(nu_dump,0,work1,'ruf8','iceumask',1,diag) + if (grid_ice == 'CD' .or. grid_ice == 'C') then + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + if (icenmask(i,j,iblk)) work1(i,j,iblk) = c1 + enddo + enddo + enddo + !$OMP END PARALLEL DO + call write_restart_field(nu_dump,0,work1,'ruf8','icenmask',1,diag) + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + if (iceemask(i,j,iblk)) work1(i,j,iblk) = c1 + enddo + enddo + enddo + !$OMP END PARALLEL DO + call write_restart_field(nu_dump,0,work1,'ruf8','iceemask',1,diag) + + endif + ! 
for mixed layer model if (oceanmixed_ice) then call write_restart_field(nu_dump,0,sst,'ruf8','sst',1,diag) @@ -198,19 +273,21 @@ subroutine restartfile (ice_ic) use ice_boundary, only: ice_HaloUpdate_stress use ice_blocks, only: nghost, nx_block, ny_block use ice_calendar, only: istep0, npt, calendar - use ice_communicate, only: my_task, master_task use ice_domain, only: nblocks, halo_info use ice_domain_size, only: nilyr, nslyr, ncat, & max_blocks use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & - strocnxT, strocnyT, sst, frzmlt, iceumask, & + strocnxT, strocnyT, sst, frzmlt, iceumask, iceemask, icenmask, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & - stress12_1, stress12_2, stress12_3, stress12_4 + stress12_1, stress12_2, stress12_3, stress12_4, & + stresspT, stressmT, stress12T, & + stresspU, stressmU, stress12U use ice_flux, only: coszen - use ice_grid, only: tmask, grid_type + use ice_grid, only: tmask, grid_type, grid_ice use ice_state, only: trcr_depend, aice, vice, vsno, trcr, & aice0, aicen, vicen, vsnon, trcrn, aice_init, uvel, vvel, & + uvelE, vvelE, uvelN, vvelN, & trcr_base, nt_strata, n_trcr_strata character (*), optional :: ice_ic @@ -300,6 +377,30 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,vvel,'ruf8', & 'vvel',1,diag,field_loc_NEcorner, field_type_vector) + if (grid_ice == 'CD') then + if (query_field(nu_restart,'uvelE')) & + call read_restart_field(nu_restart,0,uvelE,'ruf8', & + 'uvelE',1,diag,field_loc_Eface, field_type_vector) + if (query_field(nu_restart,'vvelE')) & + call read_restart_field(nu_restart,0,vvelE,'ruf8', & + 'vvelE',1,diag,field_loc_Eface, field_type_vector) + if (query_field(nu_restart,'uvelN')) & + call read_restart_field(nu_restart,0,uvelN,'ruf8', & + 'uvelN',1,diag,field_loc_Nface, field_type_vector) + if (query_field(nu_restart,'vvelN')) & + call read_restart_field(nu_restart,0,vvelN,'ruf8', & + 'vvelN',1,diag,field_loc_Nface, field_type_vector) + endif + + if (grid_ice == 'C') then + if (query_field(nu_restart,'uvelE')) & + call read_restart_field(nu_restart,0,uvelE,'ruf8', & + 'uvelE',1,diag,field_loc_Eface, field_type_vector) + if (query_field(nu_restart,'vvelN')) & + call read_restart_field(nu_restart,0,vvelN,'ruf8', & + 'vvelN',1,diag,field_loc_Nface, field_type_vector) + endif + !----------------------------------------------------------------- ! radiation fields !----------------------------------------------------------------- @@ -367,6 +468,27 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,stress12_4,'ruf8', & 'stress12_4',1,diag,field_loc_center,field_type_scalar) ! stress12_4 + if (grid_ice == 'CD' .or. grid_ice == 'C') then + if (query_field(nu_restart,'stresspT')) & + call read_restart_field(nu_restart,0,stresspT,'ruf8', & + 'stresspT' ,1,diag,field_loc_center,field_type_scalar) ! stresspT + if (query_field(nu_restart,'stressmT')) & + call read_restart_field(nu_restart,0,stressmT,'ruf8', & + 'stressmT' ,1,diag,field_loc_center,field_type_scalar) ! stressmT + if (query_field(nu_restart,'stress12T')) & + call read_restart_field(nu_restart,0,stress12T,'ruf8', & + 'stress12T',1,diag,field_loc_center,field_type_scalar) ! stress12T + if (query_field(nu_restart,'stresspU')) & + call read_restart_field(nu_restart,0,stresspU,'ruf8', & + 'stresspU' ,1,diag,field_loc_NEcorner,field_type_scalar) ! 
stresspU + if (query_field(nu_restart,'stressmU')) & + call read_restart_field(nu_restart,0,stressmU,'ruf8', & + 'stressmU' ,1,diag,field_loc_NEcorner,field_type_scalar) ! stressmU + if (query_field(nu_restart,'stress12U')) & + call read_restart_field(nu_restart,0,stress12U,'ruf8', & + 'stress12U',1,diag,field_loc_NEcorner,field_type_scalar) ! stress12U + endif + if (trim(grid_type) == 'tripole') then call ice_HaloUpdate_stress(stressp_1, stressp_3, halo_info, & field_loc_center, field_type_scalar) @@ -394,6 +516,7 @@ subroutine restartfile (ice_ic) field_loc_center, field_type_scalar) call ice_HaloUpdate_stress(stress12_4, stress12_2, halo_info, & field_loc_center, field_type_scalar) + ! TODO: CD-grid endif !----------------------------------------------------------------- @@ -416,6 +539,42 @@ subroutine restartfile (ice_ic) enddo !$OMP END PARALLEL DO + if (grid_ice == 'CD' .or. grid_ice == 'C') then + + if (query_field(nu_restart,'icenmask')) then + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'icenmask',1,diag,field_loc_center, field_type_scalar) + + icenmask(:,:,:) = .false. + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (work1(i,j,iblk) > p5) icenmask(i,j,iblk) = .true. + enddo + enddo + enddo + !$OMP END PARALLEL DO + endif + + if (query_field(nu_restart,'iceemask')) then + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'iceemask',1,diag,field_loc_center, field_type_scalar) + + iceemask(:,:,:) = .false. + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (work1(i,j,iblk) > p5) iceemask(i,j,iblk) = .true. + enddo + enddo + enddo + !$OMP END PARALLEL DO + endif + + endif + ! set Tsfcn to c0 on land !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks @@ -476,6 +635,7 @@ subroutine restartfile (ice_ic) stress12_4(i,j,iblk) = c0 enddo enddo + ! TODO: CD-grid ? enddo !$OMP END PARALLEL DO @@ -543,7 +703,6 @@ subroutine restartfile_v4 (ice_ic) use ice_blocks, only: nghost, nx_block, ny_block use ice_calendar, only: istep0, istep1, timesecs, calendar, npt, & set_date_from_timesecs - use ice_communicate, only: my_task, master_task use ice_domain, only: nblocks, distrb_info use ice_domain_size, only: nilyr, nslyr, ncat, nx_global, ny_global, & max_blocks diff --git a/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 index a6f42a6a5..5dd35fdf4 100644 --- a/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 @@ -30,7 +30,8 @@ module ice_restart implicit none private public :: init_restart_write, init_restart_read, & - read_restart_field, write_restart_field, final_restart + read_restart_field, write_restart_field, final_restart, & + query_field real(kind=dbl_kind) :: time_forc = -99. ! historic now local @@ -892,6 +893,24 @@ subroutine final_restart() end subroutine final_restart +!======================================================================= + +! Inquire field existance, doesn't work in binary files so set to true and return +! author T. Craig + + logical function query_field(nu,vname) + + integer (kind=int_kind), intent(in) :: nu ! unit number + character (len=*) , intent(in) :: vname ! variable name + + ! local variables + + character(len=*), parameter :: subname = '(query_field)' + + query_field = .true. 
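+
+    ! Binary restart files carry no field catalog, so existence cannot
+    ! actually be checked here; returning .true. means callers always
+    ! attempt the read. Restarting from an older binary file that lacks
+    ! a requested field is therefore not handled gracefully.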
+ + end function query_field + !======================================================================= end module ice_restart diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 index 493a91c1e..5587f2b6b 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 @@ -54,9 +54,11 @@ subroutine ice_write_hist (ns) use ice_domain, only: distrb_info use ice_domain_size, only: nx_global, ny_global, max_nstrm, max_blocks use ice_gather_scatter, only: gather_global - use ice_grid, only: TLON, TLAT, ULON, ULAT, hm, bm, tarea, uarea, & - dxu, dxt, dyu, dyt, HTN, HTE, ANGLE, ANGLET, & - lont_bounds, latt_bounds, lonu_bounds, latu_bounds + use ice_grid, only: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT, & + hm, uvm, npm, epm, bm, tarea, uarea, narea, earea, & + dxu, dxt, dyu, dyt, dxn, dyn, dxe, dye, HTN, HTE, ANGLE, ANGLET, & + lont_bounds, latt_bounds, lonu_bounds, latu_bounds, & + lonn_bounds, latn_bounds, lone_bounds, late_bounds use ice_history_shared use ice_restart_shared, only: runid, lcdf64 #ifdef USE_NETCDF @@ -91,15 +93,15 @@ subroutine ice_write_hist (ns) character (char_len) :: start_time,current_date,current_time character (len=8) :: cdate - ! 4 coordinate variables: TLON, TLAT, ULON, ULAT - INTEGER (kind=int_kind), PARAMETER :: ncoord = 4 + ! 8 coordinate variables: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT + INTEGER (kind=int_kind), PARAMETER :: ncoord = 8 ! 4 vertices in each grid cell INTEGER (kind=int_kind), PARAMETER :: nverts = 4 - ! 4 variables describe T, U grid boundaries: + ! 8 variables describe T, U grid boundaries: ! lont_bounds, latt_bounds, lonu_bounds, latu_bounds - INTEGER (kind=int_kind), PARAMETER :: nvar_verts = 4 + INTEGER (kind=int_kind), PARAMETER :: nvar_verts = 8 TYPE coord_attributes ! 
netcdf coordinate attributes character (len=11) :: short_name @@ -112,10 +114,10 @@ subroutine ice_write_hist (ns) character (len=20) :: coordinates END TYPE req_attributes - TYPE(req_attributes), dimension(nvar) :: var - TYPE(coord_attributes), dimension(ncoord) :: coord_var + TYPE(req_attributes), dimension(nvar_grd) :: var_grd + TYPE(coord_attributes), dimension(ncoord) :: var_coord TYPE(coord_attributes), dimension(nvar_verts) :: var_nverts - TYPE(coord_attributes), dimension(nvarz) :: var_nz + TYPE(coord_attributes), dimension(nvar_grdz) :: var_grdz CHARACTER (char_len), dimension(ncoord) :: coord_bounds character(len=*), parameter :: subname = '(ice_write_hist)' @@ -270,65 +272,118 @@ subroutine ice_write_hist (ns) ind = 0 ind = ind + 1 - coord_var(ind) = coord_attributes('TLON', & + var_coord(ind) = coord_attributes('TLON', & 'T grid center longitude', 'degrees_east') coord_bounds(ind) = 'lont_bounds' ind = ind + 1 - coord_var(ind) = coord_attributes('TLAT', & + var_coord(ind) = coord_attributes('TLAT', & 'T grid center latitude', 'degrees_north') coord_bounds(ind) = 'latt_bounds' ind = ind + 1 - coord_var(ind) = coord_attributes('ULON', & + var_coord(ind) = coord_attributes('ULON', & 'U grid center longitude', 'degrees_east') coord_bounds(ind) = 'lonu_bounds' ind = ind + 1 - coord_var(ind) = coord_attributes('ULAT', & + var_coord(ind) = coord_attributes('ULAT', & 'U grid center latitude', 'degrees_north') coord_bounds(ind) = 'latu_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('NLON', & + 'N grid center longitude', 'degrees_east') + coord_bounds(ind) = 'lonn_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('NLAT', & + 'N grid center latitude', 'degrees_north') + coord_bounds(ind) = 'latn_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('ELON', & + 'E grid center longitude', 'degrees_east') + coord_bounds(ind) = 'lone_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('ELAT', & + 'E grid center latitude', 'degrees_north') + coord_bounds(ind) = 'late_bounds' - var_nz(1) = coord_attributes('NCAT', 'category maximum thickness', 'm') - var_nz(2) = coord_attributes('VGRDi', 'vertical ice levels', '1') - var_nz(3) = coord_attributes('VGRDs', 'vertical snow levels', '1') - var_nz(4) = coord_attributes('VGRDb', 'vertical ice-bio levels', '1') - var_nz(5) = coord_attributes('VGRDa', 'vertical snow-ice-bio levels', '1') - var_nz(6) = coord_attributes('NFSD', 'category floe size (center)', 'm') + var_grdz(1) = coord_attributes('NCAT', 'category maximum thickness', 'm') + var_grdz(2) = coord_attributes('VGRDi', 'vertical ice levels', '1') + var_grdz(3) = coord_attributes('VGRDs', 'vertical snow levels', '1') + var_grdz(4) = coord_attributes('VGRDb', 'vertical ice-bio levels', '1') + var_grdz(5) = coord_attributes('VGRDa', 'vertical snow-ice-bio levels', '1') + var_grdz(6) = coord_attributes('NFSD', 'category floe size (center)', 'm') !----------------------------------------------------------------- ! 
define information for optional time-invariant variables !----------------------------------------------------------------- - var(n_tarea)%req = coord_attributes('tarea', & + var_grd(n_tmask)%req = coord_attributes('tmask', & + 'mask of T grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_tmask)%coordinates = 'TLON TLAT' + var_grd(n_umask)%req = coord_attributes('umask', & + 'mask of U grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_umask)%coordinates = 'ULON ULAT' + var_grd(n_nmask)%req = coord_attributes('nmask', & + 'mask of N grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_nmask)%coordinates = 'NLON NLAT' + var_grd(n_emask)%req = coord_attributes('emask', & + 'mask of E grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_emask)%coordinates = 'ELON ELAT' + + var_grd(n_tarea)%req = coord_attributes('tarea', & 'area of T grid cells', 'm^2') - var(n_tarea)%coordinates = 'TLON TLAT' - var(n_uarea)%req = coord_attributes('uarea', & + var_grd(n_tarea)%coordinates = 'TLON TLAT' + var_grd(n_uarea)%req = coord_attributes('uarea', & 'area of U grid cells', 'm^2') - var(n_uarea)%coordinates = 'ULON ULAT' - var(n_dxt)%req = coord_attributes('dxt', & + var_grd(n_uarea)%coordinates = 'ULON ULAT' + var_grd(n_narea)%req = coord_attributes('narea', & + 'area of N grid cells', 'm^2') + var_grd(n_narea)%coordinates = 'NLON NLAT' + var_grd(n_earea)%req = coord_attributes('earea', & + 'area of E grid cells', 'm^2') + var_grd(n_earea)%coordinates = 'ELON ELAT' + + var_grd(n_blkmask)%req = coord_attributes('blkmask', & + 'block id of T grid cells, mytask + iblk/100', 'unitless') + var_grd(n_blkmask)%coordinates = 'TLON TLAT' + + var_grd(n_dxt)%req = coord_attributes('dxt', & 'T cell width through middle', 'm') - var(n_dxt)%coordinates = 'TLON TLAT' - var(n_dyt)%req = coord_attributes('dyt', & + var_grd(n_dxt)%coordinates = 'TLON TLAT' + var_grd(n_dyt)%req = coord_attributes('dyt', & 'T cell height through middle', 'm') - var(n_dyt)%coordinates = 'TLON TLAT' - var(n_dxu)%req = coord_attributes('dxu', & + var_grd(n_dyt)%coordinates = 'TLON TLAT' + var_grd(n_dxu)%req = coord_attributes('dxu', & 'U cell width through middle', 'm') - var(n_dxu)%coordinates = 'ULON ULAT' - var(n_dyu)%req = coord_attributes('dyu', & + var_grd(n_dxu)%coordinates = 'ULON ULAT' + var_grd(n_dyu)%req = coord_attributes('dyu', & 'U cell height through middle', 'm') - var(n_dyu)%coordinates = 'ULON ULAT' - var(n_HTN)%req = coord_attributes('HTN', & + var_grd(n_dyu)%coordinates = 'ULON ULAT' + var_grd(n_dxn)%req = coord_attributes('dxn', & + 'N cell width through middle', 'm') + var_grd(n_dxn)%coordinates = 'NLON NLAT' + var_grd(n_dyn)%req = coord_attributes('dyn', & + 'N cell height through middle', 'm') + var_grd(n_dyn)%coordinates = 'NLON NLAT' + var_grd(n_dxe)%req = coord_attributes('dxe', & + 'E cell width through middle', 'm') + var_grd(n_dxe)%coordinates = 'ELON ELAT' + var_grd(n_dye)%req = coord_attributes('dye', & + 'E cell height through middle', 'm') + var_grd(n_dye)%coordinates = 'ELON ELAT' + + var_grd(n_HTN)%req = coord_attributes('HTN', & 'T cell width on North side','m') - var(n_HTN)%coordinates = 'TLON TLAT' - var(n_HTE)%req = coord_attributes('HTE', & + var_grd(n_HTN)%coordinates = 'TLON TLAT' + var_grd(n_HTE)%req = coord_attributes('HTE', & 'T cell width on East side', 'm') - var(n_HTE)%coordinates = 'TLON TLAT' - var(n_ANGLE)%req = coord_attributes('ANGLE', & + var_grd(n_HTE)%coordinates = 'TLON TLAT' + var_grd(n_ANGLE)%req = coord_attributes('ANGLE', & 'angle grid makes with latitude 
line on U grid', & 'radians') - var(n_ANGLE)%coordinates = 'ULON ULAT' - var(n_ANGLET)%req = coord_attributes('ANGLET', & + var_grd(n_ANGLE)%coordinates = 'ULON ULAT' + var_grd(n_ANGLET)%req = coord_attributes('ANGLET', & 'angle grid makes with latitude line on T grid', & 'radians') - var(n_ANGLET)%coordinates = 'TLON TLAT' + var_grd(n_ANGLET)%coordinates = 'TLON TLAT' ! These fields are required for CF compliance ! dimensions (nx,ny,nverts) @@ -340,6 +395,14 @@ subroutine ice_write_hist (ns) 'longitude boundaries of U cells', 'degrees_east') var_nverts(n_latu_bnds) = coord_attributes('latu_bounds', & 'latitude boundaries of U cells', 'degrees_north') + var_nverts(n_lonn_bnds) = coord_attributes('lonn_bounds', & + 'longitude boundaries of N cells', 'degrees_east') + var_nverts(n_latn_bnds) = coord_attributes('latn_bounds', & + 'latitude boundaries of N cells', 'degrees_north') + var_nverts(n_lone_bnds) = coord_attributes('lone_bounds', & + 'longitude boundaries of E cells', 'degrees_east') + var_nverts(n_late_bnds) = coord_attributes('late_bounds', & + 'latitude boundaries of E cells', 'degrees_north') !----------------------------------------------------------------- ! define attributes for time-invariant variables @@ -350,28 +413,28 @@ subroutine ice_write_hist (ns) dimid(3) = timid do i = 1, ncoord - status = nf90_def_var(ncid, coord_var(i)%short_name, lprecision, & + status = nf90_def_var(ncid, var_coord(i)%short_name, lprecision, & dimid(1:2), varid) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining short_name for '//coord_var(i)%short_name) - status = nf90_put_att(ncid,varid,'long_name',coord_var(i)%long_name) + 'ERROR: defining short_name for '//var_coord(i)%short_name) + status = nf90_put_att(ncid,varid,'long_name',var_coord(i)%long_name) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//coord_var(i)%short_name) - status = nf90_put_att(ncid, varid, 'units', coord_var(i)%units) + 'ERROR: defining long_name for '//var_coord(i)%short_name) + status = nf90_put_att(ncid, varid, 'units', var_coord(i)%units) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//coord_var(i)%short_name) - call ice_write_hist_fill(ncid,varid,coord_var(i)%short_name,history_precision) - if (coord_var(i)%short_name == 'ULAT') then + 'ERROR: defining units for '//var_coord(i)%short_name) + call ice_write_hist_fill(ncid,varid,var_coord(i)%short_name,history_precision) + if (var_coord(i)%short_name == 'ULAT') then status = nf90_put_att(ncid,varid,'comment', & 'Latitude of NE corner of T grid cell') if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining comment for '//coord_var(i)%short_name) + 'ERROR: defining comment for '//var_coord(i)%short_name) endif if (f_bounds) then status = nf90_put_att(ncid, varid, 'bounds', coord_bounds(i)) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining bounds for '//coord_var(i)%short_name) - endif + 'ERROR: defining bounds for '//var_coord(i)%short_name) + endif enddo ! 
Extra dimensions (NCAT, NZILYR, NZSLYR, NZBLYR, NZALYR, NFSD) @@ -382,62 +445,37 @@ subroutine ice_write_hist (ns) dimidex(5)=kmtida dimidex(6)=fmtid - do i = 1, nvarz + do i = 1, nvar_grdz if (igrdz(i)) then - status = nf90_def_var(ncid, var_nz(i)%short_name, & + status = nf90_def_var(ncid, var_grdz(i)%short_name, & lprecision, dimidex(i), varid) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining short_name for '//var_nz(i)%short_name) - status = nf90_put_att(ncid,varid,'long_name',var_nz(i)%long_name) + 'ERROR: defining short_name for '//var_grdz(i)%short_name) + status = nf90_put_att(ncid,varid,'long_name',var_grdz(i)%long_name) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//var_nz(i)%short_name) - status = nf90_put_att(ncid, varid, 'units', var_nz(i)%units) + 'ERROR: defining long_name for '//var_grdz(i)%short_name) + status = nf90_put_att(ncid, varid, 'units', var_grdz(i)%units) if (Status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//var_nz(i)%short_name) + 'ERROR: defining units for '//var_grdz(i)%short_name) endif enddo - ! Attributes for tmask, blkmask defined separately, since they have no units - if (igrd(n_tmask)) then - status = nf90_def_var(ncid, 'tmask', lprecision, dimid(1:2), varid) - if (status /= nf90_noerr) call abort_ice(subname//'ERROR: defining var tmask') - status = nf90_put_att(ncid,varid, 'long_name', 'ocean grid mask') - if (status /= nf90_noerr) call abort_ice(subname//'ERROR: tmask long_name') - status = nf90_put_att(ncid, varid, 'coordinates', 'TLON TLAT') - if (status /= nf90_noerr) call abort_ice(subname//'ERROR: tmask units') - status = nf90_put_att(ncid,varid,'comment', '0 = land, 1 = ocean') - if (status /= nf90_noerr) call abort_ice(subname//'ERROR: tmask comment') - call ice_write_hist_fill(ncid,varid,'tmask',history_precision) - endif - - if (igrd(n_blkmask)) then - status = nf90_def_var(ncid, 'blkmask', lprecision, dimid(1:2), varid) - if (status /= nf90_noerr) call abort_ice(subname//'ERROR: defining var blkmask') - status = nf90_put_att(ncid,varid, 'long_name', 'ice grid block mask') - if (status /= nf90_noerr) call abort_ice(subname//'ERROR: blkmask long_name') - status = nf90_put_att(ncid, varid, 'coordinates', 'TLON TLAT') - if (status /= nf90_noerr) call abort_ice(subname//'ERROR: blkmask units') - status = nf90_put_att(ncid,varid,'comment', 'mytask + iblk/100') - if (status /= nf90_noerr) call abort_ice(subname//'ERROR: blkmask comment') - call ice_write_hist_fill(ncid,varid,'blkmask',history_precision) - endif - - do i = 3, nvar ! 
note n_tmask=1, n_blkmask=2 + do i = 1, nvar_grd if (igrd(i)) then - status = nf90_def_var(ncid, var(i)%req%short_name, & + status = nf90_def_var(ncid, var_grd(i)%req%short_name, & lprecision, dimid(1:2), varid) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining variable '//var(i)%req%short_name) - status = nf90_put_att(ncid,varid, 'long_name', var(i)%req%long_name) + 'ERROR: defining variable '//var_grd(i)%req%short_name) + status = nf90_put_att(ncid,varid, 'long_name', var_grd(i)%req%long_name) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//var(i)%req%short_name) - status = nf90_put_att(ncid, varid, 'units', var(i)%req%units) + 'ERROR: defining long_name for '//var_grd(i)%req%short_name) + status = nf90_put_att(ncid, varid, 'units', var_grd(i)%req%units) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//var(i)%req%short_name) - status = nf90_put_att(ncid, varid, 'coordinates', var(i)%coordinates) + 'ERROR: defining units for '//var_grd(i)%req%short_name) + status = nf90_put_att(ncid, varid, 'coordinates', var_grd(i)%coordinates) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//var(i)%req%short_name) - call ice_write_hist_fill(ncid,varid,var(i)%req%short_name,history_precision) + 'ERROR: defining coordinates for '//var_grd(i)%req%short_name) + call ice_write_hist_fill(ncid,varid,var_grd(i)%req%short_name,history_precision) endif enddo @@ -467,51 +505,7 @@ subroutine ice_write_hist (ns) lprecision, dimid, varid) if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - - !----------------------------------------------------------------- - ! Add cell_methods attribute to variables if averaged - !----------------------------------------------------------------- - if (hist_avg) then - if (TRIM(avail_hist_fields(n)%vname)/='sig1' & - .or.TRIM(avail_hist_fields(n)%vname)/='sig2' & - .or.TRIM(avail_hist_fields(n)%vname)/='sistreave' & - .or.TRIM(avail_hist_fields(n)%vname)/='sistremax' & - .or.TRIM(avail_hist_fields(n)%vname)/='sigP') then - status = nf90_put_att(ncid,varid,'cell_methods','time: mean') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell methods for '//avail_hist_fields(n)%vname) - endif - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg & - .or. n==n_divu(ns) .or. n==n_shear(ns) & ! snapshots - .or. n==n_sig1(ns) .or. n==n_sig2(ns) & - .or. n==n_sigP(ns) .or. n==n_trsig(ns) & - .or. n==n_sistreave(ns) .or. n==n_sistremax(ns) & - .or. 
n==n_mlt_onset(ns) .or. n==n_frz_onset(ns) & - .or. n==n_hisnap(ns) .or. n==n_aisnap(ns)) then - status = nf90_put_att(ncid,varid,'time_rep','instantaneous') - else - status = nf90_put_att(ncid,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_2D @@ -526,39 +520,7 @@ subroutine ice_write_hist (ns) lprecision, dimidz, varid) if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - - !----------------------------------------------------------------- - ! Add cell_methods attribute to variables if averaged - !----------------------------------------------------------------- - if (hist_avg) then - status = nf90_put_att(ncid,varid,'cell_methods','time: mean') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell methods for '//avail_hist_fields(n)%vname) - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = nf90_put_att(ncid,varid,'time_rep','instantaneous') - else - status = nf90_put_att(ncid,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Dc @@ -573,24 +535,7 @@ subroutine ice_write_hist (ns) lprecision, dimidz, varid) if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! 
num_avail_hist_fields_3Dz @@ -605,24 +550,7 @@ subroutine ice_write_hist (ns) lprecision, dimidz, varid) if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Db @@ -637,24 +565,7 @@ subroutine ice_write_hist (ns) lprecision, dimidz, varid) if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Da @@ -669,24 +580,7 @@ subroutine ice_write_hist (ns) lprecision, dimidz, varid) if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! 
num_avail_hist_fields_3Df @@ -703,39 +597,7 @@ subroutine ice_write_hist (ns) lprecision, dimidcz(1:4), varid) ! ferret if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - - !----------------------------------------------------------------- - ! Add cell_methods attribute to variables if averaged - !----------------------------------------------------------------- - if (hist_avg) then - status = nf90_put_att(ncid,varid,'cell_methods','time: mean') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell methods for '//avail_hist_fields(n)%vname) - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = nf90_put_att(ncid,varid,'time_rep','instantaneous') - else - status = nf90_put_att(ncid,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_4Di @@ -752,39 +614,7 @@ subroutine ice_write_hist (ns) lprecision, dimidcz(1:4), varid) ! ferret if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - - !----------------------------------------------------------------- - ! Add cell_methods attribute to variables if averaged - !----------------------------------------------------------------- - if (hist_avg) then - status = nf90_put_att(ncid,varid,'cell_methods','time: mean') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell methods for '//avail_hist_fields(n)%vname) - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. 
hist_avg) then - status = nf90_put_att(ncid,varid,'time_rep','instantaneous') - else - status = nf90_put_att(ncid,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_4Ds @@ -801,39 +631,7 @@ subroutine ice_write_hist (ns) lprecision, dimidcz(1:4), varid) ! ferret if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - - !----------------------------------------------------------------- - ! Add cell_methods attribute to variables if averaged - !----------------------------------------------------------------- - if (hist_avg) then - status = nf90_put_att(ncid,varid,'cell_methods','time: mean') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell methods for '//avail_hist_fields(n)%vname) - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = nf90_put_att(ncid,varid,'time_rep','instantaneous') - else - status = nf90_put_att(ncid,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_4Df @@ -951,8 +749,8 @@ subroutine ice_write_hist (ns) !----------------------------------------------------------------- do i = 1,ncoord - call broadcast_scalar(coord_var(i)%short_name,master_task) - SELECT CASE (coord_var(i)%short_name) + call broadcast_scalar(var_coord(i)%short_name,master_task) + SELECT CASE (var_coord(i)%short_name) CASE ('TLON') ! 
Convert T grid longitude from -180 -> 180 to 0 to 360 work1 = TLON*rad_to_deg + c360 @@ -968,28 +766,40 @@ subroutine ice_write_hist (ns) CASE ('ULAT') work1 = ULAT*rad_to_deg call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('NLON') + work1 = NLON*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('NLAT') + work1 = NLAT*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('ELON') + work1 = ELON*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('ELAT') + work1 = ELAT*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) END SELECT if (my_task == master_task) then - status = nf90_inq_varid(ncid, coord_var(i)%short_name, varid) + status = nf90_inq_varid(ncid, var_coord(i)%short_name, varid) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//coord_var(i)%short_name) + 'ERROR: getting varid for '//var_coord(i)%short_name) status = nf90_put_var(ncid,varid,work_g1) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing'//coord_var(i)%short_name) + 'ERROR: writing'//var_coord(i)%short_name) endif enddo ! Extra dimensions (NCAT, NFSD, VGRD*) - do i = 1, nvarz + do i = 1, nvar_grdz if (igrdz(i)) then - call broadcast_scalar(var_nz(i)%short_name,master_task) + call broadcast_scalar(var_grdz(i)%short_name,master_task) if (my_task == master_task) then - status = nf90_inq_varid(ncid, var_nz(i)%short_name, varid) + status = nf90_inq_varid(ncid, var_grdz(i)%short_name, varid) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//var_nz(i)%short_name) - SELECT CASE (var_nz(i)%short_name) + 'ERROR: getting varid for '//var_grdz(i)%short_name) + SELECT CASE (var_grdz(i)%short_name) CASE ('NCAT') status = nf90_put_var(ncid,varid,hin_max(1:ncat_hist)) CASE ('NFSD') @@ -1004,7 +814,7 @@ subroutine ice_write_hist (ns) status = nf90_put_var(ncid,varid,(/(k, k=1,nzalyr)/)) END SELECT if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing'//var_nz(i)%short_name) + 'ERROR: writing'//var_grdz(i)%short_name) endif endif enddo @@ -1013,38 +823,28 @@ subroutine ice_write_hist (ns) ! write grid masks, area and rotation angle !----------------------------------------------------------------- - if (igrd(n_tmask)) then - call gather_global(work_g1, hm, master_task, distrb_info) - if (my_task == master_task) then - status = nf90_inq_varid(ncid, 'tmask', varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for tmask') - status = nf90_put_var(ncid,varid,work_g1) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing variable tmask') - endif - endif - - if (igrd(n_blkmask)) then - call gather_global(work_g1, bm, master_task, distrb_info) - if (my_task == master_task) then - status = nf90_inq_varid(ncid, 'blkmask', varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for blkmask') - status = nf90_put_var(ncid,varid,work_g1) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing variable blkmask') - endif - endif - - do i = 3, nvar ! 
note n_tmask=1, n_blkmask=2 + do i = 1, nvar_grd if (igrd(i)) then - call broadcast_scalar(var(i)%req%short_name,master_task) - SELECT CASE (var(i)%req%short_name) + call broadcast_scalar(var_grd(i)%req%short_name,master_task) + SELECT CASE (var_grd(i)%req%short_name) + CASE ('tmask') + call gather_global(work_g1, hm, master_task, distrb_info) + CASE ('umask') + call gather_global(work_g1, uvm, master_task, distrb_info) + CASE ('nmask') + call gather_global(work_g1, npm, master_task, distrb_info) + CASE ('emask') + call gather_global(work_g1, epm, master_task, distrb_info) CASE ('tarea') call gather_global(work_g1, tarea, master_task, distrb_info) CASE ('uarea') call gather_global(work_g1, uarea, master_task, distrb_info) + CASE ('narea') + call gather_global(work_g1, narea, master_task, distrb_info) + CASE ('earea') + call gather_global(work_g1, earea, master_task, distrb_info) + CASE ('blkmask') + call gather_global(work_g1, bm, master_task, distrb_info) CASE ('dxu') call gather_global(work_g1, dxu, master_task, distrb_info) CASE ('dyu') @@ -1053,6 +853,14 @@ subroutine ice_write_hist (ns) call gather_global(work_g1, dxt, master_task, distrb_info) CASE ('dyt') call gather_global(work_g1, dyt, master_task, distrb_info) + CASE ('dxn') + call gather_global(work_g1, dxn, master_task, distrb_info) + CASE ('dyn') + call gather_global(work_g1, dyn, master_task, distrb_info) + CASE ('dxe') + call gather_global(work_g1, dxe, master_task, distrb_info) + CASE ('dye') + call gather_global(work_g1, dye, master_task, distrb_info) CASE ('HTN') call gather_global(work_g1, HTN, master_task, distrb_info) CASE ('HTE') @@ -1064,12 +872,12 @@ subroutine ice_write_hist (ns) END SELECT if (my_task == master_task) then - status = nf90_inq_varid(ncid, var(i)%req%short_name, varid) + status = nf90_inq_varid(ncid, var_grd(i)%req%short_name, varid) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//var(i)%req%short_name) + 'ERROR: getting varid for '//var_grd(i)%req%short_name) status = nf90_put_var(ncid,varid,work_g1) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing variable '//var(i)%req%short_name) + 'ERROR: writing variable '//var_grd(i)%req%short_name) endif endif enddo @@ -1086,7 +894,7 @@ subroutine ice_write_hist (ns) endif work1_3(:,:,:) = c0 - work1 (:,:,:) = c0 + work1 (:,:,:) = c0 do i = 1, nvar_verts call broadcast_scalar(var_nverts(i)%short_name,master_task) @@ -1115,6 +923,30 @@ subroutine ice_write_hist (ns) call gather_global(work_g1, work1, master_task, distrb_info) if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) enddo + CASE ('lonn_bounds') + do ivertex = 1, nverts + work1(:,:,:) = lonn_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) + enddo + CASE ('latn_bounds') + do ivertex = 1, nverts + work1(:,:,:) = latn_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) + enddo + CASE ('lone_bounds') + do ivertex = 1, nverts + work1(:,:,:) = lone_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) + enddo + CASE ('late_bounds') + do ivertex = 1, nverts + work1(:,:,:) = late_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) + enddo END SELECT 
if (my_task == master_task) then @@ -1376,6 +1208,94 @@ subroutine ice_write_hist (ns) end subroutine ice_write_hist +!======================================================================= + + subroutine ice_write_hist_attrs(ncid, varid, hfield, ns) + + use ice_kinds_mod + use ice_calendar, only: histfreq, histfreq_n + use ice_history_shared, only: ice_hist_field, history_precision, & + hist_avg +#ifdef USE_NETCDF + use netcdf +#endif + + integer (kind=int_kind), intent(in) :: ncid ! netcdf file id + integer (kind=int_kind), intent(in) :: varid ! netcdf variable id + type (ice_hist_field) , intent(in) :: hfield ! history file info + integer (kind=int_kind), intent(in) :: ns ! history stream + + ! local variables + + integer (kind=int_kind) :: status + character(len=*), parameter :: subname = '(ice_write_hist_attrs)' + +#ifdef USE_NETCDF + status = nf90_put_att(ncid,varid,'units', hfield%vunit) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining units for '//hfield%vname) + + status = nf90_put_att(ncid,varid, 'long_name', hfield%vdesc) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining long_name for '//hfield%vname) + + status = nf90_put_att(ncid,varid,'coordinates', hfield%vcoord) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining coordinates for '//hfield%vname) + + status = nf90_put_att(ncid,varid,'cell_measures', hfield%vcellmeas) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining cell measures for '//hfield%vname) + + if (hfield%vcomment /= "none") then + status = nf90_put_att(ncid,varid,'comment', hfield%vcomment) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining comment for '//hfield%vname) + endif + + call ice_write_hist_fill(ncid,varid,hfield%vname,history_precision) + + ! Add cell_methods attribute to variables if averaged + if (hist_avg) then + if (TRIM(hfield%vname(1:4))/='sig1' & + .and.TRIM(hfield%vname(1:4))/='sig2' & + .and.TRIM(hfield%vname(1:9))/='sistreave' & + .and.TRIM(hfield%vname(1:9))/='sistremax' & + .and.TRIM(hfield%vname(1:4))/='sigP') then + status = nf90_put_att(ncid,varid,'cell_methods','time: mean') + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining cell methods for '//hfield%vname) + endif + endif + + if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & + .or..not. 
hist_avg & + .or.TRIM(hfield%vname(1:4))=='divu' & + .or.TRIM(hfield%vname(1:5))=='shear' & + .or.TRIM(hfield%vname(1:4))=='sig1' & + .or.TRIM(hfield%vname(1:4))=='sig2' & + .or.TRIM(hfield%vname(1:4))=='sigP' & + .or.TRIM(hfield%vname(1:5))=='trsig' & + .or.TRIM(hfield%vname(1:9))=='sistreave' & + .or.TRIM(hfield%vname(1:9))=='sistremax' & + .or.TRIM(hfield%vname(1:9))=='mlt_onset' & + .or.TRIM(hfield%vname(1:9))=='frz_onset' & + .or.TRIM(hfield%vname(1:6))=='hisnap' & + .or.TRIM(hfield%vname(1:6))=='aisnap') then + status = nf90_put_att(ncid,varid,'time_rep','instantaneous') + else + status = nf90_put_att(ncid,varid,'time_rep','averaged') + endif + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining time rep for '//hfield%vname) + +#else + call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & + file=__FILE__, line=__LINE__) +#endif + + end subroutine ice_write_hist_attrs + !======================================================================= subroutine ice_write_hist_fill(ncid,varid,vname,precision) @@ -1395,6 +1315,7 @@ subroutine ice_write_hist_fill(ncid,varid,vname,precision) integer (kind=int_kind) :: status character(len=*), parameter :: subname = '(ice_write_hist_fill)' +#ifdef USE_NETCDF if (precision == 8) then status = nf90_put_att(ncid,varid,'missing_value',spval_dbl) else @@ -1410,6 +1331,10 @@ subroutine ice_write_hist_fill(ncid,varid,vname,precision) endif if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining _FillValue for '//trim(vname)) +#else + call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & + file=__FILE__, line=__LINE__) +#endif end subroutine ice_write_hist_fill diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 index f6002ff40..f117384d9 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 @@ -10,6 +10,7 @@ module ice_restart use ice_broadcast + use ice_communicate, only: my_task, master_task use ice_kinds_mod #ifdef USE_NETCDF use netcdf @@ -27,7 +28,8 @@ module ice_restart implicit none private public :: init_restart_write, init_restart_read, & - read_restart_field, write_restart_field, final_restart + read_restart_field, write_restart_field, final_restart, & + query_field integer (kind=int_kind) :: ncid @@ -44,7 +46,6 @@ subroutine init_restart_read(ice_ic) use ice_calendar, only: msec, mmonth, mday, myear, & istep0, istep1, npt - use ice_communicate, only: my_task, master_task character(len=char_len_long), intent(in), optional :: ice_ic @@ -131,12 +132,12 @@ subroutine init_restart_write(filename_spec) use ice_blocks, only: nghost use ice_calendar, only: msec, mmonth, mday, myear, istep1 - use ice_communicate, only: my_task, master_task use ice_domain_size, only: nx_global, ny_global, ncat, nilyr, nslyr, & n_iso, n_aero, nblyr, n_zaero, n_algae, n_doc, & n_dic, n_don, n_fed, n_fep, nfsd use ice_arrays_column, only: oceanmixed_ice use ice_dyn_shared, only: kdyn + use ice_grid, only: grid_ice character(len=char_len_long), intent(in), optional :: filename_spec @@ -246,6 +247,18 @@ subroutine init_restart_write(filename_spec) call define_rest_field(ncid,'uvel',dims) call define_rest_field(ncid,'vvel',dims) + + if (grid_ice == 'CD') then + call define_rest_field(ncid,'uvelE',dims) + call define_rest_field(ncid,'vvelE',dims) + call define_rest_field(ncid,'uvelN',dims) + call define_rest_field(ncid,'vvelN',dims) + endif + + if 
(grid_ice == 'C') then + call define_rest_field(ncid,'uvelE',dims) + call define_rest_field(ncid,'vvelN',dims) + endif if (restart_coszen) call define_rest_field(ncid,'coszen',dims) @@ -275,6 +288,18 @@ subroutine init_restart_write(filename_spec) call define_rest_field(ncid,'iceumask',dims) + if (grid_ice == 'CD' .or. grid_ice == 'C') then + call define_rest_field(ncid,'stresspT' ,dims) + call define_rest_field(ncid,'stressmT' ,dims) + call define_rest_field(ncid,'stress12T',dims) + call define_rest_field(ncid,'stresspU' ,dims) + call define_rest_field(ncid,'stressmU' ,dims) + call define_rest_field(ncid,'stress12U',dims) + call define_rest_field(ncid,'icenmask',dims) + call define_rest_field(ncid,'iceemask',dims) + endif + + if (oceanmixed_ice) then call define_rest_field(ncid,'sst',dims) call define_rest_field(ncid,'frzmlt',dims) @@ -815,7 +840,6 @@ end subroutine write_restart_field subroutine final_restart() use ice_calendar, only: istep1, idate - use ice_communicate, only: my_task, master_task integer (kind=int_kind) :: status @@ -861,6 +885,35 @@ subroutine define_rest_field(ncid, vname, dims) end subroutine define_rest_field +!======================================================================= + +! Inquire field existence +! author T. Craig + + logical function query_field(nu,vname) + + integer (kind=int_kind), intent(in) :: nu ! unit number + character (len=*) , intent(in) :: vname ! variable name + + ! local variables + + integer (kind=int_kind) :: status, varid + character(len=*), parameter :: subname = '(query_field)' + + query_field = .false. +#ifdef USE_NETCDF + if (my_task == master_task) then + status = nf90_inq_varid(ncid,trim(vname),varid) + if (status == nf90_noerr) query_field = .true. + endif + call broadcast_scalar(query_field,master_task) +#else + call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined for '//trim(ice_ic), & + file=__FILE__, line=__LINE__) +#endif + + end function query_field + !======================================================================= end module ice_restart diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 index 0e91d42d0..a6660544e 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 @@ -49,9 +49,12 @@ subroutine ice_write_hist (ns) use ice_domain, only: distrb_info, nblocks use ice_domain_size, only: nx_global, ny_global, max_blocks, max_nstrm use ice_gather_scatter, only: gather_global - use ice_grid, only: TLON, TLAT, ULON, ULAT, hm, bm, tarea, uarea, & - dxu, dxt, dyu, dyt, HTN, HTE, ANGLE, ANGLET, tmask, & - lont_bounds, latt_bounds, lonu_bounds, latu_bounds + use ice_grid, only: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT, & + hm, bm, uvm, npm, epm, & + dxu, dxt, dyu, dyt, dxn, dyn, dxe, dye, HTN, HTE, ANGLE, ANGLET, & + tarea, uarea, narea, earea, tmask, umask, nmask, emask, & + lont_bounds, latt_bounds, lonu_bounds, latu_bounds, & + lonn_bounds, latn_bounds, lone_bounds, late_bounds use ice_history_shared use ice_arrays_column, only: hin_max, floe_rad_c use ice_restart_shared, only: runid, lcdf64 @@ -89,15 +92,16 @@ subroutine ice_write_hist (ns) iodesc4di, iodesc4ds, iodesc4df type(var_desc_t) :: varid - ! 4 coordinate variables: TLON, TLAT, ULON, ULAT - INTEGER (kind=int_kind), PARAMETER :: ncoord = 4 + ! 
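The query_field function above lets callers probe a restart file for a variable before reading it, which keeps restart files written before the C-grid fields existed readable. A hypothetical guarded read (read_restart_field signature as in this patch; the 'ruf8' type flag and field_loc_Eface location are assumptions):

    ! Hypothetical caller: skip C-grid velocity fields absent from old restarts.
    if (query_field(nu_restart,'uvelE')) then
       call read_restart_field(nu_restart,0,uvelE,'ruf8','uvelE', &
            1,diag,field_loc_Eface,field_type_vector)
    endif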
8 coordinate variables: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT + INTEGER (kind=int_kind), PARAMETER :: ncoord = 8 ! 4 vertices in each grid cell INTEGER (kind=int_kind), PARAMETER :: nverts = 4 - ! 4 variables describe T, U grid boundaries: + ! 8 variables describe T, U, N, E grid boundaries: ! lont_bounds, latt_bounds, lonu_bounds, latu_bounds - INTEGER (kind=int_kind), PARAMETER :: nvar_verts = 4 + ! lonn_bounds, latn_bounds, lone_bounds, late_bounds + INTEGER (kind=int_kind), PARAMETER :: nvar_verts = 8 TYPE coord_attributes ! netcdf coordinate attributes character (len=11) :: short_name @@ -110,10 +114,10 @@ subroutine ice_write_hist (ns) character (len=20) :: coordinates END TYPE req_attributes - TYPE(req_attributes), dimension(nvar) :: var - TYPE(coord_attributes), dimension(ncoord) :: coord_var + TYPE(req_attributes), dimension(nvar_grd) :: var_grd + TYPE(coord_attributes), dimension(ncoord) :: var_coord TYPE(coord_attributes), dimension(nvar_verts) :: var_nverts - TYPE(coord_attributes), dimension(nvarz) :: var_nz + TYPE(coord_attributes), dimension(nvar_grdz) :: var_grdz CHARACTER (char_len), dimension(ncoord) :: coord_bounds real (kind=dbl_kind) , allocatable :: workd2(:,:,:) @@ -252,74 +256,118 @@ subroutine ice_write_hist (ns) ind = 0 ind = ind + 1 - coord_var(ind) = coord_attributes('TLON', & + var_coord(ind) = coord_attributes('TLON', & 'T grid center longitude', 'degrees_east') coord_bounds(ind) = 'lont_bounds' ind = ind + 1 - coord_var(ind) = coord_attributes('TLAT', & + var_coord(ind) = coord_attributes('TLAT', & 'T grid center latitude', 'degrees_north') coord_bounds(ind) = 'latt_bounds' ind = ind + 1 - coord_var(ind) = coord_attributes('ULON', & + var_coord(ind) = coord_attributes('ULON', & 'U grid center longitude', 'degrees_east') coord_bounds(ind) = 'lonu_bounds' ind = ind + 1 - coord_var(ind) = coord_attributes('ULAT', & + var_coord(ind) = coord_attributes('ULAT', & 'U grid center latitude', 'degrees_north') coord_bounds(ind) = 'latu_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('NLON', & + 'N grid center longitude', 'degrees_east') + coord_bounds(ind) = 'lonn_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('NLAT', & + 'N grid center latitude', 'degrees_north') + coord_bounds(ind) = 'latn_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('ELON', & + 'E grid center longitude', 'degrees_east') + coord_bounds(ind) = 'lone_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('ELAT', & + 'E grid center latitude', 'degrees_north') + coord_bounds(ind) = 'late_bounds' - var_nz(1) = coord_attributes('NCAT', 'category maximum thickness', 'm') - var_nz(2) = coord_attributes('VGRDi', 'vertical ice levels', '1') - var_nz(3) = coord_attributes('VGRDs', 'vertical snow levels', '1') - var_nz(4) = coord_attributes('VGRDb', 'vertical ice-bio levels', '1') - var_nz(5) = coord_attributes('VGRDa', 'vertical snow-ice-bio levels', '1') - var_nz(6) = coord_attributes('NFSD', 'category floe size (center)', 'm') + var_grdz(1) = coord_attributes('NCAT', 'category maximum thickness', 'm') + var_grdz(2) = coord_attributes('VGRDi', 'vertical ice levels', '1') + var_grdz(3) = coord_attributes('VGRDs', 'vertical snow levels', '1') + var_grdz(4) = coord_attributes('VGRDb', 'vertical ice-bio levels', '1') + var_grdz(5) = coord_attributes('VGRDa', 'vertical snow-ice-bio levels', '1') + var_grdz(6) = coord_attributes('NFSD', 'category floe size (center)', 'm') !----------------------------------------------------------------- ! 
define information for optional time-invariant variables !----------------------------------------------------------------- - var(n_tmask)%req = coord_attributes('tmask', & - 'ocean grid mask', ' ') - var(n_tmask)%coordinates = 'TLON TLAT' - - var(n_blkmask)%req = coord_attributes('blkmask', & - 'ice grid block mask', ' ') - var(n_blkmask)%coordinates = 'TLON TLAT' - - var(n_tarea)%req = coord_attributes('tarea', & + var_grd(n_tmask)%req = coord_attributes('tmask', & + 'mask of T grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_tmask)%coordinates = 'TLON TLAT' + var_grd(n_umask)%req = coord_attributes('umask', & + 'mask of U grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_umask)%coordinates = 'ULON ULAT' + var_grd(n_nmask)%req = coord_attributes('nmask', & + 'mask of N grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_nmask)%coordinates = 'NLON NLAT' + var_grd(n_emask)%req = coord_attributes('emask', & + 'mask of E grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_emask)%coordinates = 'ELON ELAT' + + var_grd(n_blkmask)%req = coord_attributes('blkmask', & + 'ice grid block mask, mytask + iblk/100', 'unitless') + var_grd(n_blkmask)%coordinates = 'TLON TLAT' + + var_grd(n_tarea)%req = coord_attributes('tarea', & 'area of T grid cells', 'm^2') - var(n_tarea)%coordinates = 'TLON TLAT' - - var(n_uarea)%req = coord_attributes('uarea', & + var_grd(n_tarea)%coordinates = 'TLON TLAT' + var_grd(n_uarea)%req = coord_attributes('uarea', & 'area of U grid cells', 'm^2') - var(n_uarea)%coordinates = 'ULON ULAT' - var(n_dxt)%req = coord_attributes('dxt', & + var_grd(n_uarea)%coordinates = 'ULON ULAT' + var_grd(n_narea)%req = coord_attributes('narea', & + 'area of N grid cells', 'm^2') + var_grd(n_narea)%coordinates = 'NLON NLAT' + var_grd(n_earea)%req = coord_attributes('earea', & + 'area of E grid cells', 'm^2') + var_grd(n_earea)%coordinates = 'ELON ELAT' + + var_grd(n_dxt)%req = coord_attributes('dxt', & 'T cell width through middle', 'm') - var(n_dxt)%coordinates = 'TLON TLAT' - var(n_dyt)%req = coord_attributes('dyt', & + var_grd(n_dxt)%coordinates = 'TLON TLAT' + var_grd(n_dyt)%req = coord_attributes('dyt', & 'T cell height through middle', 'm') - var(n_dyt)%coordinates = 'TLON TLAT' - var(n_dxu)%req = coord_attributes('dxu', & + var_grd(n_dyt)%coordinates = 'TLON TLAT' + var_grd(n_dxu)%req = coord_attributes('dxu', & 'U cell width through middle', 'm') - var(n_dxu)%coordinates = 'ULON ULAT' - var(n_dyu)%req = coord_attributes('dyu', & + var_grd(n_dxu)%coordinates = 'ULON ULAT' + var_grd(n_dyu)%req = coord_attributes('dyu', & 'U cell height through middle', 'm') - var(n_dyu)%coordinates = 'ULON ULAT' - var(n_HTN)%req = coord_attributes('HTN', & + var_grd(n_dyu)%coordinates = 'ULON ULAT' + var_grd(n_dxn)%req = coord_attributes('dxn', & + 'N cell width through middle', 'm') + var_grd(n_dxn)%coordinates = 'NLON NLAT' + var_grd(n_dyn)%req = coord_attributes('dyn', & + 'N cell height through middle', 'm') + var_grd(n_dyn)%coordinates = 'NLON NLAT' + var_grd(n_dxe)%req = coord_attributes('dxe', & + 'E cell width through middle', 'm') + var_grd(n_dxe)%coordinates = 'ELON ELAT' + var_grd(n_dye)%req = coord_attributes('dye', & + 'E cell height through middle', 'm') + var_grd(n_dye)%coordinates = 'ELON ELAT' + + var_grd(n_HTN)%req = coord_attributes('HTN', & 'T cell width on North side','m') - var(n_HTN)%coordinates = 'TLON TLAT' - var(n_HTE)%req = coord_attributes('HTE', & + var_grd(n_HTN)%coordinates = 'TLON TLAT' + var_grd(n_HTE)%req = coord_attributes('HTE', & 'T cell 
width on East side', 'm') - var(n_HTE)%coordinates = 'TLON TLAT' - var(n_ANGLE)%req = coord_attributes('ANGLE', & + var_grd(n_HTE)%coordinates = 'TLON TLAT' + var_grd(n_ANGLE)%req = coord_attributes('ANGLE', & 'angle grid makes with latitude line on U grid', & 'radians') - var(n_ANGLE)%coordinates = 'ULON ULAT' - var(n_ANGLET)%req = coord_attributes('ANGLET', & + var_grd(n_ANGLE)%coordinates = 'ULON ULAT' + var_grd(n_ANGLET)%req = coord_attributes('ANGLET', & 'angle grid makes with latitude line on T grid', & 'radians') - var(n_ANGLET)%coordinates = 'TLON TLAT' + var_grd(n_ANGLET)%coordinates = 'TLON TLAT' ! These fields are required for CF compliance ! dimensions (nx,ny,nverts) @@ -331,6 +379,14 @@ subroutine ice_write_hist (ns) 'longitude boundaries of U cells', 'degrees_east') var_nverts(n_latu_bnds) = coord_attributes('latu_bounds', & 'latitude boundaries of U cells', 'degrees_north') + var_nverts(n_lonn_bnds) = coord_attributes('lonn_bounds', & + 'longitude boundaries of N cells', 'degrees_east') + var_nverts(n_latn_bnds) = coord_attributes('latn_bounds', & + 'latitude boundaries of N cells', 'degrees_north') + var_nverts(n_lone_bnds) = coord_attributes('lone_bounds', & + 'longitude boundaries of E cells', 'degrees_east') + var_nverts(n_late_bnds) = coord_attributes('late_bounds', & + 'latitude boundaries of E cells', 'degrees_north') !----------------------------------------------------------------- ! define attributes for time-invariant variables @@ -340,12 +396,12 @@ subroutine ice_write_hist (ns) dimid2(2) = jmtid do i = 1, ncoord - status = pio_def_var(File, trim(coord_var(i)%short_name), lprecision, & + status = pio_def_var(File, trim(var_coord(i)%short_name), lprecision, & dimid2, varid) - status = pio_put_att(File,varid,'long_name',trim(coord_var(i)%long_name)) - status = pio_put_att(File, varid, 'units', trim(coord_var(i)%units)) - call ice_write_hist_fill(File,varid,coord_var(i)%short_name,history_precision) - if (coord_var(i)%short_name == 'ULAT') then + status = pio_put_att(File,varid,'long_name',trim(var_coord(i)%long_name)) + status = pio_put_att(File, varid, 'units', trim(var_coord(i)%units)) + call ice_write_hist_fill(File,varid,var_coord(i)%short_name,history_precision) + if (var_coord(i)%short_name == 'ULAT') then status = pio_put_att(File,varid,'comment', & trim('Latitude of NE corner of T grid cell')) endif @@ -362,39 +418,23 @@ subroutine ice_write_hist (ns) dimidex(5)=kmtida dimidex(6)=fmtid - do i = 1, nvarz + do i = 1, nvar_grdz if (igrdz(i)) then - status = pio_def_var(File, trim(var_nz(i)%short_name), lprecision, & + status = pio_def_var(File, trim(var_grdz(i)%short_name), lprecision, & (/dimidex(i)/), varid) - status = pio_put_att(File, varid, 'long_name', var_nz(i)%long_name) - status = pio_put_att(File, varid, 'units' , var_nz(i)%units) + status = pio_put_att(File, varid, 'long_name', var_grdz(i)%long_name) + status = pio_put_att(File, varid, 'units' , var_grdz(i)%units) endif enddo - ! 
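Each coord_bounds name recorded above ties a coordinate variable to its cell-corner array through the CF 'bounds' attribute; schematically (the f_bounds guard is an assumption, the put_att call follows the style of this writer):

    ! Sketch: advertise the corner arrays to CF-aware readers.
    if (f_bounds) then
       status = pio_put_att(File, varid, 'bounds', trim(coord_bounds(i)))
    endif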
Attributes for tmask defined separately, since it has no units - if (igrd(n_tmask)) then - status = pio_def_var(File, 'tmask', lprecision, dimid2, varid) - status = pio_put_att(File,varid, 'long_name', 'ocean grid mask') - status = pio_put_att(File, varid, 'coordinates', 'TLON TLAT') - call ice_write_hist_fill(File,varid,'tmask',history_precision) - status = pio_put_att(File,varid,'comment', '0 = land, 1 = ocean') - endif - if (igrd(n_blkmask)) then - status = pio_def_var(File, 'blkmask', lprecision, dimid2, varid) - status = pio_put_att(File,varid, 'long_name', 'ice grid block mask') - status = pio_put_att(File, varid, 'coordinates', 'TLON TLAT') - status = pio_put_att(File,varid,'comment', 'mytask + iblk/100') - call ice_write_hist_fill(File,varid,'blkmask',history_precision) - endif - - do i = 3, nvar ! note: n_tmask=1, n_blkmask=2 + do i = 1, nvar_grd if (igrd(i)) then - status = pio_def_var(File, trim(var(i)%req%short_name), & + status = pio_def_var(File, trim(var_grd(i)%req%short_name), & lprecision, dimid2, varid) - status = pio_put_att(File,varid, 'long_name', trim(var(i)%req%long_name)) - status = pio_put_att(File, varid, 'units', trim(var(i)%req%units)) - status = pio_put_att(File, varid, 'coordinates', trim(var(i)%coordinates)) - call ice_write_hist_fill(File,varid,var(i)%req%short_name,history_precision) + status = pio_put_att(File,varid, 'long_name', trim(var_grd(i)%req%long_name)) + status = pio_put_att(File, varid, 'units', trim(var_grd(i)%req%units)) + status = pio_put_att(File, varid, 'coordinates', trim(var_grd(i)%coordinates)) + call ice_write_hist_fill(File,varid,var_grd(i)%req%short_name,history_precision) endif enddo @@ -430,39 +470,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimid3, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - if (TRIM(avail_hist_fields(n)%vname)/='sig1' & - .or.TRIM(avail_hist_fields(n)%vname)/='sig2' & - .or.TRIM(avail_hist_fields(n)%vname)/='sistreave' & - .or.TRIM(avail_hist_fields(n)%vname)/='sistremax' & - .or.TRIM(avail_hist_fields(n)%vname)/='sigP') then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg & - .or. n==n_divu(ns) .or. n==n_shear(ns) & ! snapshots - .or. n==n_sig1(ns) .or. n==n_sig2(ns) & - .or. n==n_sigP(ns) .or. n==n_trsig(ns) & - .or. n==n_sistreave(ns) .or. n==n_sistremax(ns) & - .or. n==n_mlt_onset(ns) .or. n==n_frz_onset(ns) & - .or. n==n_hisnap(ns) .or. n==n_aisnap(ns)) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_2D @@ -479,27 +487,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimidz, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Dc @@ -516,27 +504,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimidz, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Dz @@ -553,27 +521,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimidz, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Db @@ -590,27 +538,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimidz, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Da @@ -627,27 +555,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimidz, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Df @@ -670,27 +578,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimidcz, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_4Di @@ -708,27 +596,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimidcz, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_4Ds @@ -747,27 +615,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimidcz, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_4Df @@ -853,8 +701,8 @@ subroutine ice_write_hist (ns) allocate(workr2(nx_block,ny_block,nblocks)) do i = 1,ncoord - status = pio_inq_varid(File, coord_var(i)%short_name, varid) - SELECT CASE (coord_var(i)%short_name) + status = pio_inq_varid(File, var_coord(i)%short_name, varid) + SELECT CASE (var_coord(i)%short_name) CASE ('TLON') ! Convert T grid longitude from -180 -> 180 to 0 to 360 workd2(:,:,:) = mod(tlon(:,:,1:nblocks)*rad_to_deg + c360, c360) @@ -864,6 +712,14 @@ subroutine ice_write_hist (ns) workd2(:,:,:) = ulon(:,:,1:nblocks)*rad_to_deg CASE ('ULAT') workd2(:,:,:) = ulat(:,:,1:nblocks)*rad_to_deg + CASE ('NLON') + workd2(:,:,:) = nlon(:,:,1:nblocks)*rad_to_deg + CASE ('NLAT') + workd2(:,:,:) = nlat(:,:,1:nblocks)*rad_to_deg + CASE ('ELON') + workd2(:,:,:) = elon(:,:,1:nblocks)*rad_to_deg + CASE ('ELAT') + workd2(:,:,:) = elat(:,:,1:nblocks)*rad_to_deg END SELECT if (history_precision == 8) then call pio_write_darray(File, varid, iodesc2d, & @@ -877,10 +733,10 @@ subroutine ice_write_hist (ns) ! 
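The darray writes branch on history_precision: double-precision history is written directly, while single-precision output is first demoted through the workr2 buffer allocated above. A minimal sketch of the pattern (spval assumed as the single-precision fill value):

    if (history_precision == 8) then
       call pio_write_darray(File, varid, iodesc2d, &
            workd2, status, fillval=spval_dbl)
    else
       workr2 = workd2   ! demote to single precision
       call pio_write_darray(File, varid, iodesc2d, &
            workr2, status, fillval=spval)
    endif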
Extra dimensions (NCAT, NFSD, VGRD*) - do i = 1, nvarz + do i = 1, nvar_grdz if (igrdz(i)) then - status = pio_inq_varid(File, var_nz(i)%short_name, varid) - SELECT CASE (var_nz(i)%short_name) + status = pio_inq_varid(File, var_grdz(i)%short_name, varid) + SELECT CASE (var_grdz(i)%short_name) CASE ('NCAT') status = pio_put_var(File, varid, hin_max(1:ncat_hist)) CASE ('NFSD') @@ -901,36 +757,43 @@ subroutine ice_write_hist (ns) ! write grid masks, area and rotation angle !----------------------------------------------------------------- -! if (igrd(n_tmask)) then -! status = pio_inq_varid(File, 'tmask', varid) -! call pio_write_darray(File, varid, iodesc2d, & -! hm(:,:,1:nblocks), status, fillval=spval_dbl) -! endif -! if (igrd(n_blkmask)) then -! status = pio_inq_varid(File, 'blkmask', varid) -! call pio_write_darray(File, varid, iodesc2d, & -! bm(:,:,1:nblocks), status, fillval=spval_dbl) -! endif - - do i = 1, nvar ! note: n_tmask=1, n_blkmask=2 + do i = 1, nvar_grd if (igrd(i)) then - SELECT CASE (var(i)%req%short_name) + SELECT CASE (var_grd(i)%req%short_name) CASE ('tmask') workd2 = hm(:,:,1:nblocks) + CASE ('umask') + workd2 = uvm(:,:,1:nblocks) + CASE ('nmask') + workd2 = npm(:,:,1:nblocks) + CASE ('emask') + workd2 = epm(:,:,1:nblocks) CASE ('blkmask') workd2 = bm(:,:,1:nblocks) CASE ('tarea') workd2 = tarea(:,:,1:nblocks) CASE ('uarea') workd2 = uarea(:,:,1:nblocks) - CASE ('dxu') - workd2 = dxu(:,:,1:nblocks) - CASE ('dyu') - workd2 = dyu(:,:,1:nblocks) + CASE ('narea') + workd2 = narea(:,:,1:nblocks) + CASE ('earea') + workd2 = earea(:,:,1:nblocks) CASE ('dxt') workd2 = dxt(:,:,1:nblocks) CASE ('dyt') workd2 = dyt(:,:,1:nblocks) + CASE ('dxu') + workd2 = dxu(:,:,1:nblocks) + CASE ('dyu') + workd2 = dyu(:,:,1:nblocks) + CASE ('dxn') + workd2 = dxn(:,:,1:nblocks) + CASE ('dyn') + workd2 = dyn(:,:,1:nblocks) + CASE ('dxe') + workd2 = dxe(:,:,1:nblocks) + CASE ('dye') + workd2 = dye(:,:,1:nblocks) CASE ('HTN') workd2 = HTN(:,:,1:nblocks) CASE ('HTE') @@ -940,7 +803,7 @@ subroutine ice_write_hist (ns) CASE ('ANGLET') workd2 = ANGLET(:,:,1:nblocks) END SELECT - status = pio_inq_varid(File, var(i)%req%short_name, varid) + status = pio_inq_varid(File, var_grd(i)%req%short_name, varid) if (history_precision == 8) then call pio_write_darray(File, varid, iodesc2d, & workd2, status, fillval=spval_dbl) @@ -978,6 +841,22 @@ subroutine ice_write_hist (ns) do ivertex = 1, nverts workd3v(ivertex,:,:,:) = latu_bounds(ivertex,:,:,1:nblocks) enddo + CASE ('lonn_bounds') + do ivertex = 1, nverts + workd3v(ivertex,:,:,:) = lonn_bounds(ivertex,:,:,1:nblocks) + enddo + CASE ('latn_bounds') + do ivertex = 1, nverts + workd3v(ivertex,:,:,:) = latn_bounds(ivertex,:,:,1:nblocks) + enddo + CASE ('lone_bounds') + do ivertex = 1, nverts + workd3v(ivertex,:,:,:) = lone_bounds(ivertex,:,:,1:nblocks) + enddo + CASE ('late_bounds') + do ivertex = 1, nverts + workd3v(ivertex,:,:,:) = late_bounds(ivertex,:,:,1:nblocks) + enddo END SELECT status = pio_inq_varid(File, var_nverts(i)%short_name, varid) @@ -1317,6 +1196,73 @@ subroutine ice_write_hist (ns) end subroutine ice_write_hist +!======================================================================= + + subroutine ice_write_hist_attrs(File, varid, hfield, ns) + + use ice_kinds_mod + use ice_calendar, only: histfreq, histfreq_n + use ice_history_shared, only: ice_hist_field, history_precision, & + hist_avg + use ice_pio + use pio + + type(file_desc_t) :: File ! file id + type(var_desc_t) :: varid ! variable id + type (ice_hist_field), intent(in) :: hfield ! 
history file info + integer (kind=int_kind), intent(in) :: ns + + ! local variables + + integer (kind=int_kind) :: status + character(len=*), parameter :: subname = '(ice_write_hist_attrs)' + + status = pio_put_att(File,varid,'units', trim(hfield%vunit)) + + status = pio_put_att(File,varid, 'long_name', trim(hfield%vdesc)) + + status = pio_put_att(File,varid,'coordinates', trim(hfield%vcoord)) + + status = pio_put_att(File,varid,'cell_measures', trim(hfield%vcellmeas)) + + if (hfield%vcomment /= "none") then + status = pio_put_att(File,varid,'comment', trim(hfield%vcomment)) + endif + + call ice_write_hist_fill(File,varid,hfield%vname,history_precision) + + ! Add cell_methods attribute to variables if averaged + if (hist_avg) then + if (TRIM(hfield%vname(1:4))/='sig1' & + .and.TRIM(hfield%vname(1:4))/='sig2' & + .and.TRIM(hfield%vname(1:9))/='sistreave' & + .and.TRIM(hfield%vname(1:9))/='sistremax' & + .and.TRIM(hfield%vname(1:4))/='sigP') then + status = pio_put_att(File,varid,'cell_methods','time: mean') + endif + endif + + if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & + .or..not. hist_avg & + .or.TRIM(hfield%vname(1:4))=='divu' & + .or.TRIM(hfield%vname(1:5))=='shear' & + .or.TRIM(hfield%vname(1:4))=='sig1' & + .or.TRIM(hfield%vname(1:4))=='sig2' & + .or.TRIM(hfield%vname(1:4))=='sigP' & + .or.TRIM(hfield%vname(1:5))=='trsig' & + .or.TRIM(hfield%vname(1:9))=='sistreave' & + .or.TRIM(hfield%vname(1:9))=='sistremax' & + .or.TRIM(hfield%vname(1:9))=='mlt_onset' & + .or.TRIM(hfield%vname(1:9))=='frz_onset' & + .or.TRIM(hfield%vname(1:6))=='hisnap' & + .or.TRIM(hfield%vname(1:6))=='aisnap') then + status = pio_put_att(File,varid,'time_rep','instantaneous') + else + status = pio_put_att(File,varid,'time_rep','averaged') + endif + + end subroutine ice_write_hist_attrs + !======================================================================= subroutine ice_write_hist_fill(File,varid,vname,precision) diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index c9e7fdf8a..e585788b7 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -6,6 +6,7 @@ module ice_restart use ice_broadcast + use ice_communicate, only: my_task, master_task use ice_exit, only: abort_ice use ice_fileunits, only: nu_diag, nu_restart, nu_rst_pointer use ice_kinds_mod @@ -22,7 +23,8 @@ module ice_restart implicit none private public :: init_restart_write, init_restart_read, & - read_restart_field, write_restart_field, final_restart + read_restart_field, write_restart_field, final_restart, & + query_field type(file_desc_t) :: File type(var_desc_t) :: vardesc @@ -43,7 +45,6 @@ subroutine init_restart_read(ice_ic) use ice_calendar, only: istep0, istep1, myear, mmonth, & mday, msec, npt - use ice_communicate, only: my_task, master_task use ice_domain_size, only: ncat use ice_read_write, only: ice_open @@ -139,12 +140,12 @@ end subroutine init_restart_read subroutine init_restart_write(filename_spec) use ice_calendar, only: msec, mmonth, mday, myear, istep1 - use ice_communicate, only: my_task, master_task use ice_domain_size, only: nx_global, ny_global, ncat, nilyr, nslyr, & n_iso, n_aero, nblyr, n_zaero, n_algae, n_doc, & n_dic, n_don, n_fed, n_fep, nfsd use ice_dyn_shared, only: kdyn use ice_arrays_column, only: oceanmixed_ice + use ice_grid, only: grid_ice logical (kind=log_kind) :: & solve_zsal, skl_bgc, z_tracers @@ -251,6 +252,20 @@ subroutine 
init_restart_write(filename_spec) call define_rest_field(File,'uvel',dims) call define_rest_field(File,'vvel',dims) + + if (grid_ice == 'CD') then + call define_rest_field(File,'uvelE',dims) + call define_rest_field(File,'vvelE',dims) + call define_rest_field(File,'uvelN',dims) + call define_rest_field(File,'vvelN',dims) + endif + + if (grid_ice == 'C') then + call define_rest_field(File,'uvelE',dims) + call define_rest_field(File,'vvelN',dims) + endif + + if (restart_coszen) call define_rest_field(File,'coszen',dims) call define_rest_field(File,'scale_factor',dims) call define_rest_field(File,'swvdr',dims) @@ -278,6 +293,17 @@ subroutine init_restart_write(filename_spec) call define_rest_field(File,'iceumask',dims) + if (grid_ice == 'CD' .or. grid_ice == 'C') then + call define_rest_field(File,'stresspT' ,dims) + call define_rest_field(File,'stressmT' ,dims) + call define_rest_field(File,'stress12T',dims) + call define_rest_field(File,'stresspU' ,dims) + call define_rest_field(File,'stressmU' ,dims) + call define_rest_field(File,'stress12U',dims) + call define_rest_field(File,'icenmask',dims) + call define_rest_field(File,'iceemask',dims) + endif + if (oceanmixed_ice) then call define_rest_field(File,'sst',dims) call define_rest_field(File,'frzmlt',dims) @@ -669,7 +695,6 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & field_loc, field_type) use ice_blocks, only: nx_block, ny_block - use ice_communicate, only: my_task, master_task use ice_constants, only: c0, field_loc_center use ice_boundary, only: ice_HaloUpdate use ice_domain, only: halo_info, distrb_info, nblocks @@ -761,8 +786,7 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & amax = global_maxval(work(:,:,n,:),distrb_info) asum = global_sum(work(:,:,n,:), distrb_info, field_loc_center) if (my_task == master_task) then - write(nu_diag,*) ' min and max =', amin, amax - write(nu_diag,*) ' sum =',asum + write(nu_diag,*) ' min, max, sum =', amin, amax, asum, trim(vname) endif enddo else @@ -770,9 +794,7 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & amax = global_maxval(work(:,:,1,:),distrb_info) asum = global_sum(work(:,:,1,:), distrb_info, field_loc_center) if (my_task == master_task) then - write(nu_diag,*) ' min and max =', amin, amax - write(nu_diag,*) ' sum =',asum - write(nu_diag,*) '' + write(nu_diag,*) ' min, max, sum =', amin, amax, asum, trim(vname) endif endif @@ -791,7 +813,6 @@ end subroutine read_restart_field subroutine write_restart_field(nu,nrec,work,atype,vname,ndim3,diag) use ice_blocks, only: nx_block, ny_block - use ice_communicate, only: my_task, master_task use ice_constants, only: c0, field_loc_center use ice_domain, only: distrb_info, nblocks use ice_domain_size, only: max_blocks, ncat @@ -851,8 +872,7 @@ subroutine write_restart_field(nu,nrec,work,atype,vname,ndim3,diag) amax = global_maxval(work(:,:,n,:),distrb_info) asum = global_sum(work(:,:,n,:), distrb_info, field_loc_center) if (my_task == master_task) then - write(nu_diag,*) ' min and max =', amin, amax - write(nu_diag,*) ' sum =',asum + write(nu_diag,*) ' min, max, sum =', amin, amax, asum, trim(vname) endif enddo else @@ -860,8 +880,7 @@ subroutine write_restart_field(nu,nrec,work,atype,vname,ndim3,diag) amax = global_maxval(work(:,:,1,:),distrb_info) asum = global_sum(work(:,:,1,:), distrb_info, field_loc_center) if (my_task == master_task) then - write(nu_diag,*) ' min and max =', amin, amax - write(nu_diag,*) ' sum =',asum + write(nu_diag,*) ' min, max, sum =', amin, amax, asum, 
trim(vname) endif endif endif @@ -879,7 +898,6 @@ end subroutine write_restart_field subroutine final_restart() use ice_calendar, only: istep1, idate, msec - use ice_communicate, only: my_task, master_task character(len=*), parameter :: subname = '(final_restart)' @@ -912,6 +930,35 @@ subroutine define_rest_field(File, vname, dims) end subroutine define_rest_field +!======================================================================= + +! Inquire field existence +! author T. Craig + + logical function query_field(nu,vname) + + integer (kind=int_kind), intent(in) :: nu ! unit number + character (len=*) , intent(in) :: vname ! variable name + + ! local variables + + integer (kind=int_kind) :: status, varid + character(len=*), parameter :: subname = '(query_field)' + + query_field = .false. +#ifdef USE_NETCDF + if (my_task == master_task) then + status = pio_inq_varid(File,trim(vname),vardesc) + if (status == PIO_noerr) query_field = .true. + endif + call broadcast_scalar(query_field,master_task) +#else + call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined for '//trim(ice_ic), & + file=__FILE__, line=__LINE__) +#endif + + end function query_field + !======================================================================= end module ice_restart diff --git a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 index cd81de879..61f261bb2 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 @@ -153,7 +153,7 @@ subroutine ice_step use ice_state, only: trcrn use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, save_init, step_dyn_wave + biogeochemistry, step_prep, step_dyn_wave use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -210,7 +210,7 @@ subroutine ice_step call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! thermodynamics - call save_init + call step_prep !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 index ecd95e3c3..eb2bdcbf1 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 @@ -153,7 +153,7 @@ subroutine ice_step use ice_state, only: trcrn use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, save_init, step_dyn_wave + biogeochemistry, step_prep, step_dyn_wave use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -210,7 +210,7 @@ subroutine ice_step call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! 
thermodynamics - call save_init + call step_prep !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks diff --git a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 index 610b146a6..6ede4411d 100644 --- a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 @@ -157,7 +157,7 @@ subroutine ice_step use ice_restoring, only: restore_ice, ice_HaloRestore use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, save_init, step_dyn_wave + biogeochemistry, step_prep, step_dyn_wave use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -213,7 +213,7 @@ subroutine ice_step call t_stopf ('cice_run_presc') endif - call save_init + call step_prep call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! thermodynamics diff --git a/cicecore/drivers/mct/cesm1/ice_comp_esmf.F90 b/cicecore/drivers/mct/cesm1/ice_comp_esmf.F90 index 08681d84f..b0a78bfcd 100644 --- a/cicecore/drivers/mct/cesm1/ice_comp_esmf.F90 +++ b/cicecore/drivers/mct/cesm1/ice_comp_esmf.F90 @@ -44,7 +44,7 @@ module ice_comp_esmf use ice_domain, only : nblocks, blocks_ice, halo_info, distrb_info use ice_blocks, only : block, get_block, nx_block, ny_block use ice_grid, only : tlon, tlat, tarea, tmask, anglet, hm, & - grid_type, t2ugrid_vector, gridcpl_file, ocn_gridcell_frac + grid_type, gridcpl_file, ocn_gridcell_frac use ice_constants, only : c0, c1, spval_dbl, rad_to_deg, radius, secday use ice_communicate, only : my_task, master_task, MPI_COMM_ICE use ice_calendar, only : istep, istep1, force_restart_now, write_ic,& diff --git a/cicecore/drivers/mct/cesm1/ice_comp_mct.F90 b/cicecore/drivers/mct/cesm1/ice_comp_mct.F90 index 64dff54e2..d663d0f97 100644 --- a/cicecore/drivers/mct/cesm1/ice_comp_mct.F90 +++ b/cicecore/drivers/mct/cesm1/ice_comp_mct.F90 @@ -42,7 +42,7 @@ module ice_comp_mct use ice_domain, only : nblocks, blocks_ice, halo_info, distrb_info use ice_blocks, only : block, get_block, nx_block, ny_block use ice_grid, only : tlon, tlat, tarea, tmask, anglet, hm, & - grid_type, t2ugrid_vector, gridcpl_file, ocn_gridcell_frac + grid_type, gridcpl_file, ocn_gridcell_frac use ice_constants, only : c0, c1, spval_dbl, radius use ice_constants, only : ice_init_constants use ice_communicate, only : my_task, master_task, MPI_COMM_ICE diff --git a/cicecore/drivers/mct/cesm1/ice_import_export.F90 b/cicecore/drivers/mct/cesm1/ice_import_export.F90 index d42d3f8a1..f88cc2b2d 100644 --- a/cicecore/drivers/mct/cesm1/ice_import_export.F90 +++ b/cicecore/drivers/mct/cesm1/ice_import_export.F90 @@ -9,7 +9,7 @@ module ice_import_export use ice_constants , only: field_type_vector, c100 use ice_constants , only: p001, p5 use ice_blocks , only: block, get_block, nx_block, ny_block - use ice_flux , only: strairxt, strairyt, strocnxt, strocnyt + use ice_flux , only: strairxT, strairyT, strocnxT, strocnyT use ice_flux , only: alvdr, alidr, alvdf, alidf, Tref, Qref, Uref use ice_flux , only: flat, fsens, flwout, evap, fswabs, fhocn, fswthru use ice_flux , only: fresh, fsalt, zlvl, uatm, vatm, potT, Tair, Qa @@ -29,7 +29,7 @@ module ice_import_export use ice_domain , only: nblocks, blocks_ice, halo_info, distrb_info use ice_domain_size , only: nx_global, ny_global, block_size_x, block_size_y, max_blocks use ice_grid , only: tlon, tlat, tarea, tmask, anglet, hm - use 
ice_grid , only: grid_type, t2ugrid_vector + use ice_grid , only: grid_type, grid_average_X2Y use ice_boundary , only: ice_HaloUpdate use ice_communicate , only: my_task, master_task, MPI_COMM_ICE, get_num_procs use ice_calendar , only: istep, istep1, diagfreq @@ -65,6 +65,7 @@ subroutine ice_import( x2i ) type(block) :: this_block ! block information for current block integer,parameter :: nflds=17,nfldv=6,nfldb=27 real (kind=dbl_kind),allocatable :: aflds(:,:,:,:) + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: work real (kind=dbl_kind) :: workx, worky real (kind=dbl_kind) :: MIN_RAIN_TEMP, MAX_SNOW_TEMP character(len=char_len) :: tfrz_option @@ -468,10 +469,19 @@ subroutine ice_import( x2i ) if (.not.prescribed_ice) then call t_startf ('cice_imp_t2u') - call t2ugrid_vector(uocn) - call t2ugrid_vector(vocn) - call t2ugrid_vector(ss_tltx) - call t2ugrid_vector(ss_tlty) + call ice_HaloUpdate(uocn, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(vocn, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(ss_tltx, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(ss_tlty, halo_info, field_loc_center, field_type_scalar) + ! tcraig, moved to dynamics for consistency + !work = uocn + !call grid_average_X2Y('F',work,'T',uocn,'U') + !work = vocn + !call grid_average_X2Y('F',work,'T',vocn,'U') + !work = ss_tltx + !call grid_average_X2Y('F',work,'T',ss_tltx,'U') + !work = ss_tlty + !call grid_average_X2Y('F',work,'T',ss_tlty,'U') call t_stopf ('cice_imp_t2u') end if diff --git a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 index 779adc65d..6f145ab0e 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 @@ -196,7 +196,7 @@ subroutine ice_step endif #endif - call save_init + call step_prep call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! thermodynamics diff --git a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 index 8fe939785..dbdf5c07d 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 @@ -10,7 +10,7 @@ module ice_import_export use ice_domain , only : nblocks, blocks_ice, halo_info, distrb_info use ice_domain_size , only : nx_global, ny_global, block_size_x, block_size_y, max_blocks, ncat use ice_exit , only : abort_ice - use ice_flux , only : strairxt, strairyt, strocnxt, strocnyt + use ice_flux , only : strairxT, strairyT, strocnxT, strocnyT use ice_flux , only : alvdr, alidr, alvdf, alidf, Tref, Qref, Uref use ice_flux , only : flat, fsens, flwout, evap, fswabs, fhocn, fswthru use ice_flux , only : fswthru_vdr, fswthru_vdf, fswthru_idr, fswthru_idf @@ -25,7 +25,7 @@ module ice_import_export use ice_flux , only : sss, Tf, wind, fsw use ice_state , only : vice, vsno, aice, aicen_init, trcr use ice_grid , only : tlon, tlat, tarea, tmask, anglet, hm - use ice_grid , only : grid_type, t2ugrid_vector + use ice_grid , only : grid_type, grid_average_X2Y use ice_mesh_mod , only : ocn_gridcell_frac use ice_boundary , only : ice_HaloUpdate use ice_fileunits , only : nu_diag, flush_fileunit @@ -407,6 +407,7 @@ subroutine ice_import( importState, rc ) integer :: ilo, ihi, jlo, jhi !beginning and end of physical domain type(block) :: this_block ! 
block information for current block real (kind=dbl_kind),allocatable :: aflds(:,:,:,:) + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: work real (kind=dbl_kind) :: workx, worky real (kind=dbl_kind) :: MIN_RAIN_TEMP, MAX_SNOW_TEMP real (kind=dbl_kind) :: Tffresh @@ -799,10 +800,19 @@ subroutine ice_import( importState, rc ) if (.not.prescribed_ice) then call t_startf ('cice_imp_t2u') - call t2ugrid_vector(uocn) - call t2ugrid_vector(vocn) - call t2ugrid_vector(ss_tltx) - call t2ugrid_vector(ss_tlty) + call ice_HaloUpdate(uocn, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(vocn, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(ss_tltx, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(ss_tlty, halo_info, field_loc_center, field_type_scalar) + ! tcraig, moved to dynamics for consistency + !work = uocn + !call grid_average_X2Y('F',work,'T',uocn,'U') + !work = vocn + !call grid_average_X2Y('F',work,'T',vocn,'U') + !work = ss_tltx + !call grid_average_X2Y('F',work,'T',ss_tltx,'U') + !work = ss_tlty + !call grid_average_X2Y('F',work,'T',ss_tlty,'U') call t_stopf ('cice_imp_t2u') end if diff --git a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 index fffe575de..e7fb5f632 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 @@ -436,7 +436,7 @@ end subroutine ice_mesh_create_scolumn subroutine ice_mesh_init_tlon_tlat_area_hm() use ice_grid , only : tlon, tlat, hm, tarea, ULON, ULAT, HTN, HTE, ANGLE, ANGLET - use ice_grid , only : uarea, uarear, tarear, tinyarea + use ice_grid , only : uarea, uarear, tarear!, tinyarea use ice_grid , only : dxt, dyt, dxu, dyu, dyhx, dxhy, cyp, cxp, cym, cxm use ice_grid , only : makemask use ice_boundary , only : ice_HaloUpdate @@ -517,7 +517,7 @@ subroutine ice_mesh_init_tlon_tlat_area_hm() endif tarear(i,j,iblk) = c1/tarea(i,j,iblk) uarear(i,j,iblk) = c1/uarea(i,j,iblk) - tinyarea(i,j,iblk) = puny*tarea(i,j,iblk) +! tinyarea(i,j,iblk) = puny*tarea(i,j,iblk) if (.not. single_column) then if (ny_global == 1) then diff --git a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 index 33b9a165c..1aaee77f4 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 @@ -165,7 +165,7 @@ subroutine ice_step use ice_restoring, only: restore_ice, ice_HaloRestore use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, save_init, step_dyn_wave, step_snow + biogeochemistry, step_prep, step_dyn_wave, step_snow use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -224,7 +224,7 @@ subroutine ice_step call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! 
thermodynamics - call save_init + call step_prep !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks diff --git a/cicecore/drivers/nuopc/dmi/cice_cap.info b/cicecore/drivers/nuopc/dmi/cice_cap.info index 49127cc15..2faa623ec 100644 --- a/cicecore/drivers/nuopc/dmi/cice_cap.info +++ b/cicecore/drivers/nuopc/dmi/cice_cap.info @@ -18,7 +18,7 @@ module cice_cap use ice_calendar, only: dt use ice_flux use ice_grid, only: TLAT, TLON, ULAT, ULON, hm, tarea, ANGLET, ANGLE, & - dxt, dyt, t2ugrid_vector + dxt, dyt, grid_average_X2Y use ice_state use CICE_RunMod use CICE_InitMod @@ -934,12 +934,22 @@ module cice_cap ss_tlty(i,j,iblk) = ue*sin(AngT_s) + vn*cos(AngT_s) enddo enddo - call t2ugrid_vector(ss_tltx) - call t2ugrid_vector(ss_tlty) - call t2ugrid_vector(uocn) - call t2ugrid_vector(vocn) enddo +! call ice_HaloUpdate(uocn, halo_info, field_loc_center, field_type_scalar) +! call ice_HaloUpdate(vocn, halo_info, field_loc_center, field_type_scalar) +! call ice_HaloUpdate(ss_tltx, halo_info, field_loc_center, field_type_scalar) +! call ice_HaloUpdate(ss_tlty, halo_info, field_loc_center, field_type_scalar) + ! tcraig, moved to dynamics for consistency + !work = uocn + !call grid_average_X2Y('F',work,'T',uocn,'U') + !work = vocn + !call grid_average_X2Y('F',work,'T',vocn,'U') + !work = ss_tltx + !call grid_average_X2Y('F',work,'T',ss_tltx,'U') + !work = ss_tlty + !call grid_average_X2Y('F',work,'T',ss_tlty,'U') + end subroutine subroutine CICE_Export(st,rc) type(ESMF_State) :: st diff --git a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 index 363025b9b..0130d2588 100644 --- a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 @@ -15,6 +15,7 @@ module CICE_InitMod use ice_kinds_mod use ice_exit, only: abort_ice use ice_fileunits, only: init_fileunits, nu_diag + use ice_memusage, only: ice_memusage_init, ice_memusage_print use icepack_intfc, only: icepack_aggregate use icepack_intfc, only: icepack_init_itd, icepack_init_itd_hist use icepack_intfc, only: icepack_init_fsd_bounds, icepack_init_wave @@ -110,6 +111,12 @@ subroutine cice_init call input_zbgc ! vertical biogeochemistry namelist call count_tracers ! count tracers + ! Call this as early as possible, must be after memory_stats is read + if (my_task == master_task) then + call ice_memusage_init(nu_diag) + call ice_memusage_print(nu_diag,subname//':start') + endif + call init_domain_blocks ! set up block decomposition call init_grid1 ! domain distribution call alloc_grid ! allocate grid arrays @@ -238,6 +245,10 @@ subroutine cice_init if (write_ic) call accum_hist(dt) ! 
write initial conditions + if (my_task == master_task) then + call ice_memusage_print(nu_diag,subname//':end') + endif + end subroutine cice_init !======================================================================= diff --git a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 index aef6a1ec1..78e3b5259 100644 --- a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 @@ -15,11 +15,13 @@ module CICE_RunMod use ice_kinds_mod + use ice_communicate, only: my_task, master_task use ice_fileunits, only: nu_diag use ice_arrays_column, only: oceanmixed_ice use ice_constants, only: c0, c1 use ice_constants, only: field_loc_center, field_type_scalar use ice_exit, only: abort_ice + use ice_memusage, only: ice_memusage_print use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_max_iso, icepack_max_aero use icepack_intfc, only: icepack_query_parameters @@ -157,7 +159,7 @@ subroutine ice_step use ice_restoring, only: restore_ice, ice_HaloRestore use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, save_init, step_dyn_wave, step_snow + biogeochemistry, step_prep, step_dyn_wave, step_snow use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -216,7 +218,7 @@ subroutine ice_step call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! thermodynamics - call save_init + call step_prep if (ktherm >= 0) then !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) @@ -378,6 +380,9 @@ subroutine ice_step if (solve_zsal) call zsal_diags if (skl_bgc .or. z_tracers) call bgc_diags if (tr_brine) call hbrine_diags + if (my_task == master_task) then + call ice_memusage_print(nu_diag,subname) + endif endif call ice_timer_stop(timer_diags) ! diagnostics diff --git a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 new file mode 100644 index 000000000..60f71fa8a --- /dev/null +++ b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 @@ -0,0 +1,486 @@ +!======================================================================= +! +! This module contains the CICE initialization routine that sets model +! parameters and initializes the grid and CICE state variables. +! +! authors Elizabeth C. Hunke, LANL +! William H. Lipscomb, LANL +! Philip W. Jones, LANL +! +! 2006: Converted to free form source (F90) by Elizabeth Hunke +! 2008: E. Hunke moved ESMF code to its own driver + + module CICE_InitMod + + use ice_kinds_mod + use ice_exit, only: abort_ice + use ice_fileunits, only: init_fileunits, nu_diag + use icepack_intfc, only: icepack_aggregate + use icepack_intfc, only: icepack_init_itd, icepack_init_itd_hist + use icepack_intfc, only: icepack_init_fsd_bounds, icepack_init_wave + use icepack_intfc, only: icepack_configure + use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted + use icepack_intfc, only: icepack_query_parameters, icepack_query_tracer_flags, & + icepack_query_tracer_indices, icepack_query_tracer_sizes + + implicit none + private + public :: CICE_Initialize, cice_init + +!======================================================================= + + contains + +!======================================================================= + +! 
Initialize the basic state, grid and all necessary parameters for +! running the CICE model. Return the initial state in routine +! export state. +! Note: This initialization driver is designed for standalone and +! CESM-coupled applications. For other +! applications (e.g., standalone CAM), this driver would be +! replaced by a different driver that calls subroutine cice_init, +! where most of the work is done. + + subroutine CICE_Initialize + + character(len=*), parameter :: subname='(CICE_Initialize)' + !-------------------------------------------------------------------- + ! model initialization + !-------------------------------------------------------------------- + + call cice_init + + end subroutine CICE_Initialize + +!======================================================================= +! +! Initialize CICE model. + + subroutine cice_init + + use ice_arrays_column, only: hin_max, c_hi_range, alloc_arrays_column + use ice_arrays_column, only: floe_rad_l, floe_rad_c, & + floe_binwidth, c_fsd_range + use ice_state, only: alloc_state + use ice_flux_bgc, only: alloc_flux_bgc + use ice_calendar, only: dt, dt_dyn, istep, istep1, write_ic, & + init_calendar, advance_timestep, calc_timesteps + use ice_communicate, only: init_communicate, my_task, master_task + use ice_diagnostics, only: init_diags + use ice_domain, only: init_domain_blocks + use ice_domain_size, only: ncat, nfsd + use ice_dyn_eap, only: init_eap, alloc_dyn_eap + use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_vp, only: init_vp + use ice_flux, only: init_coupler_flux, init_history_therm, & + init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux + use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & + get_forcing_atmo, get_forcing_ocn, get_wave_spec + use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, & + faero_default, faero_optics, alloc_forcing_bgc, fiso_default + use ice_grid, only: init_grid1, init_grid2, alloc_grid + use ice_history, only: init_hist, accum_hist + use ice_restart_shared, only: restart, runtype + use ice_init, only: input_data, init_state + use ice_init_column, only: init_thermo_vertical, init_shortwave, init_zbgc, input_zbgc, count_tracers + use ice_kinds_mod + use ice_restoring, only: ice_HaloRestore_init + use ice_timers, only: timer_total, init_ice_timers, ice_timer_start + use ice_transport_driver, only: init_transport + + logical(kind=log_kind) :: tr_aero, tr_zaero, skl_bgc, z_tracers, & + tr_iso, tr_fsd, wave_spec + character(len=*), parameter :: subname = '(cice_init)' + + call init_communicate ! initial setup for message passing + call init_fileunits ! unit numbers + + ! tcx debug, this will create a different logfile for each pe + ! if (my_task /= master_task) nu_diag = 100+my_task + + call icepack_configure() ! initialize icepack + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(trim(subname), & + file=__FILE__,line= __LINE__) + + call input_data ! namelist variables + call input_zbgc ! vertical biogeochemistry namelist + call count_tracers ! count tracers + + call init_domain_blocks ! set up block decomposition + call init_grid1 ! domain distribution + call alloc_grid ! allocate grid arrays + call alloc_arrays_column ! allocate column arrays + call alloc_state ! allocate state arrays + call alloc_dyn_shared ! allocate dyn shared arrays + call alloc_flux_bgc ! allocate flux_bgc arrays + call alloc_flux ! allocate flux arrays + call init_ice_timers ! initialize all timers + call ice_timer_start(timer_total) ! 
start timing entire run + call init_grid2 ! grid variables + call init_zbgc ! vertical biogeochemistry initialization + call init_calendar ! initialize some calendar stuff + call init_hist (dt) ! initialize output history file + + call init_dyn (dt_dyn) ! define dynamics parameters, variables + if (kdyn == 2) then + call alloc_dyn_eap ! allocate dyn_eap arrays + call init_eap ! define eap dynamics parameters, variables + else if (kdyn == 3) then + call init_vp ! define vp dynamics parameters, variables + endif + + call init_coupler_flux ! initialize fluxes exchanged with coupler + + call init_thermo_vertical ! initialize vertical thermodynamics + + call icepack_init_itd(ncat=ncat, hin_max=hin_max) ! ice thickness distribution + if (my_task == master_task) then + call icepack_init_itd_hist(ncat=ncat, hin_max=hin_max, c_hi_range=c_hi_range) ! output + endif + + call icepack_query_tracer_flags(tr_fsd_out=tr_fsd) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(trim(subname), & + file=__FILE__,line= __LINE__) + + if (tr_fsd) call icepack_init_fsd_bounds (nfsd, & ! floe size distribution + floe_rad_l, & ! fsd size lower bound in m (radius) + floe_rad_c, & ! fsd size bin centre in m (radius) + floe_binwidth, & ! fsd size bin width in m (radius) + c_fsd_range, & ! string for history output + write_diags=(my_task == master_task)) ! write diag on master only + + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + call init_forcing_ocn(dt) ! initialize sss and sst from data + call init_state ! initialize the ice state + call init_transport ! initialize horizontal transport + call ice_HaloRestore_init ! restored boundary conditions + + call icepack_query_parameters(skl_bgc_out=skl_bgc, z_tracers_out=z_tracers, & + wave_spec_out=wave_spec) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(trim(subname), & + file=__FILE__,line= __LINE__) + + if (skl_bgc .or. z_tracers) call alloc_forcing_bgc ! allocate biogeochemistry arrays + + call init_restart ! initialize restart variables + call init_diags ! initialize diagnostic output points + call init_history_therm ! initialize thermo history variables + call init_history_dyn ! initialize dynamic history variables + call calc_timesteps ! update timestep counter if not using npt_unit="1" + + call icepack_query_tracer_flags(tr_aero_out=tr_aero, tr_zaero_out=tr_zaero) + call icepack_query_tracer_flags(tr_iso_out=tr_iso) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(trim(subname), & + file=__FILE__,line= __LINE__) + + if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical + !property tables + + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing + ! in prep_radiation. + if (trim(runtype) == 'continue' .or. restart) & + call init_shortwave ! initialize radiative transfer + +! tcraig, use advance_timestep here +! istep = istep + 1 ! update time step counters +! istep1 = istep1 + 1 +! time = time + dt ! determine the time and date +! call calendar(time) ! at the end of the first timestep + call advance_timestep() + + !-------------------------------------------------------------------- + ! coupler communication or forcing data initialization + !-------------------------------------------------------------------- + + call init_forcing_atmo ! 
initialize atmospheric forcing (standalone) + + if (tr_fsd .and. wave_spec) call get_wave_spec ! wave spectrum in ice + call get_forcing_atmo ! atmospheric forcing from data + call get_forcing_ocn(dt) ! ocean forcing from data + + ! isotopes + if (tr_iso) call fiso_default ! default values + ! aerosols + ! if (tr_aero) call faero_data ! data file + ! if (tr_zaero) call fzaero_data ! data file (gx1) + if (tr_aero .or. tr_zaero) call faero_default ! default values + if (skl_bgc .or. z_tracers) call get_forcing_bgc ! biogeochemistry + if (z_tracers) call get_atm_bgc ! biogeochemistry + + if (runtype == 'initial' .and. .not. restart) & + call init_shortwave ! initialize radiative transfer using current swdn + + call init_flux_atm ! initialize atmosphere fluxes sent to coupler + call init_flux_ocn ! initialize ocean fluxes sent to coupler + + if (write_ic) call accum_hist(dt) ! write initial conditions + + end subroutine cice_init + +!======================================================================= + + subroutine init_restart + + use ice_arrays_column, only: dhsn + use ice_blocks, only: nx_block, ny_block + use ice_calendar, only: calendar + use ice_constants, only: c0 + use ice_domain, only: nblocks + use ice_domain_size, only: ncat, n_iso, n_aero, nfsd + use ice_dyn_eap, only: read_restart_eap + use ice_dyn_shared, only: kdyn + use ice_grid, only: tmask + use ice_init, only: ice_ic + use ice_init_column, only: init_age, init_FY, init_lvl, & + init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & + init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd + use ice_restart_column, only: restart_age, read_restart_age, & + restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & + restart_pond_cesm, read_restart_pond_cesm, & + restart_pond_lvl, read_restart_pond_lvl, & + restart_pond_topo, read_restart_pond_topo, & + restart_fsd, read_restart_fsd, & + restart_iso, read_restart_iso, & + restart_aero, read_restart_aero, & + restart_hbrine, read_restart_hbrine, & + restart_zsal, restart_bgc + use ice_restart_driver, only: restartfile + use ice_restart_shared, only: runtype, restart + use ice_state ! almost everything + + integer(kind=int_kind) :: & + i, j , & ! horizontal indices + iblk ! 
block index + logical(kind=log_kind) :: & + tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & + tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, & + skl_bgc, z_tracers, solve_zsal + integer(kind=int_kind) :: & + ntrcr + integer(kind=int_kind) :: & + nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd, & + nt_iage, nt_FY, nt_aero, nt_fsd, nt_isosno, nt_isoice + + character(len=*), parameter :: subname = '(init_restart)' + + call icepack_query_tracer_sizes(ntrcr_out=ntrcr) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + call icepack_query_parameters(skl_bgc_out=skl_bgc, & + z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) + call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & + tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & + tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & + tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) + call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & + nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd, nt_ipnd_out=nt_ipnd, & + nt_iage_out=nt_iage, nt_FY_out=nt_FY, nt_aero_out=nt_aero, nt_fsd_out=nt_fsd, & + nt_isosno_out=nt_isosno, nt_isoice_out=nt_isoice) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + if (trim(runtype) == 'continue') then + ! start from core restart file + call restartfile() ! given by pointer in ice_in + call calendar() ! update time parameters + if (kdyn == 2) call read_restart_eap ! EAP + else if (restart) then ! ice_ic = core restart file + call restartfile (ice_ic) ! or 'default' or 'none' + !!! uncomment to create netcdf + ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file + !!! uncomment if EAP restart data exists + ! if (kdyn == 2) call read_restart_eap + endif + + ! tracers + ! ice age tracer + if (tr_iage) then + if (trim(runtype) == 'continue') & + restart_age = .true. + if (restart_age) then + call read_restart_age + else + do iblk = 1, nblocks + call init_age(trcrn(:,:,nt_iage,:,iblk)) + enddo ! iblk + endif + endif + ! first-year area tracer + if (tr_FY) then + if (trim(runtype) == 'continue') restart_FY = .true. + if (restart_FY) then + call read_restart_FY + else + do iblk = 1, nblocks + call init_FY(trcrn(:,:,nt_FY,:,iblk)) + enddo ! iblk + endif + endif + ! level ice tracer + if (tr_lvl) then + if (trim(runtype) == 'continue') restart_lvl = .true. + if (restart_lvl) then + call read_restart_lvl + else + do iblk = 1, nblocks + call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), & + trcrn(:,:,nt_vlvl,:,iblk)) + enddo ! iblk + endif + endif + ! CESM melt ponds + if (tr_pond_cesm) then + if (trim(runtype) == 'continue') & + restart_pond_cesm = .true. + if (restart_pond_cesm) then + call read_restart_pond_cesm + else + do iblk = 1, nblocks + call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & + trcrn(:,:,nt_hpnd,:,iblk)) + enddo ! iblk + endif + endif + ! level-ice melt ponds + if (tr_pond_lvl) then + if (trim(runtype) == 'continue') & + restart_pond_lvl = .true. + if (restart_pond_lvl) then + call read_restart_pond_lvl + else + do iblk = 1, nblocks + call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), & + trcrn(:,:,nt_hpnd,:,iblk), & + trcrn(:,:,nt_ipnd,:,iblk), & + dhsn(:,:,:,iblk)) + enddo ! iblk + endif + endif + ! topographic melt ponds + if (tr_pond_topo) then + if (trim(runtype) == 'continue') & + restart_pond_topo = .true. 
+ if (restart_pond_topo) then + call read_restart_pond_topo + else + do iblk = 1, nblocks + call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), & + trcrn(:,:,nt_hpnd,:,iblk), & + trcrn(:,:,nt_ipnd,:,iblk)) + enddo ! iblk + endif ! .not. restart_pond + endif + ! floe size distribution + if (tr_fsd) then + if (trim(runtype) == 'continue') restart_fsd = .true. + if (restart_fsd) then + call read_restart_fsd + else + call init_fsd(trcrn(:,:,nt_fsd:nt_fsd+nfsd-1,:,:)) + endif + endif + + ! isotopes + if (tr_iso) then + if (trim(runtype) == 'continue') restart_iso = .true. + if (restart_iso) then + call read_restart_iso + else + do iblk = 1, nblocks + call init_isotope(trcrn(:,:,nt_isosno:nt_isosno+n_iso-1,:,iblk), & + trcrn(:,:,nt_isoice:nt_isoice+n_iso-1,:,iblk)) + enddo ! iblk + endif + endif + + if (tr_aero) then ! ice aerosol + if (trim(runtype) == 'continue') restart_aero = .true. + if (restart_aero) then + call read_restart_aero + else + do iblk = 1, nblocks + call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk)) + enddo ! iblk + endif ! .not. restart_aero + endif + + if (trim(runtype) == 'continue') then + if (tr_brine) & + restart_hbrine = .true. + if (solve_zsal) & + restart_zsal = .true. + if (skl_bgc .or. z_tracers) & + restart_bgc = .true. + endif + + if (tr_brine .or. skl_bgc) then ! brine height tracer + call init_hbrine + if (tr_brine .and. restart_hbrine) call read_restart_hbrine + endif + + if (solve_zsal .or. skl_bgc .or. z_tracers) then ! biogeochemistry + if (tr_fsd) then + write (nu_diag,*) 'FSD implementation incomplete for use with BGC' + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + endif + call init_bgc + endif + + !----------------------------------------------------------------- + ! aggregate tracers + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (tmask(i,j,iblk)) then + call icepack_aggregate(ncat = ncat, & + aicen = aicen(i,j,:,iblk), & + trcrn = trcrn(i,j,:,:,iblk), & + vicen = vicen(i,j,:,iblk), & + vsnon = vsnon(i,j,:,iblk), & + aice = aice (i,j, iblk), & + trcr = trcr (i,j,:,iblk), & + vice = vice (i,j, iblk), & + vsno = vsno (i,j, iblk), & + aice0 = aice0(i,j, iblk), & + ntrcr = ntrcr, & + trcr_depend = trcr_depend, & + trcr_base = trcr_base, & + n_trcr_strata = n_trcr_strata, & + nt_strata = nt_strata) + else + ! tcraig, reset all tracer values on land to zero + trcrn(i,j,:,:,iblk) = c0 + endif + enddo + enddo + enddo + !$OMP END PARALLEL DO + + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + end subroutine init_restart + +!======================================================================= + + end module CICE_InitMod + +!======================================================================= diff --git a/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 b/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 new file mode 100644 index 000000000..5a4b3d54e --- /dev/null +++ b/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 @@ -0,0 +1,730 @@ + + program gridavgchk + + ! This tests the CICE grid_average_X2Y methods by + ! using CICE_InitMod (from the standalone model) to read/initialize + ! a CICE grid/configuration. Then methods in grid_average_X2Y + ! are verified using hardwired inputs with known outputs. + ! 
There are lots of issues here + ! areas (T, U, N, E) are not locally conservative, affect X2YF + ! X2YF is unmasked which can create havoc in U2T type directions + ! X2YS is masked but there can be no active cells to average (for instance, + ! single gridcell wide channels U2T where the result is zero) + ! land block elimination can lead to missing data on halo + ! This test tries to deal with all these things... + + use CICE_InitMod + use ice_kinds_mod, only: int_kind, dbl_kind + use ice_blocks, only: block, get_block, nx_block, ny_block, nblocks_tot + use ice_boundary, only: ice_HaloUpdate + use ice_constants, only: c0, c1, c2, p25, & + field_loc_center, field_loc_NEcorner, & + field_loc_Nface, field_loc_Eface, field_type_scalar + use ice_communicate, only: my_task, master_task, get_num_procs, MPI_COMM_ICE + use ice_distribution, only: ice_distributionGetBlockID, ice_distributionGet + use ice_domain_size, only: nx_global, ny_global, & + block_size_x, block_size_y, max_blocks + use ice_domain, only: distrb_info, halo_info, landblockelim + use ice_fileunits, only: bfbflag + use ice_exit, only: abort_ice, end_run + use ice_global_reductions, only: global_minval, global_maxval + use ice_grid, only: grid_average_X2Y,tarea,uarea,narea,earea,tmask,umask,nmask,emask, & + hm,uvm,epm,npm + + implicit none + + integer(int_kind) :: i, j, n, ib, ie, jb, je, iblock + integer(int_kind) :: iglob, jglob + integer(int_kind) :: blockID, numBlocks + type (block) :: this_block + + real(dbl_kind) ,allocatable :: array1x(:,:,:), array1y(:,:,:) ! input + real(dbl_kind) ,allocatable :: arraysx(:,:,:), arraysy(:,:,:) ! extra input for NE2T, NE2U + real(dbl_kind) ,allocatable :: array2x(:,:,:), array2y(:,:,:) ! output + real(dbl_kind) ,allocatable :: array3x(:,:,:), array3y(:,:,:) ! error + real(dbl_kind) ,allocatable :: wght1(:,:,:), mask1(:,:,:), array2z(:,:,:) ! extra for explicit + real(dbl_kind) :: amin, amax, fmax, errtol, errx, erry + real(dbl_kind) :: deltax0, deltay0, deltax, deltay + + integer(int_kind), parameter :: maxtests = 3 + integer(int_kind), parameter :: maxgroups = 4 + integer(int_kind) :: numtests_cnt, numgroups_cnt + character(len=16) :: numtests_name(maxtests) + integer(int_kind) :: nbase(maxgroups) + character(len=16) :: numgroups_name(maxgroups) + real(dbl_kind) :: errmax(maxgroups,maxtests) + integer(int_kind) :: npes, ierr, ntask, testcnt, tottest, mtests, cnt, ng + integer(int_kind) :: errorflag0,gflag + integer(int_kind), allocatable :: errorflag(:) + character(len=32), allocatable :: stringflag(:) + integer(int_kind), parameter :: & + passflag = 0, & + failflag = 1 + character(len=8), allocatable :: avgname(:) + logical, allocatable :: dmask(:,:,:,:) + real(dbl_kind), allocatable :: errtolconst(:),errtolijind(:),errtolarea(:) + + real(dbl_kind), parameter :: fillval = -1.0e36_dbl_kind + real(dbl_kind), parameter :: testconst = 100._dbl_kind + character(len=*), parameter :: subname='(gridavgchk)' + + !----------------------------------------------------------------- + ! Initialize CICE + !----------------------------------------------------------------- + + call CICE_Initialize + npes = get_num_procs() + + numtests_name(1) = 'constant' + numtests_name(2) = 'ijindex' + numtests_name(3) = 'area' + numgroups_name(1) = 'X2YA' + numgroups_name(2) = 'X2YS' + numgroups_name(3) = 'X2YF' + numgroups_name(4) = 'NE2YS' + nbase(1) = 16 + nbase(2) = 16 + nbase(3) = 0 + nbase(4) = 4 + errmax = c0 + + if (.not. landblockelim) nbase(3) = nbase(2) !
no land block elimination, can test F mappings + mtests = nbase(1) + nbase(2) + nbase(3) + nbase(4) + + allocate(avgname(mtests)) + allocate(errtolconst(mtests)) + allocate(errtolijind(mtests)) + allocate(errtolarea(mtests)) + errtolconst = c0 + errtolijind = c0 + errtolarea = c0 + tottest = maxtests * mtests + allocate(errorflag(tottest)) + allocate(stringflag(tottest)) + allocate(dmask(nx_block,ny_block,max_blocks,mtests)) + + n = 0 + errtolconst(n+1:n+nbase(1)) = 0.00001_dbl_kind + errtolijind(n+1:n+nbase(1)) = 0.10_dbl_kind + errtolarea (n+1:n+nbase(1)) = 0.04_dbl_kind + if (nx_global > 200 .and. ny_global > 200) then + errtolijind(n+1:n+nbase(1)) = 0.03_dbl_kind + errtolarea (n+1:n+nbase(1)) = 0.003_dbl_kind + endif + n=n+1; avgname(n) = 'T2TA' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'T2UA' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'T2NA' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'T2EA' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'U2TA' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'U2UA' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'U2NA' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'U2EA' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'N2TA' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'N2UA' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'N2NA' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'N2EA' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'E2TA' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'E2UA' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'E2NA' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'E2EA' ; dmask(:,:,:,n) = emask(:,:,:) + + errtolconst(n+1:n+nbase(2)) = 0.00001_dbl_kind + errtolijind(n+1:n+nbase(2)) = 0.51_dbl_kind + errtolarea (n+1:n+nbase(2)) = 0.19_dbl_kind + if (nx_global > 200 .and. ny_global > 200) then + errtolarea (n+1:n+nbase(2)) = 0.06_dbl_kind + endif + n=n+1; avgname(n) = 'T2TS' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'T2US' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'T2NS' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'T2ES' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'U2TS' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'U2US' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'U2NS' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'U2ES' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'N2TS' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'N2US' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'N2NS' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'N2ES' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'E2TS' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'E2US' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'E2NS' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'E2ES' ; dmask(:,:,:,n) = emask(:,:,:) + + if (nbase(3) > 0) then + errtolconst(n+1:n+nbase(3)) = 0.0065_dbl_kind + errtolijind(n+1:n+nbase(3)) = 0.65_dbl_kind + errtolarea (n+1:n+nbase(3)) = 0.04_dbl_kind + if (nx_global > 200 .and. 
ny_global > 200) then + errtolijind(n+1:n+nbase(3)) = 0.22_dbl_kind + errtolarea (n+1:n+nbase(3)) = 0.004_dbl_kind + endif + n=n+1; avgname(n) = 'T2TF' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'T2UF' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'T2NF' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'T2EF' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'U2TF' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'U2UF' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'U2NF' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'U2EF' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'N2TF' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'N2UF' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'N2NF' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'N2EF' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'E2TF' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'E2UF' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'E2NF' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'E2EF' ; dmask(:,:,:,n) = emask(:,:,:) + endif + + errtolconst(n+1:n+nbase(4)) = 0.00001_dbl_kind + errtolijind(n+1:n+nbase(4)) = 0.51_dbl_kind + errtolarea (n+1:n+nbase(4)) = 0.12_dbl_kind + if (nx_global > 200 .and. ny_global > 200) then + errtolijind(n+1:n+nbase(4)) = 0.26_dbl_kind + errtolarea (n+1:n+nbase(4)) = 0.03_dbl_kind + endif + n=n+1; avgname(n) = 'NE2TS'; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'EN2TS'; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'NE2US'; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'EN2US'; dmask(:,:,:,n) = umask(:,:,:) + + if (n /= mtests) then + call abort_ice(subname//' n ne mtests') + endif + + !----------------------------------------------------------------- + ! Testing + !----------------------------------------------------------------- + + if (my_task == master_task) then + write(6,*) ' ' + write(6,*) '==========================================================' + write(6,*) ' ' + write(6,*) 'RunningUnitTest GRIDAVGCHK' + write(6,*) ' ' + write(6,*) ' npes = ',npes + write(6,*) ' my_task = ',my_task + write(6,*) ' nx_global = ',nx_global + write(6,*) ' ny_global = ',ny_global + write(6,*) ' block_size_x = ',block_size_x + write(6,*) ' block_size_y = ',block_size_y + write(6,*) ' nblocks_tot = ',nblocks_tot + write(6,*) ' tottest = ',tottest + write(6,*) ' ' + endif + + errorflag0 = passflag + errorflag = passflag + stringflag = ' ' + + ! --------------------------- + ! TEST GRID AVERAGES + ! --------------------------- + + if (my_task == master_task) write(6,*) ' ' + + allocate(array1x(nx_block,ny_block,max_blocks)) + allocate(array1y(nx_block,ny_block,max_blocks)) + allocate(arraysx(nx_block,ny_block,max_blocks)) + allocate(arraysy(nx_block,ny_block,max_blocks)) + allocate(array2x(nx_block,ny_block,max_blocks)) + allocate(array2y(nx_block,ny_block,max_blocks)) + allocate(array3x(nx_block,ny_block,max_blocks)) + allocate(array3y(nx_block,ny_block,max_blocks)) + allocate(wght1 (nx_block,ny_block,max_blocks)) + allocate(mask1 (nx_block,ny_block,max_blocks)) + allocate(array2z(nx_block,ny_block,max_blocks)) + + call ice_distributionGet(distrb_info, numLocalBlocks = numBlocks) + + testcnt = 0 + + !---------------- + ! 
Test constant field + !---------------- + + numtests_cnt = 1 + if (my_task == master_task) then + write(6,*) '' + write(6,*) 'TEST constant field, test ',numtests_cnt + endif + + array1x = testconst + arraysx = testconst + + do n = 1,mtests + testcnt = testcnt + 1 + + cnt = 0 + do ng = 1,maxgroups + if (n > cnt) numgroups_cnt = ng + cnt = cnt + nbase(ng) + enddo + + errtol = errtolconst(n) + stringflag(testcnt) = trim(avgname(n))//' const' + if (my_task == master_task) then + write(6,*) '' + write(6,110) trim(stringflag(testcnt))//' test ',testcnt,errtol,numtests_cnt,numgroups_cnt + endif + + array2x = c0 + if (len_trim(avgname(n)) == 4) then + call grid_average_X2Y(avgname(n)(4:4),array1x,avgname(n)(1:1),array2x,avgname(n)(3:3)) + else ! len_trim(avgname(n)) == 5 + if (avgname(n)(1:2) == 'NE') then + call grid_average_X2Y(avgname(n)(5:5),array1x,avgname(n)(1:1),arraysx,avgname(n)(2:2),array2x,avgname(n)(4:4)) + else ! EN, swap needed + call grid_average_X2Y(avgname(n)(5:5),arraysx,avgname(n)(1:1),array1x,avgname(n)(2:2),array2x,avgname(n)(4:4)) + endif + endif + + array3x = c0 + do iblock = 1,numBlocks + call ice_distributionGetBlockID(distrb_info, iblock, blockID) + this_block = get_block(blockID, blockID) + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + do j = jb,je + jglob = this_block%j_glob(j) + do i = ib,ie + iglob = this_block%i_glob(i) + array3x(i,j,iblock) = (array2x(i,j,iblock) - testconst)/testconst + ! if array2 is c0, then there are no valid surrounding points and ignore it + if (array2x(i,j,iblock) == c0) array3x(i,j,iblock) = c0 + errx = abs(array3x(i,j,iblock)) + ! flag points that are active and error numerically + if (dmask(i,j,iblock,n) .and. errx > errtol .and. array2x(i,j,iblock) /= c0) then + errorflag(testcnt) = failflag + errorflag0 = failflag + write(100+my_task,*) '' + write(100+my_task,100) 'error const '//trim(avgname(n)),my_task,iblock,i,j,iglob,jglob + write(100+my_task,101) 'value, error ',array2x(i,j,iblock),errx + endif + enddo + enddo + enddo + gflag = global_maxval(errorflag(testcnt), MPI_COMM_ICE) + if (my_task == master_task .and. gflag == failflag) write(6,*) ' *** FAIL ***' + amin = global_minval(array1x, distrb_info) + amax = global_maxval(array1x, distrb_info) + if (my_task == master_task) write(6,102) 'input min/max = ',amin,amax + amin = global_minval(array2x, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array2x, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,102) 'result min/max = ',amin,amax + amin = global_minval(array3x, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array3x, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,102) 'error min/max = ',amin,amax + amax = global_maxval(abs(array3x), distrb_info, dmask(:,:,:,n)) + errmax(numgroups_cnt,numtests_cnt) = max(errmax(numgroups_cnt,numtests_cnt), amax) + enddo + + !---------------- + ! Test global i, j fields + ! for NE2T, NE2U, inputs should result in exact calcs + !---------------- + + numtests_cnt = 2 + if (my_task == master_task) then + write(6,*) '' + write(6,*) 'TEST global i, j fields, test ',numtests_cnt + endif + + array1x = -999. + arraysx = -999. 
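
For readers decoding the string-driven calls in the constant-field test above, the three grid_average_X2Y call shapes exercised by this driver can be written with literal arguments. This is an editor's sketch, not part of the patch, and it assumes only the interfaces and arrays already declared in this program:

      ! 'T2US' unpacks as method 'S' (masked), source grid 'T',
      ! destination grid 'U':
      call grid_average_X2Y('S', array1x, 'T', array2x, 'U')
      ! two-source form behind the five-character names such as 'NE2TS';
      ! N-point and E-point inputs are combined onto T points:
      call grid_average_X2Y('S', array1x, 'N', arraysx, 'E', array2x, 'T')
      ! explicit-weight form used in the area test further below; the
      ! caller passes the weight (wght1) and mask (mask1) fields directly:
      call grid_average_X2Y('S', array1x, 'T', wght1, mask1, array2z, 'U')
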
+ + do iblock = 1,numBlocks + call ice_distributionGetBlockID(distrb_info, iblock, blockID) + this_block = get_block(blockID, blockID) + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + do j = jb,je + do i = ib,ie + array1x(i,j,iblock) = real(this_block%i_glob(i),kind=dbl_kind) + array1y(i,j,iblock) = real(this_block%j_glob(j),kind=dbl_kind) + enddo + enddo + enddo + + ! Fill in ghost cells with locally appropriate value + do iblock = 1,numBlocks + call ice_distributionGetBlockID(distrb_info, iblock, blockID) + this_block = get_block(blockID, blockID) + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + ! skip corners + do i = ib,ie + array1x(i,jb-1,iblock) = array1x(i,jb,iblock) + array1y(i,jb-1,iblock) = array1y(i,jb,iblock) - 1.0_dbl_kind + array1x(i,je+1,iblock) = array1x(i,je,iblock) + array1y(i,je+1,iblock) = array1y(i,je,iblock) + 1.0_dbl_kind + enddo + ! set corners + do j = 1,ny_block + array1x(ib-1,j,iblock) = array1x(ib,j,iblock) - 1.0_dbl_kind + array1y(ib-1,j,iblock) = array1y(ib,j,iblock) + array1x(ie+1,j,iblock) = array1x(ie,j,iblock) + 1.0_dbl_kind + array1y(ie+1,j,iblock) = array1y(ie,j,iblock) + enddo + enddo + + arraysx = array1x + 0.5_dbl_kind + arraysy = array1y - 0.5_dbl_kind + + do n = 1,mtests + testcnt = testcnt + 1 + + cnt = 0 + do ng = 1,maxgroups + if (n > cnt) numgroups_cnt = ng + cnt = cnt + nbase(ng) + enddo + + stringflag(testcnt) = trim(avgname(n))//' ijind' + if (my_task == master_task) then + write(6,*) '' + write(6,110) trim(stringflag(testcnt))//' test ',testcnt,errtolijind(n),numtests_cnt,numgroups_cnt + endif + + deltax0 = 0.0_dbl_kind + deltay0 = 0.0_dbl_kind + if (avgname(n)(1:3) == 'T2U' .or. & + avgname(n)(1:3) == 'T2E' .or. & + avgname(n)(1:3) == 'N2U' .or. & + avgname(n)(1:3) == 'N2E' .or. & + avgname(n)(1:4) == 'NE2U'.or. & + avgname(n)(1:4) == 'EN2U') then + deltax0 = 0.5_dbl_kind + elseif (avgname(n)(1:3) == 'U2T' .or. & + avgname(n)(1:3) == 'U2N' .or. & + avgname(n)(1:3) == 'E2T' .or. & + avgname(n)(1:3) == 'E2N' ) then + deltax0 = -0.5_dbl_kind + endif + if (avgname(n)(1:3) == 'T2U' .or. & + avgname(n)(1:3) == 'T2N' .or. & + avgname(n)(1:3) == 'E2U' .or. & + avgname(n)(1:3) == 'E2N' ) then + deltay0 = 0.5_dbl_kind + elseif (avgname(n)(1:3) == 'U2T' .or. & + avgname(n)(1:3) == 'U2E' .or. & + avgname(n)(1:3) == 'N2T' .or. & + avgname(n)(1:3) == 'N2E' .or. & + avgname(n)(1:4) == 'NE2T'.or. & + avgname(n)(1:4) == 'EN2T') then + deltay0 = -0.5_dbl_kind + endif + + array2x = c0 + array2y = c0 + if (len_trim(avgname(n)) == 4) then + call grid_average_X2Y(avgname(n)(4:4),array1x,avgname(n)(1:1),array2x,avgname(n)(3:3)) + call grid_average_X2Y(avgname(n)(4:4),array1y,avgname(n)(1:1),array2y,avgname(n)(3:3)) + else ! len_trim(avgname(n)) == 5 + if (avgname(n)(1:2) == 'NE') then + call grid_average_X2Y(avgname(n)(5:5),array1x,avgname(n)(1:1),arraysx,avgname(n)(2:2),array2x,avgname(n)(4:4)) + call grid_average_X2Y(avgname(n)(5:5),array1y,avgname(n)(1:1),arraysy,avgname(n)(2:2),array2y,avgname(n)(4:4)) + else ! 
EN, swap needed array1 is N, arrays is E + call grid_average_X2Y(avgname(n)(5:5),arraysx,avgname(n)(1:1),array1x,avgname(n)(2:2),array2x,avgname(n)(4:4)) + call grid_average_X2Y(avgname(n)(5:5),arraysy,avgname(n)(1:1),array1y,avgname(n)(2:2),array2y,avgname(n)(4:4)) + endif + endif + + array3x = c0 + errtol = errtolijind(n) + do iblock = 1,numBlocks + call ice_distributionGetBlockID(distrb_info, iblock, blockID) + this_block = get_block(blockID, blockID) + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + do j = jb,je + jglob = this_block%j_glob(j) + do i = ib,ie + iglob = this_block%i_glob(i) + deltax = deltax0 + deltay = deltay0 + array3x(i,j,iblock) = array2x(i,j,iblock)-array1x(i,j,iblock)-deltax + ! if array2 is c0, then there are no valid surrounding points and ignore it + if (array2x(i,j,iblock) == c0) array3x(i,j,iblock) = c0 + errx = abs(array3x(i,j,iblock)) + array3y(i,j,iblock) = array2y(i,j,iblock)-array1y(i,j,iblock)-deltay + ! if array2 is c0, then there are no valid surrounding points and ignore it + if (array2y(i,j,iblock) == c0) array3y(i,j,iblock) = c0 + erry = abs(array3y(i,j,iblock)) + ! flag points that are active and error numerically + if (dmask(i,j,iblock,n) .and. (errx > errtol .or. erry > errtol)) then + errorflag(testcnt) = failflag + errorflag0 = failflag + write(100+my_task,*) '' + write(100+my_task,100) 'error ijind '//trim(avgname(n)),my_task,iblock,i,j,iglob,jglob + write(100+my_task,101) 'array2x, err',array2x(i,j,iblock),errx + write(100+my_task,101) 'array1x j+1 ',array1x(i-1,j+1,iblock),array1x(i,j+1,iblock),array1x(i+1,j+1,iblock) + write(100+my_task,101) 'array1x j ',array1x(i-1,j ,iblock),array1x(i,j ,iblock),array1x(i+1,j ,iblock) + write(100+my_task,101) 'array1x j-1 ',array1x(i-1,j-1,iblock),array1x(i,j-1,iblock),array1x(i+1,j-1,iblock) + write(100+my_task,101) 'array2y, err',array2y(i,j,iblock),erry + write(100+my_task,101) 'array1y j+1 ',array1y(i-1,j+1,iblock),array1y(i,j+1,iblock),array1y(i+1,j+1,iblock) + write(100+my_task,101) 'array1y j ',array1y(i-1,j ,iblock),array1y(i,j ,iblock),array1y(i+1,j ,iblock) + write(100+my_task,101) 'array1y j-1 ',array1y(i-1,j-1,iblock),array1y(i,j-1,iblock),array1y(i+1,j-1,iblock) + write(100+my_task,101) 'tarea ',tarea(i,j,iblock) + write(100+my_task,101) 'uarea j+1 ',uarea (i-1,j+1,iblock),uarea (i,j+1,iblock),uarea (i+1,j+1,iblock) + write(100+my_task,101) 'uarea j ',uarea (i-1,j ,iblock),uarea (i,j ,iblock),uarea (i+1,j ,iblock) + write(100+my_task,101) 'uarea j-1 ',uarea (i-1,j-1,iblock),uarea (i,j-1,iblock),uarea (i+1,j-1,iblock) + write(100+my_task,101) 'hm j+1 ',hm (i-1,j+1,iblock),hm (i,j+1,iblock),hm (i+1,j+1,iblock) + write(100+my_task,101) 'hm j ',hm (i-1,j ,iblock),hm (i,j ,iblock),hm (i+1,j ,iblock) + write(100+my_task,101) 'hm j-1 ',hm (i-1,j-1,iblock),hm (i,j-1,iblock),hm (i+1,j-1,iblock) + write(100+my_task,101) 'uvm j+1 ',uvm (i-1,j+1,iblock),uvm (i,j+1,iblock),uvm (i+1,j+1,iblock) + write(100+my_task,101) 'uvm j ',uvm (i-1,j ,iblock),uvm (i,j ,iblock),uvm (i+1,j ,iblock) + write(100+my_task,101) 'uvm j-1 ',uvm (i-1,j-1,iblock),uvm (i,j-1,iblock),uvm (i+1,j-1,iblock) + endif + enddo + enddo + enddo + + gflag = global_maxval(errorflag(testcnt), MPI_COMM_ICE) + if (my_task == master_task .and. 
gflag == failflag) write(6,*) ' *** FAIL ***' + amin = global_minval(array1x, distrb_info) + amax = global_maxval(array1x, distrb_info) + if (my_task == master_task) write(6,102) 'i_glob min/max = ',amin,amax + amin = global_minval(array1y, distrb_info) + amax = global_maxval(array1y, distrb_info) + if (my_task == master_task) write(6,102) 'j_glob min/max = ',amin,amax + amin = global_minval(array2x, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array2x, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,102) 'i result min/max = ',amin,amax + amin = global_minval(array2y, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array2y, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,102) 'j result min/max = ',amin,amax + amin = global_minval(array3x, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array3x, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,102) 'i error min/max = ',amin,amax + amin = global_minval(array3y, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array3y, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,102) 'j error min/max = ',amin,amax + amax = global_maxval(abs(array3x), distrb_info, dmask(:,:,:,n)) + errmax(numgroups_cnt,numtests_cnt) = max(errmax(numgroups_cnt,numtests_cnt), amax) + amax = global_maxval(abs(array3y), distrb_info, dmask(:,:,:,n)) + errmax(numgroups_cnt,numtests_cnt) = max(errmax(numgroups_cnt,numtests_cnt), amax) + + enddo + + !---------------- + ! Test area fields + !---------------- + + numtests_cnt = 3 + if (my_task == master_task) then + write(6,*) '' + write(6,*) 'TEST area fields, test ',numtests_cnt + endif + + do n = 1,mtests + testcnt = testcnt + 1 + + cnt = 0 + do ng = 1,maxgroups + if (n > cnt) numgroups_cnt = ng + cnt = cnt + nbase(ng) + enddo + + stringflag(testcnt) = trim(avgname(n))//' area' + if (my_task == master_task) then + write(6,*) '' + write(6,110) trim(stringflag(testcnt))//' test ',testcnt,errtolarea(n),numtests_cnt,numgroups_cnt + endif + + array1x = -999. + arraysx = -999. + mask1 = -999. + wght1 = -999. + if (avgname(n)(1:2) == 'T2') then + array1x = tarea + wght1 = tarea + mask1 = hm + elseif (avgname(n)(1:2) == 'U2') then + array1x = uarea + wght1 = uarea + mask1 = uvm + elseif (avgname(n)(1:2) == 'E2') then + array1x = earea + wght1 = earea + mask1 = epm + elseif (avgname(n)(1:2) == 'N2') then + array1x = narea + wght1 = narea + mask1 = npm + elseif (avgname(n)(1:3) == 'NE2') then + array1x = narea + arraysx = earea + elseif (avgname(n)(1:3) == 'EN2') then + array1x = earea + arraysx = narea + else + call abort_ice(subname//' avgname not supported 1x = '//trim(avgname(n))) + endif + + array2y = -999. + if (avgname(n)(2:3) == '2T' .or. & + avgname(n)(3:4) == '2T') then + array2y = tarea + elseif (avgname(n)(2:3) == '2U' .or. & + avgname(n)(3:4) == '2U') then + array2y = uarea + elseif (avgname(n)(2:3) == '2E') then + array2y = earea + elseif (avgname(n)(2:3) == '2N') then + array2y = narea + else + call abort_ice(subname//' avgname not supported 2y = '//trim(avgname(n))) + endif + + array2x = c0 + if (len_trim(avgname(n)) == 4) then +! call grid_average_X2Y(trim(avgname(n)),array1x,array2x) + call grid_average_X2Y(avgname(n)(4:4),array1x,avgname(n)(1:1),array2x,avgname(n)(3:3)) + ! ------ + ! Extra Explicit Calc Test + ! ------ + if (avgname(n)(2:2) == '2' .and. (avgname(n)(4:4) == 'S' .or. avgname(n)(4:4) == 'A')) then + stringflag(testcnt) = trim(stringflag(testcnt))//' + explicit' + if (avgname(n)(4:4) == 'S') then + ! 
test direct mapping compared to S, array1x*wght1*mask1 where wght1=area and mask1=mask + call grid_average_X2Y(avgname(n)(4:4),array1x,avgname(n)(1:1),wght1,mask1,array2z,avgname(n)(3:3)) + elseif (avgname(n)(4:4) == 'A') then + ! test direct mapping compared to A, array1x*wght1 where wght1=area and mask1=1.0 + mask1 = c1 + call grid_average_X2Y(avgname(n)(4:4),array1x,avgname(n)(1:1),wght1,mask1,array2z,avgname(n)(3:3)) + endif + fmax = global_maxval(abs(array1x), distrb_info) + amax = global_maxval(abs(array2z-array2x), distrb_info) +! tcraig, errtol=c0 doesn't work here, diff seems smaller than roundoff? - interesting +! errtol = c0 + errtol = 1.0e-20_dbl_kind + if (amax < fmax * errtol) then + if (my_task == master_task) write(6,103) 'PASS explicit avg vs implicit avg ',errtol + else + errorflag(testcnt) = failflag + errorflag0 = failflag + if (my_task == master_task) write(6,103) 'FAIL explicit avg vs implicit avg ',amax,fmax*errtol + amin = global_minval(array2x, distrb_info) + amax = global_maxval(array2x, distrb_info) + if (my_task == master_task) write(6,103) 'output min/max = ',amin,amax + amin = global_minval(array2z, distrb_info) + amax = global_maxval(array2z, distrb_info) + if (my_task == master_task) write(6,103) 'expout min/max = ',amin,amax + endif + endif + + else ! len_trim(avgname(n)) == 5 + ! no swap needed 1x and sx set based on NE or EN + call grid_average_X2Y(avgname(n)(5:5),array1x,avgname(n)(1:1),arraysx,avgname(n)(2:2),array2x,avgname(n)(4:4)) + endif + + array3x = c1 + array3y = c1 + + do iblock = 1,numBlocks + call ice_distributionGetBlockID(distrb_info, iblock, blockID) + this_block = get_block(blockID, blockID) + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + do j = jb,je + jglob = this_block%j_glob(j) + do i = ib,ie + iglob = this_block%i_glob(i) + array3x(i,j,iblock) = array2x(i,j,iblock)/array2y(i,j,iblock) - c1 + ! if array2 is c0, then there are no valid surrounding points and ignore it + if (array2x(i,j,iblock) == c0) array3x(i,j,iblock) = c0 + errx = abs(array3x(i,j,iblock)) + ! flag points that are active and error numerically + if (dmask(i,j,iblock,n) .and. errx > errtolarea(n)) then + errorflag(testcnt) = failflag + errorflag0 = failflag + write(100+my_task,*) '' + write(100+my_task,100) 'error area '//trim(avgname(n)),my_task,iblock,i,j,iglob,jglob + write(100+my_task,101) 'out,exact,err',array2x(i,j,iblock),array2y(i,j,iblock),array3x(i,j,iblock) + endif + enddo + enddo + enddo + gflag = global_maxval(errorflag(testcnt), MPI_COMM_ICE) + if (my_task == master_task .and. 
gflag == failflag) write(6,*) ' *** FAIL ***' + amin = global_minval(array1x, distrb_info) + amax = global_maxval(array1x, distrb_info) + if (my_task == master_task) write(6,103) 'input min/max = ',amin,amax + amin = global_minval(array2x, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array2x, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,103) 'output min/max = ',amin,amax + amin = global_minval(array2y, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array2y, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,103) 'exact min/max = ',amin,amax + amin = global_minval(array3x, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array3x, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,102) 'error min/max = ',amin,amax + amax = global_maxval(abs(array3x), distrb_info, dmask(:,:,:,n)) + errmax(numgroups_cnt,numtests_cnt) = max(errmax(numgroups_cnt,numtests_cnt), amax) + enddo + +100 format(a,10i8) +101 format(a,3g16.7) +102 format(a,3f16.7) +103 format(a,2g16.7,f16.7) +110 format(a,i8,g16.7,6i8) + + if (my_task == master_task) then + write(6,*) ' ' + write(6,*) 'Max Errors:' + do i = 1,maxgroups + do j = 1,maxtests + write(6,'(2x,a16,2x,a16,2x,f23.16)') trim(numgroups_name(i)),trim(numtests_name(j)),errmax(i,j) + enddo + enddo + endif + + gflag = global_maxval(errorflag0, MPI_COMM_ICE) + errorflag0 = gflag + do n = 1,tottest + gflag = global_maxval(errorflag(n), MPI_COMM_ICE) + errorflag(n) = gflag + enddo + + if (my_task == master_task) then + write(6,*) ' ' + write(6,*) 'GRIDAVGCHK COMPLETED SUCCESSFULLY' + do n = 1,tottest + if (errorflag(n) == passflag) then + write(6,*) 'PASS ',trim(stringflag(n)) + else + write(6,*) 'FAIL ',trim(stringflag(n)) + endif + enddo + if (errorflag0 == passflag) then + write(6,*) 'GRIDAVGCHK TEST COMPLETED SUCCESSFULLY' + else + write(6,*) 'GRIDAVGCHK TEST FAILED' + endif + write(6,*) ' ' + write(6,*) '==========================================================' + write(6,*) ' ' + endif + + + !----------------------------------------------------------------- + ! Gracefully end + !----------------------------------------------------------------- + + call end_run() + + end program gridavgchk + +!======================================================================= diff --git a/cicecore/shared/ice_restart_column.F90 b/cicecore/shared/ice_restart_column.F90 index 074b37dbe..6ce393190 100644 --- a/cicecore/shared/ice_restart_column.F90 +++ b/cicecore/shared/ice_restart_column.F90 @@ -913,6 +913,7 @@ subroutine write_restart_hbrine() use ice_blocks, only: block, get_block use ice_domain, only: nblocks, blocks_ice use ice_fileunits, only: nu_dump_hbrine + use ice_grid, only: tmask use ice_state, only: trcrn use ice_restart,only: write_restart_field @@ -949,7 +950,8 @@ subroutine write_restart_hbrine() do j = jlo, jhi do i = ilo, ihi do n = 1, ncat - if (first_ice (i,j,n,iblk)) then + ! zero out first_ice over land + if (tmask(i,j,iblk) .and. first_ice (i,j,n,iblk)) then first_ice_real(i,j,n,iblk) = c1 else first_ice_real(i,j,n,iblk) = c0 @@ -983,6 +985,7 @@ subroutine write_restart_bgc() use ice_fileunits, only: nu_dump_bgc use ice_flux_bgc, only: nit, amm, sil, dmsp, dms, algalN, & doc, don, dic, fed, fep, zaeros, hum + use ice_grid, only: tmask use ice_state, only: trcrn use ice_flux, only: sss use ice_restart, only: write_restart_field @@ -1058,6 +1061,39 @@ subroutine write_restart_bgc() diag = .true. + !----------------------------------------------------------------- + ! 
Zero out tracers over land + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + if (.not. tmask(i,j,iblk)) then + if (tr_bgc_N ) algalN(i,j,:,iblk) = c0 + if (tr_bgc_C ) doc (i,j,:,iblk) = c0 + if (tr_bgc_C ) dic (i,j,:,iblk) = c0 + if (tr_bgc_Nit) nit (i,j ,iblk) = c0 + if (tr_bgc_Am ) amm (i,j ,iblk) = c0 + if (tr_bgc_Sil) sil (i,j ,iblk) = c0 + if (tr_bgc_hum) hum (i,j ,iblk) = c0 + if (tr_bgc_DMS) dms (i,j ,iblk) = c0 + if (tr_bgc_DMS) dmsp (i,j ,iblk) = c0 + if (tr_bgc_DON) don (i,j,:,iblk) = c0 + if (tr_bgc_Fe ) fed (i,j,:,iblk) = c0 + if (tr_bgc_Fe ) fep (i,j,:,iblk) = c0 + if (solve_zsal) sss (i,j ,iblk) = c0 + endif + enddo + enddo + enddo + !$OMP END PARALLEL DO + !----------------------------------------------------------------- ! Salinity and extras !----------------------------------------------------------------- diff --git a/configuration/scripts/Makefile b/configuration/scripts/Makefile index 07ee380a8..0322513d2 100644 --- a/configuration/scripts/Makefile +++ b/configuration/scripts/Makefile @@ -74,7 +74,7 @@ AR := ar .SUFFIXES: -.PHONY: all cice libcice targets target db_files db_flags clean realclean helloworld calchk sumchk bcstchk +.PHONY: all cice libcice targets target db_files db_flags clean realclean helloworld calchk sumchk bcstchk gridavgchk all: $(EXEC) cice: $(EXEC) @@ -93,7 +93,7 @@ targets: @echo " " @echo "Supported Makefile Targets are: cice, libcice, makdep, depends, clean, realclean" @echo " Diagnostics: targets, db_files, db_flags" - @echo " Unit Tests : helloworld, calchk, sumchk, bcstchk" + @echo " Unit Tests : helloworld, calchk, sumchk, bcstchk, gridavgchk" target: targets db_files: @@ -149,6 +149,8 @@ sumchk: $(EXEC) bcstchk: $(EXEC) +gridavgchk: $(EXEC) + # this builds just a subset of source code specified explicitly and requires a separate target HWOBJS := helloworld.o diff --git a/configuration/scripts/cice.batch.csh b/configuration/scripts/cice.batch.csh index 85191dbf6..7a1334532 100755 --- a/configuration/scripts/cice.batch.csh +++ b/configuration/scripts/cice.batch.csh @@ -122,6 +122,22 @@ cat >> ${jobfile} << EOFB ###PBS -m be EOFB +else if (${ICE_MACHINE} =~ nrlssc*) then +# nrlssc queue system has nodes with different tasks per node +if (${taskpernode} <= 12) set tpnstr = 'twelve' +if (${taskpernode} == 20) set tpnstr = 'twenty' +if (${taskpernode} == 24) set tpnstr = 'twentyfour' +if (${taskpernode} == 28) set tpnstr = 'twentyeight' + +cat >> ${jobfile} << EOFB ... EOFB diff --git a/configuration/scripts/cice.launch.csh b/configuration/scripts/cice.launch.csh --- a/configuration/scripts/cice.launch.csh +++ b/configuration/scripts/cice.launch.csh @@ ... @@ ./cice >&! \$ICE_RUNLOG_FILE EOFR endif +#======= +else if (${ICE_MACHINE} =~ nrlssc*) then +if (${ICE_COMMDIR} =~ serial*) then +cat >> ${jobfile} << EOFR +./cice >&! \$ICE_RUNLOG_FILE +EOFR +else +cat >> ${jobfile} << EOFR +mpirun -np ${ntasks} ./cice >&!
\$ICE_RUNLOG_FILE +EOFR +endif + #======= else if (${ICE_MACHINE} =~ onyx* || ${ICE_MACHINE} =~ narwhal) then cat >> ${jobfile} << EOFR diff --git a/configuration/scripts/cice_decomp.csh b/configuration/scripts/cice_decomp.csh index aa1bb9a54..0c6715f3b 100755 --- a/configuration/scripts/cice_decomp.csh +++ b/configuration/scripts/cice_decomp.csh @@ -66,6 +66,17 @@ else if (${grid} == 'gbox80') then set blckx = 8; set blcky = 8 endif +else if (${grid} == 'gbox12') then + set nxglob = 12 + set nyglob = 12 + if (${cicepes} <= 1) then + set blckx = 12; set blcky = 12 + else if (${cicepes} <= 8) then + set blckx = 4; set blcky = 4 + else + set blckx = 2; set blcky = 2 + endif + else if (${grid} == 'gx3') then set nxglob = 100 set nyglob = 116 diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index 7ed806edf..25130b131 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -39,6 +39,7 @@ print_global = .true. print_points = .true. timer_stats = .false. + memory_stats = .false. conserv_check = .false. latpnt(1) = 90. lonpnt(1) = 0. @@ -61,7 +62,11 @@ &grid_nml grid_format = 'bin' grid_type = 'displaced_pole' + grid_ice = 'B' + grid_atm = 'A' + grid_ocn = 'A' grid_file = 'grid' + kmt_type = 'file' kmt_file = 'kmt' bathymetry_file = 'unknown_bathymetry_file' bathymetry_format = 'default' @@ -144,6 +149,11 @@ Ktens = 0. e_yieldcurve = 2. e_plasticpot = 2. + visc_method = 'avg_strength' + elasticDamp = 0.36d0 + deltaminEVP = 1e-11 + deltaminVP = 2e-9 + capping = 1. seabed_stress = .false. seabed_stress_method = 'LKD' k1 = 7.5 @@ -255,7 +265,9 @@ ocn_data_type = 'default' bgc_data_type = 'default' fe_data_type = 'default' - ice_data_type = 'default' + ice_data_type = 'latsst' + ice_data_conc = 'parabolic' + ice_data_dist = 'uniform' fyear_init = 2005 ycycle = 1 atm_data_format = 'bin' @@ -439,13 +451,22 @@ &icefields_nml f_tmask = .true. + f_umask = .false. + f_nmask = .false. + f_emask = .false. f_blkmask = .true. f_tarea = .true. f_uarea = .true. + f_narea = .false. + f_earea = .false. f_dxt = .false. f_dyt = .false. f_dxu = .false. f_dyu = .false. + f_dxe = .false. + f_dye = .false. + f_dxn = .false. + f_dyn = .false. f_HTN = .false. f_HTE = .false. f_ANGLE = .true. 
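[Editor's note] The grid_nml entries added to ice_in above are the namelist face of the new C grid support: grid_ice selects the staggering used by the ice dynamics, grid_atm and grid_ocn describe the staggering assumed for the forcing data, and kmt_type generalizes the land mask source. As a minimal sketch (not part of the patch) of how a case might select the C grid, combining the new defaults with the set_nml.gridc option that appears later in this diff:

    &grid_nml
      grid_format = 'bin'
      grid_type   = 'displaced_pole'
      grid_ice    = 'C'      ! 'B' is the default; set_nml.gridcd uses 'C_override_D'
      grid_atm    = 'A'      ! staggering assumed for atmosphere forcing
      grid_ocn    = 'A'      ! staggering assumed for ocean forcing
      kmt_type    = 'file'   ! box cases instead use 'default', 'channel', 'wall',
                             ! or 'boxislands' (see the set_nml.box* options below)
      kmt_file    = 'kmt'
    /

The inline comments are editorial glosses on how the options in this patch use these settings; ug_case_settings.rst in the full patch documents the accepted values.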
diff --git a/configuration/scripts/machines/Macros.narwhal_aocc b/configuration/scripts/machines/Macros.narwhal_aocc index 44b1dc2f6..95f301e85 100644 --- a/configuration/scripts/machines/Macros.narwhal_aocc +++ b/configuration/scripts/machines/Macros.narwhal_aocc @@ -3,7 +3,7 @@ #============================================================================== CPP := ftn -E -CPPDEFS := -DNO_R16 -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CPPDEFS := -DFORTRANUNDERSCORE -DNO_R16 ${ICE_CPPDEFS} CFLAGS := -c -O2 FIXEDFLAGS := -ffixed-form diff --git a/configuration/scripts/machines/Macros.narwhal_cray b/configuration/scripts/machines/Macros.narwhal_cray index ab0e6378e..8496f7a9b 100644 --- a/configuration/scripts/machines/Macros.narwhal_cray +++ b/configuration/scripts/machines/Macros.narwhal_cray @@ -3,7 +3,7 @@ #============================================================================== CPP := ftn -e P -CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CPPDEFS := -DFORTRANUNDERSCORE -DNO_R16 ${ICE_CPPDEFS} CFLAGS := -c -O2 FIXEDFLAGS := -132 diff --git a/configuration/scripts/machines/Macros.nrlssc_gnu b/configuration/scripts/machines/Macros.nrlssc_gnu new file mode 100644 index 000000000..91d2fae0b --- /dev/null +++ b/configuration/scripts/machines/Macros.nrlssc_gnu @@ -0,0 +1,59 @@ +#============================================================================== +# Makefile macros for NRLSSC GCC and openmpi compilers +#============================================================================== + +# specific Netcdf and MPI paths, since we use /common instead of /usr + +#MPIHOME := /common/openmpi/gnu10.1.0/3.1.6 +#CDFHOME := /common/netcdf/gnu10.1.0/openmpi3.1.6/4.7.4 + +# use MY defined vars from .setenv_linux +MPIHOME := ${MPI_HOME} +CDFHOME := ${NETCDF_HOME} + +CPP := cpp +CPPDEFS := -DFORTRANUNDERSCORE -DNO_R16 -DHAVE_F2008_CONTIGUOUS -DLINUX -DCPRINTEL ${ICE_CPPDEFS} +CFLAGS := -c -O2 + +FIXEDFLAGS := -ffixed-line-length-132 +FREEFLAGS := -ffree-form +FFLAGS := -fconvert=big-endian -fbacktrace -ffree-line-length-none +FFLAGS_NOOPT := -O0 + +ifeq ($(ICE_BLDDEBUG), true) + FFLAGS += -O0 -g -fcheck=bounds -finit-real=nan -fimplicit-none -ffpe-trap=invalid,zero,overflow +else + FFLAGS += -O2 +endif + +FC := $(MPIHOME)/bin/mpif90 + +CC:= $(MPIHOME)/bin/mpicc + +MPICC:= $(MPIHOME)/bin/mpicc + +MPIFC:= $(MPIHOME)/bin/mpif90 +LD:= $(FC) + +NETCDF_PATH := $(CDFHOME) + +ifeq ($(ICE_IOTYPE), netcdf) + # NETCDF_PATH := $(shell nc-config --prefix) + INCLDIR := $(INCLDIR) -I$(NETCDF_PATH)/include + LIB_NETCDF := $(NETCDF_PATH)/lib + LIB_PNETCDF := + LD := $(LD) -Wl,-rpath,$(LIB_NETCDF) + SLIBS := -L$(LIB_NETCDF) -lnetcdf -lnetcdff +else + SLIBS := +endif + +LIB_MPI := +SCC:= gcc +SFC:= gfortran + +ifeq ($(ICE_THREADED), true) + LDFLAGS += -fopenmp + CFLAGS += -fopenmp + FFLAGS += -fopenmp +endif diff --git a/configuration/scripts/machines/env.banting_gnu b/configuration/scripts/machines/env.banting_gnu index 997816a9d..fed181121 100755 --- a/configuration/scripts/machines/env.banting_gnu +++ b/configuration/scripts/machines/env.banting_gnu @@ -25,7 +25,9 @@ setenv HDF5_USE_FILE_LOCKING FALSE # necessary since data is on an NFS filesyste endif setenv ICE_MACHINE_MACHNAME banting +setenv ICE_MACHINE_MACHINFO "Cray XC50, Intel Xeon Gold 6148 (Skylake), Aries Interconnect" setenv ICE_MACHINE_ENVNAME gnu +setenv ICE_MACHINE_ENVINFO "gnu??, cray-mpich??, netcdf??" 
setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/banting/cice/runs setenv ICE_MACHINE_INPUTDATA /home/ords/cmdd/cmde/sice500/ diff --git a/configuration/scripts/machines/env.banting_intel b/configuration/scripts/machines/env.banting_intel index 0beeb2618..5273c5d2e 100755 --- a/configuration/scripts/machines/env.banting_intel +++ b/configuration/scripts/machines/env.banting_intel @@ -20,7 +20,7 @@ setenv HDF5_USE_FILE_LOCKING FALSE # necessary since data is on an NFS filesyste endif setenv ICE_MACHINE_MACHNAME banting -setenv ICE_MACHINE_MACHINFO "Cray XC50, Intel Xeon Gold 6148 (Skylake)" +setenv ICE_MACHINE_MACHINFO "Cray XC50, Intel Xeon Gold 6148 (Skylake), Aries Interconnect" setenv ICE_MACHINE_ENVNAME intel setenv ICE_MACHINE_ENVINFO "Intel 19.0.3.199, cray-mpich/7.7.7, cray-netcdf/4.6.1.3" setenv ICE_MACHINE_MAKE make diff --git a/configuration/scripts/machines/env.cesium_intel b/configuration/scripts/machines/env.cesium_intel index 8dabe1645..329bdf32d 100755 --- a/configuration/scripts/machines/env.cesium_intel +++ b/configuration/scripts/machines/env.cesium_intel @@ -10,7 +10,9 @@ source $ssmuse -d /fs/ssm/hpco/tmp/eccc/201402/04/intel-2016.1.150 # netcdf (and #setenv OMP_STACKSIZE 64M setenv ICE_MACHINE_MACHNAME cesium +setenv ICE_MACHINE_MACHINFO "cesium" setenv ICE_MACHINE_ENVNAME intel +setenv ICE_MACHINE_ENVINFO "intel 2016.1.156, openmpi 1.6.5" setenv ICE_MACHINE_MAKE colormake-short setenv ICE_MACHINE_WKDIR /users/dor/afsg/phb/local/CICEDIRS/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /users/dor/afsg/phb/local/FORCING diff --git a/configuration/scripts/machines/env.cheyenne_gnu b/configuration/scripts/machines/env.cheyenne_gnu index 1b79c7f3b..c962c35f3 100755 --- a/configuration/scripts/machines/env.cheyenne_gnu +++ b/configuration/scripts/machines/env.cheyenne_gnu @@ -42,6 +42,8 @@ limit coredumpsize unlimited limit stacksize unlimited # May be needed for OpenMP memory #setenv OMP_STACKSIZE 64M +# OMP runtime diagnostics +#setenv OMP_DISPLAY_ENV TRUE setenv ICE_MACHINE_MACHNAME cheyenne setenv ICE_MACHINE_MACHINFO "SGI ICE XA Xeon E5-2697V4 Broadwell" @@ -55,7 +57,7 @@ setenv ICE_MACHINE_SUBMIT "qsub" setenv ICE_MACHINE_ACCT P00000000 setenv ICE_MACHINE_QUEUE "regular" setenv ICE_MACHINE_TPNODE 36 -setenv ICE_MACHINE_BLDTHRDS 1 +setenv ICE_MACHINE_BLDTHRDS 8 setenv ICE_MACHINE_QSTAT "qstat " # For lcov diff --git a/configuration/scripts/machines/env.cheyenne_intel b/configuration/scripts/machines/env.cheyenne_intel index f469b3585..89a8920b6 100755 --- a/configuration/scripts/machines/env.cheyenne_intel +++ b/configuration/scripts/machines/env.cheyenne_intel @@ -42,6 +42,8 @@ limit coredumpsize unlimited limit stacksize unlimited # May be needed for OpenMP memory #setenv OMP_STACKSIZE 64M +# OMP runtime diagnostics +#setenv OMP_DISPLAY_ENV TRUE setenv ICE_MACHINE_MACHNAME cheyenne setenv ICE_MACHINE_MACHINFO "SGI ICE XA Xeon E5-2697V4 Broadwell" @@ -55,5 +57,5 @@ setenv ICE_MACHINE_SUBMIT "qsub" setenv ICE_MACHINE_ACCT P00000000 setenv ICE_MACHINE_QUEUE "regular" setenv ICE_MACHINE_TPNODE 36 -setenv ICE_MACHINE_BLDTHRDS 1 +setenv ICE_MACHINE_BLDTHRDS 8 setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/machines/env.cheyenne_pgi b/configuration/scripts/machines/env.cheyenne_pgi index b5cf11a51..5caa9d992 100755 --- a/configuration/scripts/machines/env.cheyenne_pgi +++ b/configuration/scripts/machines/env.cheyenne_pgi @@ -42,6 +42,8 @@ limit coredumpsize unlimited limit stacksize unlimited # May be needed for OpenMP memory #setenv OMP_STACKSIZE 64M 
+# OMP runtime diagnostics +#setenv OMP_DISPLAY_ENV TRUE setenv ICE_MACHINE_MACHNAME cheyenne setenv ICE_MACHINE_MACHINFO "SGI ICE XA Xeon E5-2697V4 Broadwell" @@ -55,5 +57,5 @@ setenv ICE_MACHINE_SUBMIT "qsub" setenv ICE_MACHINE_ACCT P00000000 setenv ICE_MACHINE_QUEUE "regular" setenv ICE_MACHINE_TPNODE 36 -setenv ICE_MACHINE_BLDTHRDS 1 +setenv ICE_MACHINE_BLDTHRDS 8 setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/machines/env.conda_macos b/configuration/scripts/machines/env.conda_macos index 3b3537bf7..8eaf5b622 100755 --- a/configuration/scripts/machines/env.conda_macos +++ b/configuration/scripts/machines/env.conda_macos @@ -7,6 +7,11 @@ endif if ("$inp" != "-nomodules") then +#On macos, for this to work, you may need to do something like +# sudo xcode-select -r +# sudo xcode-select -s /Library/Developer/CommandLineTools +# sudo ln -s /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include/* /usr/local/include/ + # Init conda if ! $?CONDA_EXE then echo "" diff --git a/configuration/scripts/machines/env.cori_intel b/configuration/scripts/machines/env.cori_intel index 734b2edf3..45b900983 100755 --- a/configuration/scripts/machines/env.cori_intel +++ b/configuration/scripts/machines/env.cori_intel @@ -13,42 +13,45 @@ module unload PrgEnv-cray module unload PrgEnv-gnu module unload PrgEnv-intel module unload PrgEnv-pgi -module load PrgEnv-intel/6.0.5 +module load PrgEnv-intel/6.0.10 module unload intel -module load intel/19.0.3.199 +module load intel/19.1.2.254 module unload gcc -module load gcc/8.2.0 +module load gcc/11.2.0 module unload cray-mpich module unload cray-mpich-abi -module load cray-mpich/7.7.6 +module load cray-mpich/7.7.19 module unload cray-hdf5 module unload cray-hdf5-parallel module unload cray-netcdf module unload cray-netcdf-hdf5parallel module unload cray-parallel-netcdf -module load cray-netcdf/4.6.3.2 +module load cray-netcdf/4.8.1.1 module unload cray-libsci module unload craype -module load craype/2.6.2 +module load craype/2.7.10 setenv NETCDF_PATH ${NETCDF_DIR} + +endif + +setenv HDF5_USE_FILE_LOCKING FALSE + setenv OMP_PROC_BIND true setenv OMP_PLACES threads setenv OMP_STACKSIZE 32M limit coredumpsize unlimited limit stacksize unlimited -endif - setenv ICE_MACHINE_MACHNAME cori setenv ICE_MACHINE_MACHINFO "Cray XC40 Xeon E5-2698v3 Haswell" setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "ifort 19.0.3.199 20190206, cray-mpich/7.7.6, netcdf/4.6.3.2" +setenv ICE_MACHINE_ENVINFO "ifort 19.1.2.254 20200623, gcc/11.2.0, cray-mpich/7.7.19, netcdf/4.8.1.1" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $SCRATCH/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /project/projectdirs/ccsm1/cice-consortium/ diff --git a/configuration/scripts/machines/env.daley_gnu b/configuration/scripts/machines/env.daley_gnu index 25b438e8e..24c2153e3 100755 --- a/configuration/scripts/machines/env.daley_gnu +++ b/configuration/scripts/machines/env.daley_gnu @@ -25,7 +25,9 @@ setenv HDF5_USE_FILE_LOCKING FALSE # necessary since data is on an NFS filesyste endif setenv ICE_MACHINE_MACHNAME daley +setenv ICE_MACHINE_MACHINFO "Cray XC50, Intel Xeon Gold 6148 (Skylake)" setenv ICE_MACHINE_ENVNAME gnu +setenv ICE_MACHINE_ENVINFO "gnu??, cray-mpich??, netcdf??" 
setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/daley/cice/runs setenv ICE_MACHINE_INPUTDATA /home/ords/cmdd/cmde/sice500/ diff --git a/configuration/scripts/machines/env.fram_intel b/configuration/scripts/machines/env.fram_intel index 98edb3a66..35de927e7 100755 --- a/configuration/scripts/machines/env.fram_intel +++ b/configuration/scripts/machines/env.fram_intel @@ -11,7 +11,9 @@ source $ssmuse -d /fs/ssm/hpco/tmp/eccc/201402/04/intel-2016.1.150 # netcdf (and #setenv OMP_STACKSIZE 64M setenv ICE_MACHINE_MACHNAME fram +setenv ICE_MACHINE_MACHINFO "fram" setenv ICE_MACHINE_ENVNAME intel +setenv ICE_MACHINE_ENVINFO "intel 2016.1.156, openmpi 1.6.5, netcdf" setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR /home/dormrb01/zephyr4/armn/jfl/local1/CICEDIRS/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /users/dor/armn/jfl/local1/FORCING diff --git a/configuration/scripts/machines/env.gaea_intel b/configuration/scripts/machines/env.gaea_intel index e204c6fff..3601bfcfb 100755 --- a/configuration/scripts/machines/env.gaea_intel +++ b/configuration/scripts/machines/env.gaea_intel @@ -22,7 +22,7 @@ module list endif setenv ICE_MACHINE_MACHNAME gaea -setenv ICE_MACHINE_MACHINFO "Cray Intel SkyLake 6148" +setenv ICE_MACHINE_MACHINFO "Cray XC40 Intel Haswell/Broadwell 2.3GHz, Gemini Interconnect" setenv ICE_MACHINE_ENVNAME intel setenv ICE_MACHINE_ENVINFO "ifort 18.0.5 20180823, cray-mpich, cray-netcdf" setenv ICE_MACHINE_MAKE gmake diff --git a/configuration/scripts/machines/env.hera_intel b/configuration/scripts/machines/env.hera_intel index a9cf59516..6698c0c2c 100755 --- a/configuration/scripts/machines/env.hera_intel +++ b/configuration/scripts/machines/env.hera_intel @@ -15,13 +15,13 @@ module load impi/2018.0.4 module load netcdf/4.7.0 #module list -# May be needed for OpenMP memory -#setenv OMP_STACKSIZE 64M - endif +# May be needed for OpenMP memory +setenv OMP_STACKSIZE 64M + setenv ICE_MACHINE_MACHNAME hera -setenv ICE_MACHINE_MACHINFO "Cray Intel SkyLake 6148" +setenv ICE_MACHINE_MACHINFO "Cray CS500 Intel SkyLake 2.4GHz, Infiniband HDR" setenv ICE_MACHINE_ENVNAME intel setenv ICE_MACHINE_ENVINFO "ifort 18.0.5 20180823, intelmpi/2018.0.4, netcdf/4.7.0" setenv ICE_MACHINE_MAKE gmake diff --git a/configuration/scripts/machines/env.izumi_gnu b/configuration/scripts/machines/env.izumi_gnu index 873324e5d..5318b5930 100755 --- a/configuration/scripts/machines/env.izumi_gnu +++ b/configuration/scripts/machines/env.izumi_gnu @@ -17,7 +17,7 @@ setenv OMP_STACKSIZE 64M endif setenv ICE_MACHINE_MACHNAME izumi -setenv ICE_MACHINE_MACHINFO "Linux Cluster" +setenv ICE_MACHINE_MACHINFO "Intel Xeon Gold 5118 2.3GHz" setenv ICE_MACHINE_ENVNAME gnu setenv ICE_MACHINE_ENVINFO "GNU Fortran (GCC) 9.3.0, mvapich2-2.3.3, netcdf4.7.4" setenv ICE_MACHINE_MAKE gmake diff --git a/configuration/scripts/machines/env.izumi_intel b/configuration/scripts/machines/env.izumi_intel index 33baa096e..773dc0572 100755 --- a/configuration/scripts/machines/env.izumi_intel +++ b/configuration/scripts/machines/env.izumi_intel @@ -17,7 +17,7 @@ setenv OMP_STACKSIZE 64M endif setenv ICE_MACHINE_MACHNAME izumi -setenv ICE_MACHINE_MACHINFO "Linux Cluster" +setenv ICE_MACHINE_MACHINFO "Intel Xeon Gold 5118 2.3GHz" setenv ICE_MACHINE_ENVNAME intel setenv ICE_MACHINE_ENVINFO "ifort 19.1.0.166 20191121, mvapich2-2.3.3, netcdf4.7.4" setenv ICE_MACHINE_MAKE gmake diff --git a/configuration/scripts/machines/env.izumi_nag b/configuration/scripts/machines/env.izumi_nag index d1ce4ba95..873b8e728 100755 --- 
a/configuration/scripts/machines/env.izumi_nag +++ b/configuration/scripts/machines/env.izumi_nag @@ -18,7 +18,7 @@ setenv OMP_STACKSIZE 64M endif setenv ICE_MACHINE_MACHNAME izumi -setenv ICE_MACHINE_MACHINFO "Linux Cluster" +setenv ICE_MACHINE_MACHINFO "Intel Xeon Gold 5118 2.3GHz" setenv ICE_MACHINE_ENVNAME nag setenv ICE_MACHINE_ENVINFO "NAG Fortran Compiler Release 6.2(Chiyoda) Build 6207, gcc (GCC) 8.1.0, cc (GCC) 4.8.5 20150623, mvapich2-2.3.3, netcdf/c4.6.1-f4.4.4" setenv ICE_MACHINE_MAKE gmake diff --git a/configuration/scripts/machines/env.izumi_pgi b/configuration/scripts/machines/env.izumi_pgi index 8a8c36b8f..ab39eda9e 100755 --- a/configuration/scripts/machines/env.izumi_pgi +++ b/configuration/scripts/machines/env.izumi_pgi @@ -17,7 +17,7 @@ setenv OMP_STACKSIZE 64M endif setenv ICE_MACHINE_MACHNAME izumi -setenv ICE_MACHINE_MACHINFO "Linux Cluster" +setenv ICE_MACHINE_MACHINFO "Intel Xeon Gold 5118 2.3GHz" setenv ICE_MACHINE_ENVNAME pgi setenv ICE_MACHINE_ENVINFO "pgf90 20.1-0, mvapich2-2.3.3, netcdf4.7.4" setenv ICE_MACHINE_MAKE gmake diff --git a/configuration/scripts/machines/env.millikan_intel b/configuration/scripts/machines/env.millikan_intel index c0a7356ad..350ea5b6e 100755 --- a/configuration/scripts/machines/env.millikan_intel +++ b/configuration/scripts/machines/env.millikan_intel @@ -10,7 +10,9 @@ source $ssmuse -d /fs/ssm/hpco/tmp/eccc/201402/04/intel-2016.1.150 # netcdf (and #setenv OMP_STACKSIZE 64M setenv ICE_MACHINE_MACHNAME millikan +setenv ICE_MACHINE_MACHINFO "millikan" setenv ICE_MACHINE_ENVNAME intel +setenv ICE_MACHINE_ENVINFO "intel 2016.1.156, openmpi 1.6.5, netcdf" setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR /users/dor/armn/amb/data/local/runs setenv ICE_MACHINE_INPUTDATA /users/dor/armn/amb/data/local/forcing diff --git a/configuration/scripts/machines/env.mustang_intel18 b/configuration/scripts/machines/env.mustang_intel18 index 45e5b6518..d689d7ae4 100755 --- a/configuration/scripts/machines/env.mustang_intel18 +++ b/configuration/scripts/machines/env.mustang_intel18 @@ -33,7 +33,7 @@ endif setenv ICE_MACHINE_MACHNAME mustang setenv ICE_MACHINE_MACHINFO "HPE SGI 8600 Xeon Platinum 8168" setenv ICE_MACHINE_ENVNAME intel18 -setenv ICE_MACHINE_ENVINFO "ifort 18.0.3 20180410, mpt2.19, netcdf4.4.2" +setenv ICE_MACHINE_ENVINFO "ifort 18.0.3 20180410, mpt2.18, netcdf4.4.2" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice-consortium diff --git a/configuration/scripts/machines/env.mustang_intel20 b/configuration/scripts/machines/env.mustang_intel20 index cca0b3019..785875c29 100755 --- a/configuration/scripts/machines/env.mustang_intel20 +++ b/configuration/scripts/machines/env.mustang_intel20 @@ -33,7 +33,7 @@ endif setenv ICE_MACHINE_MACHNAME mustang setenv ICE_MACHINE_MACHINFO "HPE SGI 8600 Xeon Platinum 8168" setenv ICE_MACHINE_ENVNAME intel20 -setenv ICE_MACHINE_ENVINFO "ifort 19.1.1.217 20200306, mpt2.19, netcdf4.4.2" +setenv ICE_MACHINE_ENVINFO "ifort 19.1.1.217 20200306, mpt2.20, netcdf4.4.2" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice-consortium diff --git a/configuration/scripts/machines/env.narwhal_aocc b/configuration/scripts/machines/env.narwhal_aocc index 6d6822f46..4016a1d7d 100755 --- a/configuration/scripts/machines/env.narwhal_aocc +++ b/configuration/scripts/machines/env.narwhal_aocc @@ -39,9 +39,9 @@ setenv OMP_WAIT_POLICY PASSIVE endif setenv 
ICE_MACHINE_MACHNAME narwhal -setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12" +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12 2.6GHz, Slingshot-10 Interconnect" setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "aocc_3.0.0-Build#78 2020_12_10 clang/flang 12.0.0, cray-mpich/8.1.9, netcdf/4.7.4.4" +setenv ICE_MACHINE_ENVINFO "aocc_3.0.0-Build#78 2020_12_10 clang/flang 12.0.0, cray-mpich/8.1.5, netcdf/4.7.4.4" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium diff --git a/configuration/scripts/machines/env.nrlssc_gnu b/configuration/scripts/machines/env.nrlssc_gnu new file mode 100755 index 000000000..f050a61b8 --- /dev/null +++ b/configuration/scripts/machines/env.nrlssc_gnu @@ -0,0 +1,16 @@ +#!/bin/csh -f + +setenv ICE_MACHINE_MACHNAME NRLSSC +setenv ICE_MACHINE_MACHINFO "NRLSSC" +setenv ICE_MACHINE_ENVNAME gnu +setenv ICE_MACHINE_ENVINFO "gnu" +setenv ICE_MACHINE_MAKE gmake +setenv ICE_MACHINE_WKDIR /u/data/hebert/CICE_RUNS +setenv ICE_MACHINE_INPUTDATA /u/data/hebert/CICE_RUNS +setenv ICE_MACHINE_BASELINE /u/data/hebert/CICE_BASELINE +setenv ICE_MACHINE_SUBMIT "qsub " +setenv ICE_MACHINE_ACCT P00000000 +setenv ICE_MACHINE_QUEUE "standard" +setenv ICE_MACHINE_TPNODE 20 # tasks per node +setenv ICE_MACHINE_BLDTHRDS 1 +setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/machines/env.orion_intel b/configuration/scripts/machines/env.orion_intel index bdfccdd60..7a8e47f5d 100755 --- a/configuration/scripts/machines/env.orion_intel +++ b/configuration/scripts/machines/env.orion_intel @@ -7,30 +7,29 @@ endif if ("$inp" != "-nomodules") then -echo "Modules don't currently work with csh on Orion" -echo "Please run the following manually before running cice.setup" -echo " module purge" -echo " module load intel/2020" -echo " module load impi/2020" -echo " module load netcdf/4.7.2" - -##source /etc/profile.d/modules.csh +source /etc/profile.d/z030-HPC2-lmod.csh ##module list -#module purge -#module load intel/2020 -#module load impi/2020 -#module load netcdf/4.7.2 +module purge +module load intel/2020.2 +module load impi/2020.2 +module load netcdf/4.7.4 ##module list -# May be needed for OpenMP memory -#setenv OMP_STACKSIZE 64M - endif +limit coredumpsize unlimited +limit stacksize unlimited + +# May be needed for OpenMP memory +setenv OMP_STACKSIZE 64M +#setenv OMP_PROC_BIND true +#setenv OMP_PLACES threads +#setenv OMP_DISPLAY_ENV TRUE + setenv ICE_MACHINE_MACHNAME orion setenv ICE_MACHINE_MACHINFO "Dell EMC PowerEdge C6420 Xeon Gold 6148" setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "ifort 19.1.0.166 20191121, intelmpi 2019 Update 6 Build 20191024, netcdf/4.7.2" +setenv ICE_MACHINE_ENVINFO "ifort 19.1.2.254 20200623, intelmpi 2019 Update 8 Build 20200624 (id: 4f16ad915), netcdf/4.7.4" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $HOME/scratch/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /home/acraig/scratch/CICE_INPUTDATA diff --git a/configuration/scripts/options/set_env.gridavgchk b/configuration/scripts/options/set_env.gridavgchk new file mode 100644 index 000000000..740d1e9c3 --- /dev/null +++ b/configuration/scripts/options/set_env.gridavgchk @@ -0,0 +1,2 @@ +setenv ICE_DRVOPT unittest/gridavgchk +setenv ICE_TARGET gridavgchk diff --git a/configuration/scripts/options/set_nml.alt01 b/configuration/scripts/options/set_nml.alt01 index afe67691d..24947dcda 100644 --- a/configuration/scripts/options/set_nml.alt01 +++ 
b/configuration/scripts/options/set_nml.alt01 @@ -1,6 +1,6 @@ nilyr = 1 use_leap_years = .false. -ice_ic = 'default' +ice_ic = 'internal' distribution_type = 'roundrobin' distribution_wght = 'block' tr_iage = .false. diff --git a/configuration/scripts/options/set_nml.alt02 b/configuration/scripts/options/set_nml.alt02 index c4dbb0fea..a478809ca 100644 --- a/configuration/scripts/options/set_nml.alt02 +++ b/configuration/scripts/options/set_nml.alt02 @@ -1,6 +1,6 @@ ncat = 1 kcatbound = -1 -ice_ic = 'default' +ice_ic = 'internal' distribution_type = 'sectrobin' tr_iage = .true. tr_FY = .true. diff --git a/configuration/scripts/options/set_nml.alt03 b/configuration/scripts/options/set_nml.alt03 index 98e794735..c2ca38f32 100644 --- a/configuration/scripts/options/set_nml.alt03 +++ b/configuration/scripts/options/set_nml.alt03 @@ -1,6 +1,6 @@ ncat = 6 kcatbound = 2 -ice_ic = 'default' +ice_ic = 'internal' distribution_type = 'sectcart' conserv_check = .true. tr_iage = .false. diff --git a/configuration/scripts/options/set_nml.alt04 b/configuration/scripts/options/set_nml.alt04 index 98eb311cb..d1bc6ad02 100644 --- a/configuration/scripts/options/set_nml.alt04 +++ b/configuration/scripts/options/set_nml.alt04 @@ -1,4 +1,4 @@ -ice_ic = 'default' +ice_ic = 'internal' bfbflag = 'reprosum' distribution_type = 'rake' processor_shape = 'slenderX2' diff --git a/configuration/scripts/options/set_nml.alt05 b/configuration/scripts/options/set_nml.alt05 index a281bfa23..6793b5954 100644 --- a/configuration/scripts/options/set_nml.alt05 +++ b/configuration/scripts/options/set_nml.alt05 @@ -1,4 +1,4 @@ -ice_ic = 'default' +ice_ic = 'internal' tr_iage = .false. tr_FY = .false. tr_lvl = .false. diff --git a/configuration/scripts/options/set_nml.alt06 b/configuration/scripts/options/set_nml.alt06 index cd3a2222d..911acf8eb 100644 --- a/configuration/scripts/options/set_nml.alt06 +++ b/configuration/scripts/options/set_nml.alt06 @@ -1,5 +1,5 @@ ncat = 7 kcatbound = 3 nslyr = 3 -ice_ic = 'default' +ice_ic = 'internal' diff --git a/configuration/scripts/options/set_nml.box2001 b/configuration/scripts/options/set_nml.box2001 index c166b4217..adce08a74 100644 --- a/configuration/scripts/options/set_nml.box2001 +++ b/configuration/scripts/options/set_nml.box2001 @@ -1,10 +1,13 @@ +grid_atm = 'B' +grid_ocn = 'B' days_per_year = 360 use_leap_years = .false. npt = 240 -ice_ic = 'default' +ice_ic = 'internal' restart_ext = .true. histfreq = 'd','x','x','x','x' grid_type = 'rectangular' +kmt_type = 'default' dxrect = 16.e5 dyrect = 16.e5 close_boundaries = .true. @@ -22,7 +25,10 @@ ktransport = -1 coriolis = 'constant' atmbndy = 'constant' atm_data_type = 'box2001' +ocn_data_type = 'box2001' ice_data_type = 'box2001' +ice_data_conc = 'p5' +ice_data_dist = 'box2001' calc_strair = .false. restore_ice = .true. f_aice = 'd' diff --git a/configuration/scripts/options/set_nml.boxadv b/configuration/scripts/options/set_nml.boxadv index 7fc70713e..815746102 100644 --- a/configuration/scripts/options/set_nml.boxadv +++ b/configuration/scripts/options/set_nml.boxadv @@ -1,9 +1,15 @@ +grid_ocn = 'B' nilyr = 1 -ice_ic = 'default' +ice_ic = 'internal' restart_ext = .false. kcatbound = 2 ew_boundary_type = 'cyclic' ns_boundary_type = 'cyclic' +atm_data_type = 'box2001' +ocn_data_type = 'box2001' +ice_data_type = 'box2001' +ice_data_conc = 'p5' +ice_data_dist = 'box2001' tr_iage = .true. tr_FY = .false. tr_lvl = .true. 
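[Editor's note] A pattern worth noting in the set_nml.* changes above: ice_ic = 'default' becomes ice_ic = 'internal' throughout, and the single ice_data_type setting is split into a triplet. A hedged summary of the apparent roles, using only values that occur elsewhere in this patch:

    ice_data_type = 'box2001'   ! where ice is placed: 'box2001', 'channel', 'uniform',
                                ! 'easthalf', 'eastblock', 'boxslotcyl', 'latsst', ...
    ice_data_conc = 'p5'        ! initial concentration: 'p5' (0.5), 'c1' (1.0), 'parabolic'
    ice_data_dist = 'uniform'   ! spatial distribution of the ice: 'uniform' or 'box2001'

The glosses for 'p5' and 'c1' are inferred from the names; ice_init.F90 in the full patch is the authoritative source.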
diff --git a/configuration/scripts/options/set_nml.boxchan b/configuration/scripts/options/set_nml.boxchan new file mode 100644 index 000000000..a3f0fd191 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxchan @@ -0,0 +1,59 @@ +days_per_year = 360 +use_leap_years = .false. +npt_unit = 'd' +npt = 5 +ice_ic = 'internal' +restart_ext = .true. +histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +kmt_type = 'channel' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .false. +ew_boundary_type = 'cyclic' +ns_boundary_type = 'open' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'zero' +atmbndy = 'constant' +atm_data_type = 'uniform_east' +ocn_data_type = 'calm' +ice_data_type = 'channel' +ice_data_conc = 'p5' +ice_data_dist = 'uniform' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .false. +f_aice = 'd1' +f_hi = 'd1' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd1' +f_vvel = 'd1' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd1' +f_strairy = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' +f_strocnx = 'd1' +f_strocny = 'd1' +f_strintx = 'd1' +f_strinty = 'd1' +f_taubx = 'd1' +f_tauby = 'd1' +f_divu = 'd1' +f_sig1 = 'd1' +f_sig2 = 'd1' +f_sigP = 'd1' diff --git a/configuration/scripts/options/set_nml.boxislandse b/configuration/scripts/options/set_nml.boxislandse new file mode 100644 index 000000000..4a8a47705 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxislandse @@ -0,0 +1,46 @@ +npt = 48 +kmt_type = 'boxislands' +ice_ic = 'internal' +use_leap_years = .false. +histfreq = 'd','x','x','x','x' +grid_type = 'rectangular' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .false. +ew_boundary_type = 'cyclic' +ns_boundary_type = 'open' +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'constant' +atmbndy = 'constant' +atm_data_type = 'uniform_east' +ocn_data_type = 'calm' +ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' +rotate_wind = .false. +calc_strair = .false. +restore_ice = .false. +tr_iage = .false. +tr_FY = .false. +tr_pond_lvl = .false. +f_aice = 'd' +f_hi = 'd' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd' +f_vvel = 'd' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd' +f_strairy = 'd' +f_strocnx = 'd' +f_strocny = 'd' +f_divu = 'd' +f_sig1 = 'd' +f_sig2 = 'd' diff --git a/configuration/scripts/options/set_nml.boxislandsn b/configuration/scripts/options/set_nml.boxislandsn new file mode 100644 index 000000000..dd386ce5a --- /dev/null +++ b/configuration/scripts/options/set_nml.boxislandsn @@ -0,0 +1,46 @@ +npt = 48 +kmt_type = 'boxislands' +ice_ic = 'internal' +use_leap_years = .false. +histfreq = 'd','x','x','x','x' +grid_type = 'rectangular' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .false. +ew_boundary_type = 'cyclic' +ns_boundary_type = 'open' +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'constant' +atmbndy = 'constant' +atm_data_type = 'uniform_north' +ocn_data_type = 'calm' +ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' +rotate_wind = .false. +calc_strair = .false. +restore_ice = .false. +tr_iage = .false. +tr_FY = .false. +tr_pond_lvl = .false. 
+f_aice = 'd' +f_hi = 'd' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd' +f_vvel = 'd' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd' +f_strairy = 'd' +f_strocnx = 'd' +f_strocny = 'd' +f_divu = 'd' +f_sig1 = 'd' +f_sig2 = 'd' diff --git a/configuration/scripts/options/set_nml.boxislandsne b/configuration/scripts/options/set_nml.boxislandsne new file mode 100644 index 000000000..75db55722 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxislandsne @@ -0,0 +1,46 @@ +npt = 48 +kmt_type = 'boxislands' +ice_ic = 'internal' +use_leap_years = .false. +histfreq = 'd','x','x','x','x' +grid_type = 'rectangular' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .false. +ew_boundary_type = 'cyclic' +ns_boundary_type = 'open' +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'constant' +atmbndy = 'constant' +atm_data_type = 'uniform_northeast' +ocn_data_type = 'calm' +ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' +rotate_wind = .false. +calc_strair = .false. +restore_ice = .false. +tr_iage = .false. +tr_FY = .false. +tr_pond_lvl = .false. +f_aice = 'd' +f_hi = 'd' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd' +f_vvel = 'd' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd' +f_strairy = 'd' +f_strocnx = 'd' +f_strocny = 'd' +f_divu = 'd' +f_sig1 = 'd' +f_sig2 = 'd' diff --git a/configuration/scripts/options/set_nml.boxnodyn b/configuration/scripts/options/set_nml.boxnodyn index 35a224c92..deb53cf5a 100644 --- a/configuration/scripts/options/set_nml.boxnodyn +++ b/configuration/scripts/options/set_nml.boxnodyn @@ -1,5 +1,5 @@ nilyr = 1 -ice_ic = 'default' +ice_ic = 'internal' days_per_year = 360 use_leap_years = .false. npt = 72 @@ -8,6 +8,30 @@ dumpfreq_n = 2 histfreq = 'd','x','x','x','x' histfreq_n = 2,1,1,1,1 f_aice = 'd' +f_uvel = 'd' +f_vvel = 'd' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd' +f_strairy = 'd' +f_strtltx = 'd' +f_strtlty = 'd' +f_strcorx = 'd' +f_strcory = 'd' +f_strocnx = 'd' +f_strocny = 'd' +f_strintx = 'd' +f_strinty = 'd' +f_taubx = 'd' +f_tauby = 'd' +f_strength = 'd' +f_divu = 'd' +f_shear = 'd' +f_sig1 = 'd' +f_sig2 = 'd' +f_sigP = 'd' kcatbound = 0 ew_boundary_type = 'open' ns_boundary_type = 'open' @@ -20,10 +44,16 @@ tr_pond_lvl = .false. tr_aero = .false. kitd = 0 ktherm = 0 -kdyn = 0 +kdyn = 1 revised_evp = .false. -kstrength = 0 +kstrength = 1 krdg_partic = 1 krdg_redist = 1 +seabed_stress = .true. +atm_data_type = 'calm' +ocn_data_type = 'calm' +ice_data_type = 'box2001' +ice_data_conc = 'p5' +ice_data_dist = 'box2001' shortwave = 'ccsm3' albedo_type = 'constant' diff --git a/configuration/scripts/options/set_nml.boxrestore b/configuration/scripts/options/set_nml.boxrestore index ac0266aeb..b2078a566 100644 --- a/configuration/scripts/options/set_nml.boxrestore +++ b/configuration/scripts/options/set_nml.boxrestore @@ -1,11 +1,17 @@ +grid_ocn = 'B' nilyr = 1 -ice_ic = 'default' +ice_ic = 'internal' restart_ext = .true. use_leap_years = .false. 
ndtd = 2 kcatbound = 1 ew_boundary_type = 'cyclic' ns_boundary_type = 'open' +atm_data_type = 'box2001' +ocn_data_type = 'box2001' +ice_data_type = 'box2001' +ice_data_conc = 'p5' +ice_data_dist = 'box2001' histfreq = 'd','x','x','x','x' histfreq_n = 1,1,1,1,1 f_aice = 'd' diff --git a/configuration/scripts/options/set_nml.boxslotcyl b/configuration/scripts/options/set_nml.boxslotcyl index 9985cef13..3edd33ad8 100644 --- a/configuration/scripts/options/set_nml.boxslotcyl +++ b/configuration/scripts/options/set_nml.boxslotcyl @@ -1,22 +1,29 @@ +grid_atm = 'B' +grid_ocn = 'B' nilyr = 1 -ice_ic = 'default' +ice_ic = 'internal' restart_ext = .false. dt = 3600.0 npt = 288 grid_type = 'rectangular' +kmt_type = 'default' dxrect = 10.e5 dyrect = 10.e5 kcatbound = 2 ew_boundary_type = 'open' ns_boundary_type = 'open' close_boundaries = .true. -tr_lvl = .false. +tr_lvl = .false. tr_pond_lvl = .false. ktherm = -1 kdyn = -1 kridge = -1 ktransport = 1 +atm_data_type = 'box2001' +ocn_data_type = 'box2001' ice_data_type = 'boxslotcyl' +ice_data_conc = 'c1' +ice_data_dist = 'uniform' histfreq = 'h','x','x','x','x' histfreq_n = 6 , 1 , 1 , 1 , 1 f_aice = 'h' diff --git a/configuration/scripts/options/set_nml.boxsyme b/configuration/scripts/options/set_nml.boxsyme new file mode 100644 index 000000000..3ff31d2c4 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxsyme @@ -0,0 +1,58 @@ +days_per_year = 360 +use_leap_years = .false. +npt_unit = 'd' +npt = 5 +ice_ic = 'internal' +restart_ext = .true. +histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +kmt_type = 'default' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .true. +ew_boundary_type = 'open' +ns_boundary_type = 'open' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'zero' +atmbndy = 'constant' +atm_data_type = 'uniform_east' +ocn_data_type = 'calm' +ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .false. +f_aice = 'd1' +f_hi = 'd1' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd1' +f_vvel = 'd1' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd1' +f_strairy = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' +f_strocnx = 'd1' +f_strocny = 'd1' +f_strintx = 'd1' +f_strinty = 'd1' +f_taubx = 'd1' +f_tauby = 'd1' +f_divu = 'd1' +f_sig1 = 'd1' +f_sig2 = 'd1' diff --git a/configuration/scripts/options/set_nml.boxsymn b/configuration/scripts/options/set_nml.boxsymn new file mode 100644 index 000000000..90ef74813 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxsymn @@ -0,0 +1,58 @@ +days_per_year = 360 +use_leap_years = .false. +npt_unit = 'd' +npt = 5 +ice_ic = 'internal' +restart_ext = .true. +histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +kmt_type = 'default' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .true. +ew_boundary_type = 'open' +ns_boundary_type = 'open' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'zero' +atmbndy = 'constant' +atm_data_type = 'uniform_north' +ocn_data_type = 'calm' +ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .false. 
+f_aice = 'd1' +f_hi = 'd1' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd1' +f_vvel = 'd1' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd1' +f_strairy = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' +f_strocnx = 'd1' +f_strocny = 'd1' +f_strintx = 'd1' +f_strinty = 'd1' +f_taubx = 'd1' +f_tauby = 'd1' +f_divu = 'd1' +f_sig1 = 'd1' +f_sig2 = 'd1' diff --git a/configuration/scripts/options/set_nml.boxsymne b/configuration/scripts/options/set_nml.boxsymne new file mode 100644 index 000000000..5c7374976 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxsymne @@ -0,0 +1,58 @@ +days_per_year = 360 +use_leap_years = .false. +npt_unit = 'd' +npt = 5 +ice_ic = 'internal' +restart_ext = .true. +histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +kmt_type = 'default' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .true. +ew_boundary_type = 'open' +ns_boundary_type = 'open' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'constant' +atmbndy = 'constant' +atm_data_type = 'uniform_northeast' +ocn_data_type = 'calm' +ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .false. +f_aice = 'd1' +f_hi = 'd1' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd1' +f_vvel = 'd1' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd1' +f_strairy = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' +f_strocnx = 'd1' +f_strocny = 'd1' +f_strintx = 'd1' +f_strinty = 'd1' +f_taubx = 'd1' +f_tauby = 'd1' +f_divu = 'd1' +f_sig1 = 'd1' +f_sig2 = 'd1' diff --git a/configuration/scripts/options/set_nml.boxsyms b/configuration/scripts/options/set_nml.boxsyms new file mode 100644 index 000000000..7fc0fc5a0 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxsyms @@ -0,0 +1,58 @@ +days_per_year = 360 +use_leap_years = .false. +npt_unit = 'd' +npt = 5 +ice_ic = 'internal' +restart_ext = .true. +histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +kmt_type = 'default' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .true. +ew_boundary_type = 'open' +ns_boundary_type = 'open' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'zero' +atmbndy = 'constant' +atm_data_type = 'uniform_south' +ocn_data_type = 'calm' +ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .false. +f_aice = 'd1' +f_hi = 'd1' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd1' +f_vvel = 'd1' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd1' +f_strairy = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' +f_strocnx = 'd1' +f_strocny = 'd1' +f_strintx = 'd1' +f_strinty = 'd1' +f_taubx = 'd1' +f_tauby = 'd1' +f_divu = 'd1' +f_sig1 = 'd1' +f_sig2 = 'd1' diff --git a/configuration/scripts/options/set_nml.boxsymw b/configuration/scripts/options/set_nml.boxsymw new file mode 100644 index 000000000..4be1f5f95 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxsymw @@ -0,0 +1,58 @@ +days_per_year = 360 +use_leap_years = .false. +npt_unit = 'd' +npt = 5 +ice_ic = 'internal' +restart_ext = .true. 
+histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +kmt_type = 'default' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .true. +ew_boundary_type = 'open' +ns_boundary_type = 'open' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'zero' +atmbndy = 'constant' +atm_data_type = 'uniform_west' +ocn_data_type = 'calm' +ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .false. +f_aice = 'd1' +f_hi = 'd1' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd1' +f_vvel = 'd1' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd1' +f_strairy = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' +f_strocnx = 'd1' +f_strocny = 'd1' +f_strintx = 'd1' +f_strinty = 'd1' +f_taubx = 'd1' +f_tauby = 'd1' +f_divu = 'd1' +f_sig1 = 'd1' +f_sig2 = 'd1' diff --git a/configuration/scripts/options/set_nml.boxwall b/configuration/scripts/options/set_nml.boxwall new file mode 100644 index 000000000..5a99e311b --- /dev/null +++ b/configuration/scripts/options/set_nml.boxwall @@ -0,0 +1,58 @@ +days_per_year = 360 +use_leap_years = .false. +npt = 240 +ice_ic = 'internal' +restart_ext = .true. +histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +kmt_type = 'wall' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .false. +ew_boundary_type = 'cyclic' +ns_boundary_type = 'cyclic' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'zero' +atmbndy = 'constant' +atm_data_type = 'uniform_east' +ocn_data_type = 'calm' +ice_data_type = 'easthalf' +ice_data_conc = 'c1' +ice_data_dist = 'uniform' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .false. +f_aice = 'd1' +f_hi = 'd1' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd1' +f_vvel = 'd1' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd1' +f_strairy = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' +f_strocnx = 'd1' +f_strocny = 'd1' +f_strintx = 'd1' +f_strinty = 'd1' +f_taubx = 'd1' +f_tauby = 'd1' +f_divu = 'd1' +f_sig1 = 'd1' +f_sig2 = 'd1' +f_sigP = 'd1' diff --git a/configuration/scripts/options/set_nml.boxwallblock b/configuration/scripts/options/set_nml.boxwallblock new file mode 100644 index 000000000..5ef4cce7a --- /dev/null +++ b/configuration/scripts/options/set_nml.boxwallblock @@ -0,0 +1,58 @@ +days_per_year = 360 +use_leap_years = .false. +npt = 240 +ice_ic = 'internal' +restart_ext = .true. +histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +kmt_type = 'wall' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .false. +ew_boundary_type = 'cyclic' +ns_boundary_type = 'cyclic' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'zero' +atmbndy = 'constant' +atm_data_type = 'uniform_east' +ocn_data_type = 'calm' +ice_data_type = 'eastblock' +ice_data_conc = 'c1' +ice_data_dist = 'uniform' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .false. 
+f_aice = 'd1' +f_hi = 'd1' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd1' +f_vvel = 'd1' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd1' +f_strairy = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' +f_strocnx = 'd1' +f_strocny = 'd1' +f_strintx = 'd1' +f_strinty = 'd1' +f_taubx = 'd1' +f_tauby = 'd1' +f_divu = 'd1' +f_sig1 = 'd1' +f_sig2 = 'd1' +f_sigP = 'd1' diff --git a/configuration/scripts/options/set_nml.boxwallp5 b/configuration/scripts/options/set_nml.boxwallp5 new file mode 100644 index 000000000..229dba456 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxwallp5 @@ -0,0 +1,58 @@ +days_per_year = 360 +use_leap_years = .false. +npt = 240 +ice_ic = 'internal' +restart_ext = .true. +histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +kmt_type = 'wall' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .false. +ew_boundary_type = 'cyclic' +ns_boundary_type = 'cyclic' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'zero' +atmbndy = 'constant' +atm_data_type = 'uniform_east' +ocn_data_type = 'calm' +ice_data_type = 'easthalf' +ice_data_conc = 'p5' +ice_data_dist = 'uniform' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .false. +f_aice = 'd1' +f_hi = 'd1' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd1' +f_vvel = 'd1' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd1' +f_strairy = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' +f_strocnx = 'd1' +f_strocny = 'd1' +f_strintx = 'd1' +f_strinty = 'd1' +f_taubx = 'd1' +f_tauby = 'd1' +f_divu = 'd1' +f_sig1 = 'd1' +f_sig2 = 'd1' +f_sigP = 'd1' diff --git a/configuration/scripts/options/set_nml.dwblockall b/configuration/scripts/options/set_nml.dwblockall new file mode 100644 index 000000000..161c86b01 --- /dev/null +++ b/configuration/scripts/options/set_nml.dwblockall @@ -0,0 +1 @@ +distribution_wght = 'blockall' diff --git a/configuration/scripts/options/set_nml.dynanderson b/configuration/scripts/options/set_nml.dynanderson index 566c53a09..2e8e13659 100644 --- a/configuration/scripts/options/set_nml.dynanderson +++ b/configuration/scripts/options/set_nml.dynanderson @@ -1,3 +1,5 @@ kdyn = 3 algo_nonlin = 'anderson' use_mean_vrel = .false. +capping = 1. + diff --git a/configuration/scripts/options/set_nml.dynpicard b/configuration/scripts/options/set_nml.dynpicard index b81f4d4e6..05efb3526 100644 --- a/configuration/scripts/options/set_nml.dynpicard +++ b/configuration/scripts/options/set_nml.dynpicard @@ -1,3 +1,4 @@ kdyn = 3 algo_nonlin = 'picard' use_mean_vrel = .true. +capping = 1. 
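[Editor's note] Both implicit-solver options above now set capping = 1. explicitly. Read together with the dynamics defaults added to ice_in earlier in this patch, the configuration exercised by set_nml.dynpicard looks roughly like this (an illustrative composite, not a file in the patch):

    kdyn          = 3           ! implicit viscous-plastic (VP) solver
    algo_nonlin   = 'picard'    ! set_nml.dynanderson uses 'anderson'
    use_mean_vrel = .true.      ! .false. in the anderson variant
    capping       = 1.          ! capping of the viscosities, now pinned by both options
    deltaminVP    = 2e-9        ! minimum Delta for VP, from the new ice_in defaults

Pinning capping in the option files keeps these tests reproducible if the ice_in default ever changes.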
diff --git a/configuration/scripts/options/set_nml.gbox12 b/configuration/scripts/options/set_nml.gbox12 new file mode 100644 index 000000000..6ad5b567b --- /dev/null +++ b/configuration/scripts/options/set_nml.gbox12 @@ -0,0 +1,8 @@ +ice_ic = 'internal' +grid_type = 'rectangular' +kmt_type = 'default' +atm_data_type = 'box2001' +ocn_data_type = 'calm' +ice_data_type = 'box2001' +ice_data_conc = 'p5' +ice_data_dist = 'box2001' diff --git a/configuration/scripts/options/set_nml.gbox128 b/configuration/scripts/options/set_nml.gbox128 index 7b139f94a..f82267c64 100644 --- a/configuration/scripts/options/set_nml.gbox128 +++ b/configuration/scripts/options/set_nml.gbox128 @@ -1,4 +1,10 @@ -ice_ic = 'default' +grid_ocn = 'B' +ice_ic = 'internal' grid_type = 'rectangular' +kmt_type = 'default' atm_data_type = 'box2001' +ocn_data_type = 'box2001' ice_data_type = 'box2001' +ice_data_conc = 'p5' +ice_data_dist = 'box2001' + diff --git a/configuration/scripts/options/set_nml.gbox180 b/configuration/scripts/options/set_nml.gbox180 index 7b139f94a..6ad5b567b 100644 --- a/configuration/scripts/options/set_nml.gbox180 +++ b/configuration/scripts/options/set_nml.gbox180 @@ -1,4 +1,8 @@ -ice_ic = 'default' +ice_ic = 'internal' grid_type = 'rectangular' +kmt_type = 'default' atm_data_type = 'box2001' +ocn_data_type = 'calm' ice_data_type = 'box2001' +ice_data_conc = 'p5' +ice_data_dist = 'box2001' diff --git a/configuration/scripts/options/set_nml.gbox80 b/configuration/scripts/options/set_nml.gbox80 index 7b139f94a..6ad5b567b 100644 --- a/configuration/scripts/options/set_nml.gbox80 +++ b/configuration/scripts/options/set_nml.gbox80 @@ -1,4 +1,8 @@ -ice_ic = 'default' +ice_ic = 'internal' grid_type = 'rectangular' +kmt_type = 'default' atm_data_type = 'box2001' +ocn_data_type = 'calm' ice_data_type = 'box2001' +ice_data_conc = 'p5' +ice_data_dist = 'box2001' diff --git a/configuration/scripts/options/set_nml.gridb b/configuration/scripts/options/set_nml.gridb new file mode 100644 index 000000000..eadfc15ce --- /dev/null +++ b/configuration/scripts/options/set_nml.gridb @@ -0,0 +1,2 @@ +grid_ice = 'B' + diff --git a/configuration/scripts/options/set_nml.gridc b/configuration/scripts/options/set_nml.gridc new file mode 100644 index 000000000..a04fab4fd --- /dev/null +++ b/configuration/scripts/options/set_nml.gridc @@ -0,0 +1,2 @@ +grid_ice = 'C' + diff --git a/configuration/scripts/options/set_nml.gridcd b/configuration/scripts/options/set_nml.gridcd new file mode 100644 index 000000000..104801879 --- /dev/null +++ b/configuration/scripts/options/set_nml.gridcd @@ -0,0 +1,2 @@ +grid_ice = 'C_override_D' + diff --git a/configuration/scripts/options/set_nml.histdbg b/configuration/scripts/options/set_nml.histdbg new file mode 100644 index 000000000..247d185fd --- /dev/null +++ b/configuration/scripts/options/set_nml.histdbg @@ -0,0 +1,225 @@ + histfreq = 'm','d','1','h','x' + histfreq_n = 1,1,1,1,1 + histfreq_base = 'zero' + write_ic = .true. + f_tmask = .true. + f_blkmask = .true. + f_tarea = .true. + f_uarea = .true. + f_dxt = .true. + f_dyt = .true. + f_dxu = .true. + f_dyu = .true. + f_HTN = .true. + f_HTE = .true. + f_ANGLE = .true. + f_ANGLET = .true. + f_NCAT = .true. + f_VGRDi = .true. + f_VGRDs = .true. + f_VGRDb = .true. + f_VGRDa = .true. + f_bounds = .true. 
+ f_CMIP = 'm' + f_aice = 'md1h' + f_hi = 'h1dm' + f_hs = 'd1m' + f_Tsfc = 'mdh1' + f_sice = 'md' + f_uvel = 'md1' + f_vvel = 'dm1' + f_uatm = 'dm1' + f_vatm = 'dm1' + f_fswdn = 'dm1' + f_flwdn = 'md1' + f_snowfrac = 'md1' + f_snow = 'md1' + f_snow_ai = 'md1' + f_rain = 'md1' + f_rain_ai = 'md1' + f_sst = 'md1' + f_sss = 'md1' + f_uocn = 'md1' + f_vocn = 'md1' + f_frzmlt = 'md' + f_fswfac = 'md' + f_fswint_ai = 'md' + f_fswabs = 'md' + f_fswabs_ai = 'md' + f_albsni = 'md' + f_alvdr = 'md' + f_alidr = 'md' + f_alvdf = 'md' + f_alidf = 'md' + f_alvdr_ai = 'md' + f_alidr_ai = 'md' + f_alvdf_ai = 'md' + f_alidf_ai = 'md' + f_albice = 'md' + f_albsno = 'md' + f_albpnd = 'md' + f_coszen = 'md' + f_flat = 'md' + f_flat_ai = 'md' + f_fsens = 'md' + f_fsens_ai = 'md' + f_fswup = 'md' + f_flwup = 'md' + f_flwup_ai = 'md' + f_evap = 'md' + f_evap_ai = 'md' + f_Tair = 'md' + f_Tref = 'md' + f_Qref = 'md' + f_congel = 'md' + f_frazil = 'md' + f_snoice = 'md' + f_dsnow = 'md' + f_melts = 'md' + f_meltt = 'md' + f_meltb = 'md' + f_meltl = 'md' + f_fresh = 'md' + f_fresh_ai = 'md' + f_fsalt = 'md' + f_fsalt_ai = 'md' + f_fbot = 'md' + f_fhocn = 'md' + f_fhocn_ai = 'md' + f_fswthru = 'md' + f_fswthru_ai = 'md' + f_fsurf_ai = 'md' + f_fcondtop_ai = 'md' + f_fmeltt_ai = 'md' + f_strairx = 'md1' + f_strairy = 'md1' + f_strtltx = 'md1' + f_strtlty = 'md1' + f_strcorx = 'md1' + f_strcory = 'md1' + f_strocnx = 'md1' + f_strocny = 'md1' + f_strintx = 'md1' + f_strinty = 'md1' + f_taubx = 'md1' + f_tauby = 'md1' + f_strength = 'md1' + f_divu = 'md1' + f_shear = 'md1' + f_sig1 = 'md1' + f_sig2 = 'md1' + f_sigP = 'md1' + f_dvidtt = 'md' + f_dvidtd = 'md' + f_daidtt = 'md' + f_daidtd = 'md' + f_dagedtt = 'md' + f_dagedtd = 'md' + f_mlt_onset = 'md' + f_frz_onset = 'md' + f_hisnap = 'md' + f_aisnap = 'md' + f_trsig = 'md' + f_icepresent = 'md' + f_iage = 'md' + f_FY = 'md' + f_aicen = 'md' + f_vicen = 'md' + f_vsnon = 'md' + f_snowfracn = 'md' + f_keffn_top = 'md' + f_Tinz = 'md' + f_Sinz = 'md' + f_Tsnz = 'md' + f_fsurfn_ai = 'md' + f_fcondtopn_ai = 'md' + f_fmelttn_ai = 'md' + f_flatn_ai = 'md' + f_fsensn_ai = 'md' + f_alvl = 'md' + f_vlvl = 'md' + f_ardg = 'md' + f_vrdg = 'md' + f_dardg1dt = 'md' + f_dardg2dt = 'md' + f_dvirdgdt = 'md' + f_opening = 'md' + f_ardgn = 'md' + f_vrdgn = 'md' + f_dardg1ndt = 'md' + f_dardg2ndt = 'md' + f_dvirdgndt = 'md' + f_krdgn = 'md' + f_aparticn = 'md' + f_aredistn = 'md' + f_vredistn = 'md' + f_araftn = 'md' + f_vraftn = 'md' + f_apondn = 'md' + f_apeffn = 'md' + f_hpondn = 'md' + f_apond = 'md' + f_hpond = 'md' + f_ipond = 'md' + f_apeff = 'md' + f_apond_ai = 'md' + f_hpond_ai = 'md' + f_ipond_ai = 'md' + f_apeff_ai = 'md' + f_fiso_atm = 'md' + f_fiso_ocn = 'md' + f_iso = 'md' + f_faero_atm = 'md' + f_faero_ocn = 'md' + f_aero = 'md' + f_fbio = 'md' + f_fbio_ai = 'md' + f_zaero = 'md' + f_bgc_S = 'md' + f_bgc_N = 'md' + f_bgc_C = 'md' + f_bgc_DOC = 'md' + f_bgc_DIC = 'md' + f_bgc_chl = 'md' + f_bgc_Nit = 'md' + f_bgc_Am = 'md' + f_bgc_Sil = 'md' + f_bgc_DMSPp = 'md' + f_bgc_DMSPd = 'md' + f_bgc_DMS = 'md' + f_bgc_DON = 'md' + f_bgc_Fe = 'md' + f_bgc_hum = 'md' + f_bgc_PON = 'md' + f_bgc_ml = 'md' + f_upNO = 'md' + f_upNH = 'md' + f_bTin = 'md' + f_bphi = 'md' + f_iDi = 'md' + f_iki = 'md' + f_fbri = 'md' + f_hbri = 'md' + f_zfswin = 'md' + f_bionet = 'md' + f_biosnow = 'md' + f_grownet = 'md' + f_PPnet = 'md' + f_algalpeak = 'md' + f_zbgc_frac = 'md' + f_drag = 'md' + f_Cdn_atm = 'md' + f_Cdn_ocn = 'md' + f_fsdrad = 'md' + f_fsdperim = 'md' + f_afsd = 'md' + f_afsdn = 'md' + 
f_dafsd_newi = 'md' + f_dafsd_latg = 'md' + f_dafsd_latm = 'md' + f_dafsd_wave = 'md' + f_dafsd_weld = 'md' + f_wave_sig_ht = 'md' + f_aice_ww = 'md' + f_diam_ww = 'md' + f_hice_ww = 'md' diff --git a/configuration/scripts/options/set_nml.icdefault b/configuration/scripts/options/set_nml.icdefault index 2f3c092d4..7019acf0b 100644 --- a/configuration/scripts/options/set_nml.icdefault +++ b/configuration/scripts/options/set_nml.icdefault @@ -1 +1 @@ -ice_ic = 'default' +ice_ic = 'internal' diff --git a/configuration/scripts/options/set_nml.iobinary b/configuration/scripts/options/set_nml.iobinary index 2f3c092d4..7019acf0b 100644 --- a/configuration/scripts/options/set_nml.iobinary +++ b/configuration/scripts/options/set_nml.iobinary @@ -1 +1 @@ -ice_ic = 'default' +ice_ic = 'internal' diff --git a/configuration/scripts/options/set_nml.kmtislands b/configuration/scripts/options/set_nml.kmtislands new file mode 100644 index 000000000..238c67d3f --- /dev/null +++ b/configuration/scripts/options/set_nml.kmtislands @@ -0,0 +1 @@ +kmt_type = 'boxislands' diff --git a/configuration/scripts/options/set_nml.run10year b/configuration/scripts/options/set_nml.run10year index cf672e991..0eb95071a 100644 --- a/configuration/scripts/options/set_nml.run10year +++ b/configuration/scripts/options/set_nml.run10year @@ -1,7 +1,7 @@ npt_unit = 'y' npt = 10 dumpfreq = 'y' -dumpfreq_n = 12 +dumpfreq_n = 1 diagfreq = 24 histfreq = 'm','x','x','x','x' diff --git a/configuration/scripts/options/set_nml.tx1 b/configuration/scripts/options/set_nml.tx1 index 2ef4edd33..5e66db871 100644 --- a/configuration/scripts/options/set_nml.tx1 +++ b/configuration/scripts/options/set_nml.tx1 @@ -1,6 +1,6 @@ dt = 3600.0 runtype = 'initial' -ice_ic = 'default' +ice_ic = 'internal' grid_format = 'bin' grid_type = 'tripole' ns_boundary_type = 'tripole' diff --git a/configuration/scripts/options/set_nml.zsal b/configuration/scripts/options/set_nml.zsal index 5503e0231..724893ffc 100644 --- a/configuration/scripts/options/set_nml.zsal +++ b/configuration/scripts/options/set_nml.zsal @@ -4,5 +4,5 @@ sw_redist = .true. tfrz_option = 'linear_salt' tr_brine = .true. solve_zsal = .true. -ice_ic = 'default' +ice_ic = 'internal' diff --git a/configuration/scripts/options/test_nml.restart21 b/configuration/scripts/options/test_nml.restart21 new file mode 100644 index 000000000..7e4281ff6 --- /dev/null +++ b/configuration/scripts/options/test_nml.restart21 @@ -0,0 +1,7 @@ +npt = 2 +npt_unit = 'd' +dumpfreq = 'd' +dumpfreq_n = 1 +dumpfreq_base = 'init' +runtype = 'initial' +use_restart_time = .false. 
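[Editor's note] test_nml.restart21 above and test_nml.restart22 below are the two legs of the new restart2 test (driven by test_restart2.script, which appears only in the diffstat). The apparent logic, hedged accordingly: the first leg runs two days from initial conditions and writes a restart every day; the second leg continues from a restart and writes at day 2 only, with dumpfreq_base = 'init' anchoring both dump schedules to the initial date so the day-2 restarts can be compared bit for bit. The settings that differ between the legs:

    ! restart21 (first leg)          ! restart22 (second leg)
    dumpfreq_n       = 1             dumpfreq_n = 2
    runtype          = 'initial'     runtype    = 'continue'
    use_restart_time = .false.       (unset; the restart file supplies the time)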
diff --git a/configuration/scripts/options/test_nml.restart22 b/configuration/scripts/options/test_nml.restart22 new file mode 100644 index 000000000..edc3e975a --- /dev/null +++ b/configuration/scripts/options/test_nml.restart22 @@ -0,0 +1,6 @@ +npt = 2 +npt_unit = 'd' +dumpfreq = 'd' +dumpfreq_n = 2 +dumpfreq_base = 'init' +runtype = 'continue' diff --git a/configuration/scripts/tests/base_suite.ts b/configuration/scripts/tests/base_suite.ts index e4c376ad4..858961eac 100644 --- a/configuration/scripts/tests/base_suite.ts +++ b/configuration/scripts/tests/base_suite.ts @@ -40,6 +40,7 @@ restart gbox128 4x4 boxrestore,short smoke gbox128 4x4 boxrestore,short,debug restart gbox80 1x1 box2001 smoke gbox80 1x1 boxslotcyl +smoke gbox12 1x1x12x12x1 boxchan,diag1,debug smoke gx3 8x2 bgcz smoke gx3 8x2 bgcz,debug smoke gx3 8x1 bgcskl,debug diff --git a/configuration/scripts/tests/comparelog.csh b/configuration/scripts/tests/comparelog.csh index af6b2d76e..576289cd7 100755 --- a/configuration/scripts/tests/comparelog.csh +++ b/configuration/scripts/tests/comparelog.csh @@ -55,8 +55,8 @@ if (${filearg} == 1) then touch ${test_out} if (${cicefile} == 1) then - cat ${base_data} | grep -A 99999999 "total ice area (km^2)" | grep -e istep1: -e = | grep -iv "min, max, sum" >&! ${base_out} - cat ${test_data} | grep -A 99999999 "total ice area (km^2)" | grep -e istep1: -e = | grep -iv "min, max, sum" >&! ${test_out} + cat ${base_data} | grep -A 99999999 "total ice area (km^2)" | grep -e istep1: -e = | grep -iv "min, max, sum" | grep -iv "init_vert" | grep -iv "ridge_ice" >&! ${base_out} + cat ${test_data} | grep -A 99999999 "total ice area (km^2)" | grep -e istep1: -e = | grep -iv "min, max, sum" | grep -iv "init_vert" | grep -iv "ridge_ice" >&! ${test_out} else sed -n '/RunningUnitTest/,$p' ${base_data} >! ${base_out} sed -n '/RunningUnitTest/,$p' ${test_data} >! 
${test_out} diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts new file mode 100644 index 000000000..6909c1ac9 --- /dev/null +++ b/configuration/scripts/tests/gridsys_suite.ts @@ -0,0 +1,87 @@ +# Test Grid PEs Sets BFB-compare +smoke gx3 8x2 diag1,run5day +smoke gx3 8x4 diag1,run5day,debug +restart gx3 4x2 debug,diag1 +restart2 gx1 16x2 debug,diag1 +smoke gbox12 1x1x12x12x1 boxchan +smoke gbox80 1x1 box2001 +smoke gbox80 2x2 boxwallp5 +smoke gbox80 3x3 boxwall +smoke gbox80 2x2 boxwallblock +smoke gbox80 1x1 boxslotcyl +smoke gbox80 2x4 boxnodyn +#smoke gbox80 2x2 boxsymn,run1day +smoke gbox80 4x2 boxsyme,run1day +#smoke gbox80 4x1 boxsymne,run1day +#smoke gbox80 2x2 boxsymn,run1day,kmtislands +smoke gbox80 4x1 boxsyme,run1day,kmtislands +#smoke gbox80 4x2 boxsymne,run1day,kmtislands +#smoke gbox80 8x1 boxislandsn,run1day +smoke gbox80 4x2 boxislandse,run1day +#smoke gbox80 2x4 boxislandsne,run1day +smoke gx3 1x1x100x116x1 reprosum,run10day +smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day +smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day +smoke gx3 1x1x5x4x580 reprosum,run10day smoke_gx3_1x1x100x116x1_reprosum_run10day +smoke gx1 32x1x16x16x32 reprosum,run10day +smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall smoke_gx1_32x1x16x16x32_reprosum_run10day +smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall smoke_gx1_32x1x16x16x32_reprosum_run10day +smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest smoke_gx1_32x1x16x16x32_reprosum_run10day + +smoke gx3 8x2 diag1,run5day,gridcd +smoke gx3 8x4 diag1,run5day,debug,gridcd +restart gx3 4x2 debug,diag1,gridcd +restart2 gx1 16x2 debug,diag1,gridcd +smoke gbox12 1x1x12x12x1 boxchan,gridcd +smoke gbox80 1x1 box2001,gridcd +smoke gbox80 2x2 boxwallp5,gridcd +smoke gbox80 3x3 boxwall,gridcd +smoke gbox80 2x2 boxwallblock,gridcd +smoke gbox80 1x1 boxslotcyl,gridcd +smoke gbox80 2x4 boxnodyn,gridcd +#smoke gbox80 2x2 boxsymn,run1day,gridcd +smoke gbox80 4x2 boxsyme,run1day,gridcd +#smoke gbox80 4x1 boxsymne,run1day,gridcd +#smoke gbox80 2x2 boxsymn,run1day,kmtislands,gridcd +smoke gbox80 4x1 boxsyme,run1day,kmtislands,gridcd +#smoke gbox80 4x2 boxsymne,run1day,kmtislands,gridcd +#smoke gbox80 8x1 boxislandsn,run1day,gridcd +smoke gbox80 4x2 boxislandse,run1day,gridcd +#smoke gbox80 2x4 boxislandsne,run1day,gridcd +smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd +smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day +smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day +smoke gx3 1x1x5x4x580 reprosum,run10day,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day +smoke gx1 32x1x16x16x32 reprosum,run10day,gridcd +smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day +smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day +smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day + +smoke gx3 8x2 diag1,run5day,gridc +smoke gx3 8x4 diag1,run5day,debug,gridc +restart gx3 4x2 debug,diag1,gridc +restart2 gx1 16x2 debug,diag1,gridc +smoke gbox12 1x1x12x12x1 boxchan,gridc +smoke gbox80 1x1 box2001,gridc +smoke gbox80 2x2 boxwallp5,gridc +smoke gbox80 3x3 boxwall,gridc +smoke gbox80 2x2 boxwallblock,gridc 
+smoke gbox80 1x1 boxslotcyl,gridc +smoke gbox80 2x4 boxnodyn,gridc +#smoke gbox80 2x2 boxsymn,run1day,gridc +smoke gbox80 4x2 boxsyme,run1day,gridc +#smoke gbox80 4x1 boxsymne,run1day,gridc +#smoke gbox80 2x2 boxsymn,run1day,kmtislands,gridc +smoke gbox80 4x1 boxsyme,run1day,kmtislands,gridc +#smoke gbox80 4x2 boxsymne,run1day,kmtislands,gridc +#smoke gbox80 8x1 boxislandsn,run1day,gridc +smoke gbox80 4x2 boxislandse,run1day,gridc +#smoke gbox80 2x4 boxislandsne,run1day,gridc +smoke gx3 1x1x100x116x1 reprosum,run10day,gridc +smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day +smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day +smoke gx3 1x1x5x4x580 reprosum,run10day,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day +smoke gx1 32x1x16x16x32 reprosum,run10day,gridc +smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day +smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day +smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day diff --git a/configuration/scripts/tests/omp_suite.ts b/configuration/scripts/tests/omp_suite.ts index 9202b06e5..ea8680170 100644 --- a/configuration/scripts/tests/omp_suite.ts +++ b/configuration/scripts/tests/omp_suite.ts @@ -8,12 +8,14 @@ smoke gx3 4x4 alt04,reprosum,run10day smoke gx3 4x4 alt05,reprosum,run10day smoke gx3 8x2 alt06,reprosum,run10day smoke gx3 8x2 bgcz,reprosum,run10day +smoke gx1 15x2 reprosum,run10day smoke gx1 15x2 seabedprob,reprosum,run10day smoke gx3 14x2 fsd12,reprosum,run10day smoke gx3 11x2 isotope,reprosum,run10day smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day smoke gx3 6x4 dynpicard,reprosum,run10day smoke gx3 8x3 zsal,reprosum,run10day +smoke gx3 1x1x100x116x1 reprosum,run10day,thread smoke gbox128 8x2 reprosum,run10day smoke gbox128 12x2 boxnodyn,reprosum,run10day @@ -25,22 +27,130 @@ smoke gbox80 11x3 boxslotcyl,reprosum,run10day smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest smoke_gx3_8x4_diag1_reprosum_run10day smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread smoke_gx3_8x4_diag1_reprosum_run10day smoke gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread smoke_gx3_6x2_alt01_reprosum_run10day -smoke gx3 16x1 alt02,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_alt02_reprosum_run10day -smoke gx3 24x1 alt03,reprosum,run10day,cmplogrest,thread smoke_gx3_12x2_alt03_droundrobin_reprosum_run10day -smoke gx3 24x1 alt04,reprosum,run10day,cmplogrest,thread smoke_gx3_4x4_alt04_reprosum_run10day -smoke gx3 14x1 alt05,reprosum,run10day,cmplogrest,thread smoke_gx3_4x4_alt05_reprosum_run10day -smoke gx3 24x1 alt06,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_alt06_reprosum_run10day -smoke gx3 12x1 bgcz,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_bgcz_reprosum_run10day -smoke gx1 28x1 seabedprob,reprosum,run10day,cmplogrest,thread smoke_gx1_15x2_reprosum_run10day_seabedprob -smoke gx3 30x1 fsd12,reprosum,run10day,cmplogrest,thread smoke_gx3_14x2_fsd12_reprosum_run10day -smoke gx3 16x1 isotope,reprosum,run10day,cmplogrest,thread smoke_gx3_11x2_isotope_reprosum_run10day -smoke gx3 28x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread smoke_gx3_8x4_icdefault_reprosum_run10day_snwitdrdg_snwgrain -smoke gx3 18x1 dynpicard,reprosum,run10day,cmplogrest,thread smoke_gx3_6x4_dynpicard_reprosum_run10day -smoke gx3 20x1 
zsal,reprosum,run10day,cmplogrest,thread smoke_gx3_8x3_reprosum_run10day_zsal - -smoke gbox128 20x1 reprosum,run10day,cmplogrest,thread smoke_gbox128_8x2_reprosum_run10day -smoke gbox128 16x1 boxnodyn,reprosum,run10day,cmplogrest,thread smoke_gbox128_12x2_boxnodyn_reprosum_run10day -smoke gbox128 14x1 boxadv,reprosum,run10day,cmplogrest,thread smoke_gbox128_9x2_boxadv_reprosum_run10day -smoke gbox128 24x1 boxrestore,reprosum,run10day,cmplogrest,thread smoke_gbox128_14x2_boxrestore_reprosum_run10day -smoke gbox80 19x1 box2001,reprosum,run10day,cmplogrest,thread smoke_gbox80_4x5_box2001_reprosum_run10day -smoke gbox80 8x4 boxslotcyl,reprosum,run10day,cmplogrest,thread smoke_gbox80_11x3_boxslotcyl_reprosum_run10day +smoke gx3 8x1 alt02,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_alt02_reprosum_run10day +smoke gx3 8x1 alt03,reprosum,run10day,cmplogrest,thread smoke_gx3_12x2_alt03_droundrobin_reprosum_run10day +smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread smoke_gx3_4x4_alt04_reprosum_run10day +smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread smoke_gx3_4x4_alt05_reprosum_run10day +smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_alt06_reprosum_run10day +smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_bgcz_reprosum_run10day +smoke gx1 18x1 reprosum,run10day,cmplogrest,thread smoke_gx1_15x2_reprosum_run10day +smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread smoke_gx1_15x2_reprosum_run10day_seabedprob +smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread smoke_gx3_14x2_fsd12_reprosum_run10day +smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread smoke_gx3_11x2_isotope_reprosum_run10day +smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread smoke_gx3_8x4_icdefault_reprosum_run10day_snwitdrdg_snwgrain +smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread smoke_gx3_6x4_dynpicard_reprosum_run10day +smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread smoke_gx3_8x3_reprosum_run10day_zsal +smoke gx3 4x2x25x29x4 reprosum,run10day smoke_gx3_1x1x100x116x1_reprosum_run10day_thread +smoke gx3 8x4x5x4x80 reprosum,run10day smoke_gx3_1x1x100x116x1_reprosum_run10day_thread + +smoke gbox128 8x1 reprosum,run10day,cmplogrest,thread smoke_gbox128_8x2_reprosum_run10day +smoke gbox128 8x1 boxnodyn,reprosum,run10day,cmplogrest,thread smoke_gbox128_12x2_boxnodyn_reprosum_run10day +smoke gbox128 8x1 boxadv,reprosum,run10day,cmplogrest,thread smoke_gbox128_9x2_boxadv_reprosum_run10day +smoke gbox128 8x1 boxrestore,reprosum,run10day,cmplogrest,thread smoke_gbox128_14x2_boxrestore_reprosum_run10day +smoke gbox80 8x1 box2001,reprosum,run10day,cmplogrest,thread smoke_gbox80_4x5_box2001_reprosum_run10day +smoke gbox80 8x1 boxslotcyl,reprosum,run10day,cmplogrest,thread smoke_gbox80_11x3_boxslotcyl_reprosum_run10day + +#gridC + +smoke gx3 8x4 diag1,reprosum,run10day,gridc +smoke gx3 6x2 alt01,reprosum,run10day,gridc +smoke gx3 8x2 alt02,reprosum,run10day,gridc +#smoke gx3 12x2 alt03,droundrobin,reprosum,run10day,gridc +smoke gx3 4x4 alt04,reprosum,run10day,gridc +smoke gx3 4x4 alt05,reprosum,run10day,gridc +smoke gx3 8x2 alt06,reprosum,run10day,gridc +smoke gx3 8x2 bgcz,reprosum,run10day,gridc +smoke gx1 15x2 reprosum,run10day,gridc +smoke gx1 15x2 seabedprob,reprosum,run10day,gridc +smoke gx3 14x2 fsd12,reprosum,run10day,gridc +smoke gx3 11x2 isotope,reprosum,run10day,gridc +smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridc +#smoke gx3 6x4 dynpicard,reprosum,run10day,gridc +smoke gx3 8x3 
zsal,reprosum,run10day,gridc +smoke gx3 1x1x100x116x1 reprosum,run10day,gridc,thread + +smoke gbox128 8x2 reprosum,run10day,gridc +smoke gbox128 12x2 boxnodyn,reprosum,run10day,gridc +#smoke gbox128 9x2 boxadv,reprosum,run10day,gridc +smoke gbox128 14x2 boxrestore,reprosum,run10day,gridc +smoke gbox80 4x5 box2001,reprosum,run10day,gridc +smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridc + +smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest,gridc smoke_gx3_8x4_gridc_diag1_reprosum_run10day +smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_gridc_diag1_reprosum_run10day +smoke gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_6x2_alt01_gridc_reprosum_run10day +smoke gx3 8x1 alt02,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_alt02_gridc_reprosum_run10day +#smoke gx3 8x1 alt03,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_12x2_alt03_droundrobin_gridc_reprosum_run10day +smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_4x4_alt04_gridc_reprosum_run10day +smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_4x4_alt05_gridc_reprosum_run10day +smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_alt06_gridc_reprosum_run10day +smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_bgcz_gridc_reprosum_run10day +smoke gx1 18x1 reprosum,run10day,cmplogrest,thread,gridc smoke_gx1_15x2_gridc_reprosum_run10day +smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread,gridc smoke_gx1_15x2_gridc_reprosum_run10day_seabedprob +smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_14x2_fsd12_gridc_reprosum_run10day +smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_11x2_gridc_isotope_reprosum_run10day +smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_gridc_icdefault_reprosum_run10day_snwitdrdg_snwgrain +#smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_6x4_dynpicard_gridc_reprosum_run10day +smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x3_gridc_reprosum_run10day_zsal +smoke gx3 4x2x25x29x4 reprosum,run10day,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day_thread +smoke gx3 8x4x5x4x80 reprosum,run10day,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day_thread + +smoke gbox128 8x1 reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_8x2_gridc_reprosum_run10day +smoke gbox128 8x1 boxnodyn,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_12x2_boxnodyn_gridc_reprosum_run10day +#smoke gbox128 8x1 boxadv,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_9x2_boxadv_gridc_reprosum_run10day +smoke gbox128 8x1 boxrestore,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_14x2_boxrestore_gridc_reprosum_run10day +smoke gbox80 8x1 box2001,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox80_4x5_box2001_gridc_reprosum_run10day +smoke gbox80 8x1 boxslotcyl,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox80_11x3_boxslotcyl_gridc_reprosum_run10day + +#gridCD + +smoke gx3 8x4 diag1,reprosum,run10day,gridcd +smoke gx3 6x2 alt01,reprosum,run10day,gridcd +smoke gx3 8x2 alt02,reprosum,run10day,gridcd +#smoke gx3 12x2 alt03,droundrobin,reprosum,run10day,gridcd +smoke gx3 4x4 alt04,reprosum,run10day,gridcd +smoke gx3 4x4 alt05,reprosum,run10day,gridcd +smoke gx3 8x2 alt06,reprosum,run10day,gridcd +smoke gx3 8x2 bgcz,reprosum,run10day,gridcd +smoke gx1 15x2 reprosum,run10day,gridcd +smoke gx1 15x2 
seabedprob,reprosum,run10day,gridcd +smoke gx3 14x2 fsd12,reprosum,run10day,gridcd +smoke gx3 11x2 isotope,reprosum,run10day,gridcd +smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridcd +#smoke gx3 6x4 dynpicard,reprosum,run10day,gridcd +smoke gx3 8x3 zsal,reprosum,run10day,gridcd +smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd,thread + +smoke gbox128 8x2 reprosum,run10day,gridcd +smoke gbox128 12x2 boxnodyn,reprosum,run10day,gridcd +#smoke gbox128 9x2 boxadv,reprosum,run10day,gridcd +smoke gbox128 14x2 boxrestore,reprosum,run10day,gridcd +smoke gbox80 4x5 box2001,reprosum,run10day,gridcd +smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridcd + +smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest,gridcd smoke_gx3_8x4_gridcd_diag1_reprosum_run10day +smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_gridcd_diag1_reprosum_run10day +smoke gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_6x2_alt01_gridcd_reprosum_run10day +smoke gx3 8x1 alt02,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_alt02_gridcd_reprosum_run10day +#smoke gx3 8x1 alt03,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_12x2_alt03_droundrobin_gridcd_reprosum_run10day +smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_4x4_alt04_gridcd_reprosum_run10day +smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_4x4_alt05_gridcd_reprosum_run10day +smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_alt06_gridcd_reprosum_run10day +smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_bgcz_gridcd_reprosum_run10day +smoke gx1 18x1 reprosum,run10day,cmplogrest,thread,gridcd smoke_gx1_15x2_gridcd_reprosum_run10day +smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx1_15x2_gridcd_reprosum_run10day_seabedprob +smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_14x2_fsd12_gridcd_reprosum_run10day +smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_11x2_gridcd_isotope_reprosum_run10day +smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_gridcd_icdefault_reprosum_run10day_snwitdrdg_snwgrain +#smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_6x4_dynpicard_gridcd_reprosum_run10day +smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x3_gridcd_reprosum_run10day_zsal +smoke gx3 4x2x25x29x4 reprosum,run10day,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day_thread +smoke gx3 8x4x5x4x80 reprosum,run10day,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day_thread + +smoke gbox128 8x1 reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_8x2_gridcd_reprosum_run10day +smoke gbox128 8x1 boxnodyn,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_12x2_boxnodyn_gridcd_reprosum_run10day +#smoke gbox128 8x1 boxadv,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_9x2_boxadv_gridcd_reprosum_run10day +smoke gbox128 8x1 boxrestore,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_14x2_boxrestore_gridcd_reprosum_run10day +smoke gbox80 8x1 box2001,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox80_4x5_box2001_gridcd_reprosum_run10day +smoke gbox80 8x1 boxslotcyl,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox80_11x3_boxslotcyl_gridcd_reprosum_run10day + diff --git a/configuration/scripts/tests/test_restart2.files b/configuration/scripts/tests/test_restart2.files new file mode 100644 index 
000000000..7c22abe3a --- /dev/null +++ b/configuration/scripts/tests/test_restart2.files @@ -0,0 +1,2 @@ +test_nml.restart21 +test_nml.restart22 diff --git a/configuration/scripts/tests/test_restart2.script b/configuration/scripts/tests/test_restart2.script new file mode 100644 index 000000000..67760bbf4 --- /dev/null +++ b/configuration/scripts/tests/test_restart2.script @@ -0,0 +1,82 @@ + +# Build around a 2 day run with restart at day 1. +#----------------------------------------------------------- +# Run the CICE model baseline simulation + +cp ice_in ice_in.0 +${ICE_CASEDIR}/casescripts/parse_namelist.sh ice_in ${ICE_CASEDIR}/casescripts/test_nml.restart21 +cp ice_in ice_in.1 + +./cice.run +set res="$status" + +if ( $res != 0 ) then + mv -f ${ICE_CASEDIR}/test_output ${ICE_CASEDIR}/test_output.prev + cat ${ICE_CASEDIR}/test_output.prev | grep -iv "${ICE_TESTNAME} run" >! ${ICE_CASEDIR}/test_output + mv -f ${ICE_CASEDIR}/test_output ${ICE_CASEDIR}/test_output.prev + cat ${ICE_CASEDIR}/test_output.prev | grep -iv "${ICE_TESTNAME} test " >! ${ICE_CASEDIR}/test_output + rm -f ${ICE_CASEDIR}/test_output.prev + echo "FAIL ${ICE_TESTNAME} run" >> ${ICE_CASEDIR}/test_output + echo "FAIL ${ICE_TESTNAME} test " >> ${ICE_CASEDIR}/test_output + exit 99 +endif + +# Prepend 'base_' to the final restart file to save for comparison +if ( "${ICE_IOTYPE}" == "binary" ) then + set end_date = `ls -t1 ${ICE_RUNDIR}/restart | head -1 | awk -F'.' '{print $NF}'` + foreach file (${ICE_RUNDIR}/restart/*${end_date}) + set surname = `echo $file | awk -F'/' '{print $NF}'` + mv $file ${ICE_RUNDIR}/restart/base_$surname + end +else + set test_file = `ls -t1 ${ICE_RUNDIR}/restart | head -1` + set test_data = ${ICE_RUNDIR}/restart/${test_file} + set base_data = ${ICE_RUNDIR}/restart/base_${test_file} + mv ${test_data} ${base_data} +endif + +#----------------------------------------------------------- +# Run the CICE model for the restart simulation + +# Modify the contents of the pointer file for restart +perl -i -pe's/(\d{4})-(\d{2})-(\d{2})/sprintf("%04d-%02d-%02d",$1,$2,$3-1)/e' ${ICE_RUNDIR}/ice.restart_file + +${ICE_CASEDIR}/casescripts/parse_namelist.sh ice_in ${ICE_CASEDIR}/casescripts/test_nml.restart22 +cp ice_in ice_in.2 + +./cice.run +set res="$status" + +cp ice_in.0 ice_in + +mv -f ${ICE_CASEDIR}/test_output ${ICE_CASEDIR}/test_output.prev +cat ${ICE_CASEDIR}/test_output.prev | grep -iv "${ICE_TESTNAME} run" >! ${ICE_CASEDIR}/test_output +mv -f ${ICE_CASEDIR}/test_output ${ICE_CASEDIR}/test_output.prev +cat ${ICE_CASEDIR}/test_output.prev | grep -iv "${ICE_TESTNAME} test" >! 
${ICE_CASEDIR}/test_output +rm -f ${ICE_CASEDIR}/test_output.prev + +if ( $res != 0 ) then + echo "FAIL ${ICE_TESTNAME} run " >> ${ICE_CASEDIR}/test_output + echo "FAIL ${ICE_TESTNAME} test " >> ${ICE_CASEDIR}/test_output + exit 99 +else + set log_file = `ls -t1 ${ICE_RUNDIR}/cice.runlog* | head -1` + set ttimeloop = `grep TimeLoop ${log_file} | grep Timer | cut -c 22-32` + set tdynamics = `grep Dynamics ${log_file} | grep Timer | cut -c 22-32` + set tcolumn = `grep Column ${log_file} | grep Timer | cut -c 22-32` + if (${ttimeloop} == "") set ttimeloop = -1 + if (${tdynamics} == "") set tdynamics = -1 + if (${tcolumn} == "") set tcolumn = -1 + echo "PASS ${ICE_TESTNAME} run ${ttimeloop} ${tdynamics} ${tcolumn}" >> ${ICE_CASEDIR}/test_output + + ${ICE_CASEDIR}/casescripts/comparebfb.csh ${ICE_RUNDIR}/restart + set bfbstatus = $status + if (${bfbstatus} == 0) then + echo "PASS ${ICE_TESTNAME} test " >> ${ICE_CASEDIR}/test_output + else + echo "FAIL ${ICE_TESTNAME} test " >> ${ICE_CASEDIR}/test_output + endif +endif + +#----------------------------------------------------------- + diff --git a/configuration/scripts/tests/unittest_suite.ts b/configuration/scripts/tests/unittest_suite.ts index 21810a1e3..76c9f4312 100644 --- a/configuration/scripts/tests/unittest_suite.ts +++ b/configuration/scripts/tests/unittest_suite.ts @@ -6,3 +6,8 @@ unittest gx3 1x1x25x29x16 sumchk unittest tx1 8x1 sumchk unittest gx3 4x1 bcstchk unittest gx3 1x1 bcstchk +unittest gx3 8x2 gridavgchk,dwblockall +unittest gx3 12x1 gridavgchk +unittest gx1 28x1 gridavgchk,dwblockall +unittest gx1 16x2 gridavgchk +unittest gbox128 8x2 gridavgchk diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index d216f7849..c17938d59 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -95,6 +95,7 @@ either Celsius or Kelvin units). "calc_dragio", "if true, calculate ``dragio`` from ``iceruf_ocn`` and ``thickness_ocn_layer1``", "F" "calc_strair", "if true, calculate wind stress", "T" "calc_Tsfc", "if true, calculate surface temperature", "T" + "capping", "parameter for capping method of viscosities", "1.0" "Cdn_atm", "atmospheric drag coefficient", "" "Cdn_ocn", "ocean drag coefficient", "" "Cf", "ratio of ridging work to PE change in ridging", "17." @@ -150,6 +151,8 @@ either Celsius or Kelvin units). "debug_model_task", "Local mpi task value that defines debug_model point output.", "" "debug_model_step", "Initial timestep for output from the debug_model flag.", "" "Delta", "function of strain rates (see Section :ref:`dynam`)", "1/s" + "deltaminEVP", "minimum value of Delta for EVP (see Section :ref:`dynam`)", "1/s" + "deltaminVP", "minimum value of Delta for VP (see Section :ref:`dynam`)", "1/s" "default_season", "Season from which initial values of forcing are set.", "winter" "denom1", "combination of constants for stress equation", "" "depressT", "ratio of freezing temperature to salinity of brine", "0.054 deg/ppt" @@ -165,6 +168,7 @@ either Celsius or Kelvin units). "distribution_weight", "weighting method used to compute work per block", "" "divu", "strain rate I component, velocity divergence", "1/s" "divu_adv", "divergence associated with advection", "1/s" + "DminTarea", "deltamin \* tarea", "m\ :math:`^2`/s" "dms", "dimethyl sulfide concentration", "mmol/m\ :math:`^3`" "dmsp", "dimethyl sulfoniopropionate concentration", "mmol/m\ :math:`^3`" "dpscale", "time scale for flushing in permeable ice", ":math:`1\times 10^{-3}`" @@ -184,10 +188,14 @@ either Celsius or Kelvin units). 
"dumpfreq_n", "restart output frequency", "" "dump_last", "if true, write restart on last time step of simulation", "" "dwavefreq", "widths of wave frequency bins", "1/s" + "dxe", "width of E cell (:math:`\Delta x`) through the middle", "m" "dxhy", "combination of HTE values", "" + "dxn", "width of N cell (:math:`\Delta x`) through the middle", "m" "dxt", "width of T cell (:math:`\Delta x`) through the middle", "m" "dxu", "width of U cell (:math:`\Delta x`) through the middle", "m" + "dye", "height of E cell (:math:`\Delta y`) through the middle", "m" "dyhx", "combination of HTN values", "" + "dyn", "height of N cell (:math:`\Delta y`) through the middle", "m" "dyn_dt", "dynamics and transport time step (:math:`\Delta t_{dyn}`)", "s" "dyt", "height of T cell (:math:`\Delta y`) through the middle", "m" "dyu", "height of U cell (:math:`\Delta y`) through the middle", "m" @@ -196,16 +204,18 @@ either Celsius or Kelvin units). "dvirdg(n)dt", "ice volume ridging rate (category n)", "m/s" "**E**", "", "" "e11, e12, e22", "strain rate tensor components", "" + "earea", "area of E-cell", "m\ :math:`^2`" "ecci", "yield curve minor/major axis ratio, squared", "1/4" "eice(n)", "energy of melting of ice per unit area (in category n)", "J/m\ :math:`^2`" + "emask", "land/boundary mask, T east edge (E-cell)", "" "emissivity", "emissivity of snow and ice", "0.985" "eps13", "a small number", "10\ :math:`^{-13}`" "eps16", "a small number", "10\ :math:`^{-16}`" "esno(n)", "energy of melting of snow per unit area (in category n)", "J/m\ :math:`^2`" - "etax2", "2 x eta (shear viscous coefficient)", "kg/s" + "etax2", "2 x eta (shear viscosity)", "kg/s" "evap", "evaporative water flux", "kg/m\ :math:`^2`/s" "ew_boundary_type", "type of east-west boundary condition", "" - "eyc", "coefficient for calculating the parameter E, 0\ :math:`<` eyc :math:`<`\ 1", "0.36" + "elasticDamp", "coefficient for calculating the parameter E, 0\ :math:`<` elasticDamp :math:`<`\ 1", "0.36" "e_yieldcurve", "yield curve minor/major axis ratio", "2" "e_plasticpot", "plastic potential minor/major axis ratio", "2" "**F**", "", "" @@ -277,8 +287,20 @@ either Celsius or Kelvin units). 
"fyear_init", "initial forcing data year", "" "**G**", "", "" "gravit", "gravitational acceleration", "9.80616 m/s\ :math:`^2`" + "grid_atm", "grid structure for atm forcing/coupling fields, 'A', 'B', 'C', etc", "" + "grid_atm_dynu", "grid for atm dynamic-u forcing/coupling fields, 'T', 'U', 'N', 'E'", "" + "grid_atm_dynv", "grid for atm dynamic-v forcing/coupling fields, 'T', 'U', 'N', 'E'", "" + "grid_atm_thrm", "grid for atm thermodynamic forcing/coupling fields, 'T', 'U', 'N', 'E'", "" "grid_file", "input file for grid info", "" "grid_format", "format of grid files", "" + "grid_ice", "structure of the model ice grid, ‘B’, ‘C’, etc", "" + "grid_ice_dynu", "grid for ice dynamic-u model fields, 'T', 'U', 'N', 'E'", "" + "grid_ice_dynv", "grid for ice dynamic-v model fields, 'T', 'U', 'N', 'E'", "" + "grid_ice_thrm", "grid for ice thermodynamic model fields, 'T', 'U', 'N', 'E'", "" + "grid_ocn", "grid structure for ocn forcing/coupling fields, 'A', 'B', 'C', etc", "" + "grid_ocn_dynu", "grid for ocn dynamic-u forcing/coupling fields, 'T', 'U', 'N', 'E'", "" + "grid_ocn_dynv", "grid for ocn dynamic-v forcing/coupling fields, 'T', 'U', 'N', 'E'", "" + "grid_ocn_thrm", "grid for ocn thermodynamic forcing/coupling fields, 'T', 'U', 'N', 'E'", "" "grid_type", "‘rectangular’, ‘displaced_pole’, ‘column’ or ‘regional’", "" "gridcpl_file", "input file for coupling grid info", "" "grow_net", "specific biogeochemistry growth rate per grid cell", "s :math:`^{-1}`" @@ -322,6 +344,9 @@ either Celsius or Kelvin units). "i0vis","fraction of penetrating visible solar radiation", "0.70" "iblkp","block on which to write debugging data", "" "i(j)block", "Cartesian i,j position of block", "" + "ice_data_conc", "ice initialization concentration, used mainly for box tests", "" + "ice_data_dist", "ice initialization distribution, used mainly for box tests", "" + "ice_data_type", "ice initialization mask, used mainly for box tests", "" "ice_hist_field", "type for history variables", "" "ice_ic", "choice of initial conditions (see :ref:`tab-ic`)", "" "ice_stdout", "unit number for standard output", "" @@ -357,6 +382,7 @@ either Celsius or Kelvin units). "kice", "thermal conductivity of fresh ice (:cite:`Bitz99`)", "2.03 W/m/deg" "kitd", "type of itd conversions (0 = delta function, 1 = linear remap)", "1" "kmt_file", "input file for land mask info", "" + "kmt_type", "file, default or boxislands", "file" "krdg_partic", "ridging participation function", "1" "krdg_redist", "ridging redistribution function", "1" "krgdn", "mean ridge thickness per thickness of ridging ice", "" @@ -415,6 +441,7 @@ either Celsius or Kelvin units). "my_task", "task ID for the current processor", "" "**N**", "", "" "n_aero", "number of aerosol species", "" + "narea", "area of N-cell", "m\ :math:`^2`" "natmiter", "number of atmo boundary layer iterations", "5" "nblocks", "number of blocks on current processor", "" "nblocks_tot", "total number of blocks in decomposition", "" @@ -436,6 +463,7 @@ either Celsius or Kelvin units). "nilyr", "number of ice layers in each category", "7" "nit", "nitrate concentration", "mmol/m\ :math:`^3`" "nlt_bgc_[chem]", "ocean sources and sinks for biogeochemistry", "" + "nmask", "land/boundary mask, T north edge (N-cell)", "" "nml_filename", "namelist file name", "" "nprocs", "total number of processors", "" "npt", "total run length values associate with npt_unit", "" @@ -574,7 +602,7 @@ either Celsius or Kelvin units). 
"rside", "fraction of ice that melts laterally", "" "rsnw", "snow grain radius", "10\ :math:`^{-6}` m" "rsnw_fall", "freshly fallen snow grain radius", "100. :math:`\times` 10\ :math:`^{-6}` m" - "rsnw_melt", "melting snow grain radius", "1000. :math:`\times` 10\ :math:`^{-6}` m" + "rsnw_mlt", "melting snow grain radius", "1000. :math:`\times` 10\ :math:`^{-6}` m" "rsnw_nonmelt", "nonmelting snow grain radius", "500. :math:`\times` 10\ :math:`^{-6}` m" "rsnw_sig", "standard deviation of snow grain radius", "250. :math:`\times` 10\ :math:`^{-6}` m" "rsnw_tmax", "maximum snow radius", "1500. :math:`\times` 10\ :math:`^{-6}` m" @@ -657,7 +685,6 @@ either Celsius or Kelvin units). "time_end", "ending time for history averages", "" "time_forc", "time of last forcing update", "s" "Timelt", "melting temperature of ice top surface", "0. C" - "tinyarea", "puny \* tarea", "m\ :math:`^2`" "Tinz", "Internal ice temperature", "C" "TLAT", "latitude of cell center", "radians" "TLON", "longitude of cell center", "radians" @@ -693,7 +720,7 @@ either Celsius or Kelvin units). "uatm", "wind velocity in the x direction", "m/s" "ULAT", "latitude of U-cell centers", "radians" "ULON", "longitude of U-cell centers", "radians" - "umask", "land/boundary mask, velocity (U-cell)", "" + "umask", "land/boundary mask, velocity corner (U-cell)", "" "umax_stab", "ice speed threshold (diagnostics)", "1. m/s" "umin", "min wind speed for turbulent fluxes", "1. m/s" "uocn", "ocean current in the x-direction", "m/s" @@ -711,6 +738,7 @@ either Celsius or Kelvin units). "vice(n)", "volume per unit area of ice (in category n)", "m" "vicen_init", "ice volume at beginning of timestep", "m" "viscosity_dyn", "dynamic viscosity of brine", ":math:`1.79\times 10^{-3}` kg/m/s" + "visc_method", "method for calculating viscosities (‘avg_strength’ or ‘avg_zeta’)", "avg_strength" "vocn", "ocean current in the y-direction", "m/s" "vonkar", "von Karman constant", "0.4" "vraftn", "volume of rafted ice", "m" @@ -738,7 +766,7 @@ either Celsius or Kelvin units). "yieldstress11(12, 22)", "yield stress tensor components", "" "year_init", "the initial year", "" "**Z**", "", "" - "zetax2", "2 x zeta (bulk viscous coefficient)", "kg/s" + "zetax2", "2 x zeta (bulk viscosity)", "kg/s" "zlvl", "atmospheric level height (momentum)", "m" "zlvs", "atmospheric level height (scalars)", "m" "zref", "reference height for stability", "10. m" diff --git a/doc/source/master_list.bib b/doc/source/master_list.bib index e4afa0c46..b563414d7 100644 --- a/doc/source/master_list.bib +++ b/doc/source/master_list.bib @@ -77,7 +77,7 @@ @incollection{Assur58 volume = {598}, pages = {106-138} } -@Article{Schwarzacher59 +@Article{Schwarzacher59, author = "W. Schwarzacher", title = "{Pack ice studies in the Arctic Ocean}", journal = JGR, @@ -86,7 +86,7 @@ @Article{Schwarzacher59 pages = {2357-2367}, url = {http://dx.doi.org/10.1029/JZ064i012p02357} } -@Article{Untersteiner64 +@Article{Untersteiner64, author = "N. Untersteiner", title = "{Calculations of temperature regime and heat budget of sea ice in the Central Arctic}", journal = JGR, @@ -105,7 +105,7 @@ @incollection{Ono67 volume = "I", pages = "599--610" } -@Article{Maykut71 +@Article{Maykut71, author = "G.A. Maykut and N. Untersteiner", title = "{Some results from a time dependent thermodynamic model of sea ice}", journal = JGR, @@ -114,7 +114,7 @@ @Article{Maykut71 pages = {1550-1575}, url = {http://dx.doi.org/10.1029/JC076i006p01550} } -@Book{Stroud71 +@Book{Stroud71, author = "A.H. 
Stroud", title = "{Approximate Calculation of Multiple Integrals}", publisher = "Prentice-Hall", @@ -122,7 +122,7 @@ @Book{Stroud71 year = {1971}, pages = {431 pp}, } -@Article{Arya75 +@Article{Arya75, author = "S.P.S. Arya", title = "{A drag partition theory for determining the large-scale roughness parameter and wind stress on the Arctic pack ice}", journal = JGR, @@ -131,7 +131,7 @@ @Article{Arya75 pages = {3447-3454}, url = {http://dx.doi.org/10.1029/JC080i024p03447} } -@Article{Rothrock75 +@Article{Rothrock75, author = "D.A. Rothrock", title = "{The energetics of plastic deformation of pack ice by ridging}", journal = JGR, @@ -140,7 +140,7 @@ @Article{Rothrock75 pages = {4514-4519}, url = {http://dx.doi.org/10.1029/JC080i033p04514} } -@Article{Thorndike75 +@Article{Thorndike75, author = "A.S. Thorndike and D.A. Rothrock and G.A. Maykut and R. Colony", title = "{The thickness distribution of sea ice}", journal = JGR, @@ -149,7 +149,7 @@ @Article{Thorndike75 pages = {4501-4513}, url = {http://dx.doi.org/10.1029/JC080i033p04501} } -@Article{Semtner76 +@Article{Semtner76, author = "A.J. Semtner", title = "{A Model for the Thermodynamic Growth of Sea Ice in Numerical Investigations of Climate}", journal = JPO, @@ -158,7 +158,7 @@ @Article{Semtner76 pages = {379-389}, url = {http://dx.doi.org/10.1175/1520-0485(1976)006<0379:AMFTTG>2.0.CO;2} } -@Article{Hibler79 +@Article{Hibler79, author = "W.D. Hibler", title = "{A dynamic thermodynamic sea ice model}", journal = JPO, @@ -167,7 +167,7 @@ @Article{Hibler79 pages = {817-846}, url = {http://dx.doi.org/10.1175/1520-0485(1979)009<0815:ADTSIM>2.0.CO;2} } -@Article{Parkinson79 +@Article{Parkinson79, author = "C.L. Parkinson and W.M. Washington", title = "{A large-scale numerical model of sea ice}", journal = JGRO, @@ -177,7 +177,7 @@ @Article{Parkinson79 pages = {331-337}, url = {http://dx.doi.org/10.1029/JC084iC01p00311} } -@Article{Zalesak79 +@Article{Zalesak79, author = "S. T. Zalesak", title = "{Fully multidimensional flux-corrected transport algorithms for fluids}", journal = JCP, @@ -187,7 +187,7 @@ @Article{Zalesak79 pages = {335-362}, url = {http://dx.doi.org/10.1016/0021-9991(79)90051-2} } -@Article{Hibler80 +@Article{Hibler80, author = "W.D. Hibler", title = "{Modeling a variable thickness sea ice cover}", journal = MWR, @@ -196,7 +196,7 @@ @Article{Hibler80 pages = {1943-1973}, url = {http://dx.doi.org/10.1175/1520-0493(1980)108<1943:MAVTSI>2.0.CO;2} } -@Article{Maykut82 +@Article{Maykut82, author = "G.A. Maykut", title = "{Large-scale heat exchange and ice production in the central Arctic}", journal = JGRO, @@ -205,7 +205,7 @@ @Article{Maykut82 pages = {7971-7984}, url = {http://dx.doi.org/10.1029/JC087iC10p07971} } -@incollection{Siedler86 +@incollection{Siedler86, author = "G. Siedler and H. Peters", title = "Physical properties (general) of sea water", booktitle = "Landolt-Börnstein: Numerical data and functional relationships in science and technology, New Series V/3a", @@ -213,7 +213,7 @@ @incollection{Siedler86 year = {1986}, pages = {233-264}, } -@Article{Hibler87 +@Article{Hibler87, author = "W.D. Hibler and K. Bryan", title = "{A diagnostic ice-ocean model}", journal = JPO, @@ -222,7 +222,7 @@ @Article{Hibler87 pages = {987-1015}, url = {http://dx.doi.org/10.1175/1520-0485(1987)017<0987:ADIM>2.0.CO;2} } -@Article{Maykut87 +@Article{Maykut87, author = "G.A. Maykut and D.K. 
Perovich", title = "{The role of shortwave radiation in the summer decay of a sea ice cover}", journal = JGRO, @@ -231,7 +231,7 @@ @Article{Maykut87 pages = {7032-7044}, url = {http://dx.doi.org/10.1029/JC092iC07p07032} } -@Article{Rosati88 +@Article{Rosati88, author = "A. Rosati and K. Miyakoda", title = "{A general circulation model for upper ocean simulation}", journal = JPO, @@ -240,7 +240,7 @@ @Article{Rosati88 pages = {1601-1626}, url = {http://dx.doi.org/10.1175/1520-0485(1988)018<1601:AGCMFU>2.0.CO;2} } -@Article{Steele92 +@Article{Steele92, author = "M. Steele", title = "{Sea ice melting and floe geometry in a simple ice-ocean model}", journal = JGRO, @@ -249,7 +249,7 @@ @Article{Steele92 pages = {17729-17738}, url = {http://dx.doi.org/10.1029/92JC01755} } -@Article{Smith92 +@Article{Smith92, author = "R.D. Smith and J.K. Dukowicz and R.C. Malone", title = "{Parallel ocean general circulation modeling}", journal = PHYS, @@ -259,7 +259,7 @@ @Article{Smith92 pages = {38-61}, url = {http://dx.doi.org/10.1016/0167-2789(92)90225-C} } -@Article{Arrigo93 +@Article{Arrigo93, author = "K.R. Arrigo and J.N. Kremer and C.W. Sullivan", title = "{A simulated Antarctic fast ice ecosystem}", journal = JGRO, @@ -268,7 +268,7 @@ @Article{Arrigo93 pages = {6929-6946}, url = {http://dx.doi.org/10.1029/93JC00141} } -@Article{Dukowicz93 +@Article{Dukowicz93, author = "J.K. Dukowicz and R.D. Smith and R.C. Malone", title = "{A reformulation and implementation of the Bryan-Cox-Semtner ocean model on the connection machine}", journal = JTECH, @@ -278,7 +278,18 @@ @Article{Dukowicz93 pages = {195-208}, url = {http://dx.doi.org/10.1175/1520-0426(1993)010<0195:ARAIOT>2.0.CO;2} } -@Article{Dukowicz94 +@Article{Saad93, + author = "Y. Saad", + title = "{A Flexible Inner-Outer Preconditioned GMRES Algorithm}", + journal = SIAMJCP, + volume = {14}, + number = {2}, + year = {1993}, + pages = {461-469}, + doi = {10.1137/0914028}, + URL = {https://doi.org/10.1137/0914028} +} +@Article{Dukowicz94, author = "J.K. Dukowicz and R.D. Smith and R.C. Malone", title = "{Implicit free-surface method for the Bryan-Cox-Semtner ocean model}", journal = JGRO, @@ -288,7 +299,7 @@ @Article{Dukowicz94 pages = {7991-8014}, url = {http://dx.doi.org/10.1029/93JC03455} } -@Article{Ebert95 +@Article{Ebert95, author = "E.E. Ebert and J.L. Schramm and J.A. Curry", title = "{Disposition of solar radiation in sea ice and the upper ocean}", journal = JGRO, @@ -297,7 +308,7 @@ @Article{Ebert95 pages = {15965-15975}, url = {http://dx.doi.org/10.1029/95JC01672} } -@Article{Flato95 +@Article{Flato95, author = "G.M. Flato and W.D. Hibler", title = "{Ridging and strength in modeling the thickness distribution of Arctic sea ice}", journal = JGRO, @@ -306,7 +317,7 @@ @Article{Flato95 pages = {18611-18626}, url = {http://dx.doi.org/10.1029/95JC02091} } -@Article{Maykut95 +@Article{Maykut95, author = "G.A. Maykut and M.G. McPhee", title = "{Solar heating of the Arctic mixed layer}", journal = JGRO, @@ -315,7 +326,7 @@ @Article{Maykut95 pages = {24691-24703}, url = {http://dx.doi.org/10.1029/95JC02554} } -@Manual{Smith95 +@Manual{Smith95, author = "R.D. Smith and S. Kortas and B. Meltz", title = "{Curvilinear coordinates for global ocean models}", organization = "Technical Report LA-UR-95-1146, Los Alamos National Laboratory", @@ -332,7 +343,7 @@ @Article{Zwiers95 pages = {336-351}, url = {http://dx.doi.org/10.1175/1520-0442(1995)008<0336:TSCIAI>2.0.CO;2} } -@Article{Murray96 +@Article{Murray96, author = "R.J. 
Murray", title = "{Explicit generation of orthogonal grids for ocean models}", journal = JCT, @@ -341,7 +352,7 @@ @Article{Murray96 pages = {251-273}, url = {http://dx.doi.org/10.1006/jcph.1996.0136} } -@Article{Hunke97 +@Article{Hunke97, author = "E.C. Hunke and J.K. Dukowicz", title = "{An elastic-viscous-plastic model for sea ice dynamics}", journal = JPO, @@ -350,7 +361,7 @@ @Article{Hunke97 pages = {1849-1867}, url = {http://dx.doi.org/10.1175/1520-0485(1997)027<1849:AEVPMF>2.0.CO;2} } -@Article{Steele97 +@Article{Steele97, author = "M. Steele and J. Zhang and D. Rothrock and H. Stern", title = "{The force balance of sea ice in a numerical model of the Arctic Ocean}", journal = JGRO, @@ -360,7 +371,7 @@ @Article{Steele97 pages = {21061-21079}, url = {http://dx.doi.org/10.1029/97JC01454} } -@Article{Geiger98 +@Article{Geiger98, author = "C.A. Geiger and W.D. Hibler and S.F. Ackley", title = "{Large-scale sea ice drift and deformation: Comparison between models and observations in the western Weddell Sea during 1992}", journal = JGRO, @@ -370,7 +381,7 @@ @Article{Geiger98 pages = {21893-21913}, url = {http://dx.doi.org/10.1029/98JC01258} } -@Book{Lipscomb98 +@Book{Lipscomb98, author = "W.H. Lipscomb", title = "{Modeling the Thickness Distribution of Arctic Sea Ice}", publisher = "Dept. of Atmospheric Sciences University of Washington, Seattle", @@ -378,7 +389,7 @@ @Book{Lipscomb98 year = {1998}, url = {http://hdl.handle.net/1773/10081} } -@Article{Bitz99 +@Article{Bitz99, author = "C.M. Bitz and W.H. Lipscomb", title = "{An energy-conserving thermodynamic sea ice model for climate study}", journal = JGRO, @@ -388,7 +399,7 @@ @Article{Bitz99 pages = {15669-15677}, url = {http://dx.doi.org/10.1029/1999JC900100} } -@Article{Hunke99 +@Article{Hunke99, author = "E.C. Hunke and Y. Zhang", title = "{A comparison of sea ice dynamics models at high resolution}", journal = MWR, @@ -397,7 +408,7 @@ @Article{Hunke99 pages = {396-408}, url = {http://dx.doi.org/10.1175/1520-0493(1999)127<0396:ACOSID>2.0.CO;2} } -@Article{Jordan99 +@Article{Jordan99, author = "R.E. Jordan and E.L. Andreas and A.P. Makshtas", title = "{Heat budget of snow-covered sea ice at North Pole 4}", journal = JGRO, @@ -407,7 +418,7 @@ @Article{Jordan99 pages = {7785-7806}, url = {http://dx.doi.org/10.1029/1999JC900011} } -@Book{vonstorch99 +@Book{vonstorch99, author = "H. von Storch and F.W. Zwiers", title = "{Statistical Analysis in Climate Research}", publisher = "Cambridge University Press", @@ -415,7 +426,7 @@ @Book{vonstorch99 year = {1999}, pages = {484 pp}, } -@Article{Dukowicz00 +@Article{Dukowicz00, author = "J.K. Dukowicz and J.R. Baumgardner", title = "{Incremental remapping as a transport/advection algorithm}", journal = JCT, @@ -424,7 +435,16 @@ @Article{Dukowicz00 pages = {318-335}, url = {http://dx.doi.org/10.1006/jcph.2000.6465} } -@Article{Bitz01 +@Article{Kreyscher00, + author = "M. Kreyscher and M. Harder and P. Lemke and G.M. Flato", + title = "{Results of the {S}ea {I}ce {M}odel {I}ntercomparison {P}roject: evaluation of sea ice rheology schemes for use in climate simulations}", + journal = JGR, + year = {2000}, + volume = {105}, + number = {C5}, + pages = {11299-11320} +} +@Article{Bitz01, author = "C.M. Bitz and M.M. Holland and M. Eby and A.J. Weaver", title = "{Simulating the ice-thickness distribution in a coupled climate model}", journal = JGRO, @@ -433,7 +453,7 @@ @Article{Bitz01 pages = {2441-2463}, url = {http://dx.doi.org/10.1029/1999JC000113} } -@Article{Hunke01 +@Article{Hunke01, author = "E.C. 
Hunke", title = "{Viscous-plastic sea ice dynamics with the EVP model: Linearization issues}", journal = JCP, @@ -442,7 +462,7 @@ @Article{Hunke01 pages = {18-38}, url = {http://dx.doi.org/10.1006/jcph.2001.6710} } -@Article{Lipscomb01 +@Article{Lipscomb01, author = "W.H. Lipscomb", title = "{Remapping the thickness distribution in sea ice models}", journal = JGRO, @@ -451,7 +471,7 @@ @Article{Lipscomb01 pages = {13989-14000}, url = {http://dx.doi.org/10.1029/2000JC000518} } -@Article{He01 +@Article{He01, author = "Y. He and C.H.Q. Ding", title = "{Using Accurate Arithmetics to Improve Numerical Reproducibility and Stability in Parallel Applications}", journal = JOS, @@ -461,7 +481,7 @@ @Article{He01 pages = {259-277}, url = {http://dx.doi.org/10.1023/A:1008153532043} } -@Article{Schulson01 +@Article{Schulson01, author = "E.M. Schulson", title = "{Brittle failure of ice}", journal = EFM, @@ -480,7 +500,7 @@ @Article{Taylor01 pages = {7183-7192}, url = {http://dx.doi.org/10.1029/2000JD900719} } -@Article{Trodahl01 +@Article{Trodahl01, author = "H.J. Trodahl and S.O.F. Wilkinson and M.J. McGuinness and T.G. Haskeel", title = "{Thermal conductivity of sea ice: dependence on temperature and depth}", journal = GRL, @@ -489,7 +509,7 @@ @Article{Trodahl01 pages = {1279-1282}, url = {http://dx.doi.org/10.1029/2000GL012088} } -@Article{Hunke02 +@Article{Hunke02, author = "E.C. Hunke and J.K. Dukowicz", title = "{The Elastic-Viscous-Plastic sea ice dynamics model in general orthogonal curvilinear coordinates on a sphere—Effect of metric terms}", journal = MWR, @@ -498,21 +518,21 @@ @Article{Hunke02 pages = {1848-1865}, url = {http://dx.doi.org/10.1175/1520-0493(2002)130<1848:TEVPSI>2.0.CO;2} } -@Manual{Kauffman02 +@Manual{Kauffman02, author = "B.G. Kauffman and W.G. Large", title = "{The CCSM coupler, version 5.0.1}", journal = NTN, year = {2002}, url = {https://github.com/CICE-Consortium/CICE/blob/master/doc/PDF/KL_NCAR2002.pdf} } -@Manual{Hunke03 +@Manual{Hunke03, author = "E.C. Hunke and J.K. Dukowicz", title = "{The sea ice momentum equation in the free drift regime}", organization = "Technical Report LA-UR-03-2219, Los Alamos National Laboratory", year = {2003}, url = {https://github.com/CICE-Consortium/CICE/blob/master/doc/PDF/LAUR-03-2219.pdf} } -@Article{Amundrud04 +@Article{Amundrud04, author = "T.L. Amundrud and H. Malling and R.G. Ingram", title = "{Geometrical constraints on the evolution of ridged sea ice}", journal = JGRO, @@ -521,7 +541,7 @@ @Article{Amundrud04 issue = {C6}, url = {http://dx.doi.org/10.1029/2003JC002251} } -@Article{Connolley04 +@Article{Connolley04, author = "W.M. Connolley and J.M. Gregory and E.C. Hunke and A.J. McLaren", title = "{On the consistent scaling of terms in the sea ice dynamics equation}", journal = JPO, @@ -530,7 +550,7 @@ @Article{Connolley04 pages = {1776-1780}, url = {http://dx.doi.org/10.1175/1520-0485(2004)034<1776:OTCSOT>2.0.CO;2} } -@Article{Eicken04 +@Article{Eicken04, author = "H. Eicken and T.C. Grenfell and D.K. Perovich and J.A Richter-Menge and K. Frey", title = "{Hydraulic controls of summer Arctic pack ice albedo}", journal = JGRO, @@ -539,7 +559,7 @@ @Article{Eicken04 issue = {C8}, url = {http://dx.doi.org/10.1029/2003JC001989} } -@Article{Lipscomb04 +@Article{Lipscomb04, author = "W.H. Lipscomb and E.C. 
Hunke", title = "{Modeling sea ice transport using incremental remapping}", journal = MWR, @@ -548,7 +568,7 @@ @Article{Lipscomb04 pages = {1341-1354}, url = {http://dx.doi.org/10.1175/1520-0493(2004)132<1341:MSITUI>2.0.CO;2} } -@Article{Taylor04 +@Article{Taylor04, author = "P.D. Taylor and D.L. Feltham", title = "{A model of melt pond evolution on sea ice}", journal = JGRO, @@ -557,7 +577,7 @@ @Article{Taylor04 issue = {C12}, url = {http://dx.doi.org/10.1029/2004JC002361} } -@Article{Wilchinsky04 +@Article{Wilchinsky04, author = "A.V. Wilchinsky and D.L. Feltham", title = "{Dependence of sea ice yield-curve shape on ice thickness}", journal = JPO, @@ -575,7 +595,7 @@ @Article{Lavoie05 issue = {C11}, url = {http://dx.doi.org/10.1029/2005JC002922} } -@Book{Notz05 +@Book{Notz05, author = "D. Notz", title = "Thermodynamic and Fluid-Dynamical Processes in Sea Ice", publisher = "University of Cambridge, UK", @@ -583,7 +603,7 @@ @Book{Notz05 year = {2005}, url = {http://ulmss-newton.lib.cam.ac.uk/vwebv/holdingsInfo?bibId=27224} } -@Article{Feltham06 +@Article{Feltham06, author = "D.L. Feltham and N. Untersteiner and J.S. Wettlaufer and M.G. Worster", title = "{Sea ice is a mushy layer}", journal = GRL, @@ -618,7 +638,7 @@ @Article{Jin06 pages = {63-72}, url = {https://github.com/CICE-Consortium/CICE/blob/master/doc/PDF/JDWSTWLG06.pdf} } -@Article{Wilchinsky06 +@Article{Wilchinsky06, author = "A.V. Wilchinsky and D.L. Feltham", title = "{Modelling the rheology of sea ice as a collection of diamond-shaped floes}", journal = JNON, @@ -627,7 +647,7 @@ @Article{Wilchinsky06 pages = {22-32}, url = {http://dx.doi.org/10.1016/j.jnnfm.2006.05.001} } -@Book{Wilks06 +@Book{Wilks06, author = "D.S. Wilks", title = "{Statistical methods in the atmospheric sciences}", publisher = "Academic Press", @@ -635,14 +655,14 @@ @Book{Wilks06 year = {2006}, pages = {627 pp}, } -@Manual{Briegleb07 +@Manual{Briegleb07, author = "B.P. Briegleb and B. Light", title = "{A Delta-Eddington multiple scattering parameterization for solar radiation in the sea ice component of the Community Climate System Model}", organization = "NCAR Technical Note NCAR/TN-472+STR, National Center for Atmospheric Research", year = {2007}, url = {https://github.com/CICE-Consortium/CICE/blob/master/doc/PDF/BL_NCAR2007.pdf} } -@Article{Flocco07 +@Article{Flocco07, author = "D. Flocco and D.L. Feltham", title = "{A continuum model of melt pond evolution on Arctic sea ice}", journal = JGRO, @@ -651,7 +671,7 @@ @Article{Flocco07 number = {C8}, url = {http://dx.doi.org/10.1029/2006JC003836} } -@Article{Golden07 +@Article{Golden07, author = "K.M. Golden and H. Eicken and A.L. Heaton and J. Miner and D.J. Pringle and J. Zhu", title = "{Thermal evolution of permeability and microstructure in sea ice}", journal = GRL, @@ -660,7 +680,7 @@ @Article{Golden07 issue = {16}, url = {http://dx.doi.org/10.1029/2007GL030447} } -@Article{Hunke07 +@Article{Hunke07, author = "E. Hunke and M.M. Holland", title = "{Global atmospheric forcing data for Arctic ice-ocean modeling}", journal = JGRO, @@ -669,7 +689,7 @@ @Article{Hunke07 number = {C4}, url = {http://dx.doi.org/10.1029/2006JC003640} } -@Article{Lipscomb07 +@Article{Lipscomb07, author = "W.H. Lipscomb and E.C. Hunke and W. Maslowski and J. Jakacki", title = "{Ridging, strength, and stability in high-resolution sea ice models}", journal = JGRO, @@ -678,7 +698,7 @@ @Article{Lipscomb07 issue = {C3}, url = {http://dx.doi.org/10.1029/2005JC003355} } -@Article{Pringle07 +@Article{Pringle07, author = "D.J. 
Pringle and H. Eicken and H.J. Trodahl and L.G.E. Backstrom", title = "{Thermal conductivity of landfast Antarctic and Arctic sea ice}", journal = JGRO, @@ -687,7 +707,7 @@ @Article{Pringle07 issue = {C4}, url = {http://dx.doi.org/10.1029/2006JC003641} } -@Article{Stefels07 +@Article{Stefels07, author = "J. Stefels and M. Steinke and S. Turner and G. Malin and S. Belviso", title = "{Environmental constraints on the production and removal of the climatically active gas dimethylsulphide (DMS) and implications for ecosystem modelling}", journal = BGC, @@ -696,7 +716,20 @@ @Article{Stefels07 pages = {245-275}, url = {http://dx.doi.org/10.1007/978-1-4020-6214-8_18} } -@Article{Hunke09 +@Article{Lemieux08, + author = "J.-F. Lemieux and B. Tremblay and S. Thomas and J. Sedláček and L. A. Mysak", + title = "{Using the preconditioned Generalized Minimum RESidual (GMRES) method to solve the sea-ice momentum equation}", + journal = JGRO, + volume = {113}, + number = {C10}, + pages = {}, + keywords = {Sea ice, GMRES, Krylov subspace}, + doi = {10.1029/2007JC004680}, + url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2007JC004680}, + eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2007JC004680}, + year = {2008} +} +@Article{Hunke09, author = "E.C. Hunke and C.M. Bitz", title = "{Age characteristics in a multidecadal Arctic sea ice simulation}", journal = JGRO, @@ -705,7 +738,7 @@ @Article{Hunke09 issue = {CB}, url = {http://dx.doi.org/10.1029/2008JC005186} } -@Article{Large09 +@Article{Large09, author = "W.G. Large and S.G. Yeager", title = "{The global climatology of an interannually varying air-sea flux data set}", journal = OM, @@ -722,7 +755,7 @@ @Article{Tagliabue09 issue = {13}, url = {http://dx.doi.org/10.1029/2009GL038914} } -@Article{Weiss09 +@Article{Weiss09, author = "J. Weiss and E.M. Schulson", title = "{Coulombic faulting from the grain scale to the geophysical scale: lessons from ice}", journal = JPD, @@ -731,7 +764,7 @@ @Article{Weiss09 pages = {214017}, url = {http://dx.doi.org/10.1088/0022-3727/42/21/214017} } -@Article{Flocco10 +@Article{Flocco10, author = "D. Flocco and D.L. Feltham and A.K. Turner", title = "{Incorporation of a physically based melt pond scheme into the sea ice component of a climate model}", journal = JGRO, @@ -740,7 +773,7 @@ @Article{Flocco10 number = {C8}, url = {http://dx.doi.org/10.1029/2009JC005568} } -@Article{Konig10 +@Article{Konig10, author = "C. Konig Beatty and D.M. Holland", title = "{Modeling landfast ice by adding tensile strength}", journal = JPO, @@ -749,7 +782,7 @@ @Article{Konig10 pages = {185-198}, url = {http://dx.doi.org/10.1175/2009JPO4105.1} } -@Article{Armour11 +@Article{Armour11, author = "K.C. Armour and C.M. Bitz and L. Thompson and E.C. Hunke", title = "{Controls on Arctic sea ice from first-year and multi-year ice survivability}", journal = JC, @@ -758,7 +791,7 @@ @Article{Armour11 pages = {2378-2390}, url = {http://dx.doi.org/10.1175/2010JCLI3823.1} } -@Article{Deal11 +@Article{Deal11, author = "C. Deal and M. Jin and S. Elliott and E. Hunke and M. Maltrud and N. Jeffery", title = "{Large scale modeling of primary production and ice algal biomass within Arctic sea ice in 1992}", journal = JGRO, @@ -767,7 +800,7 @@ @Article{Deal11 issue = {C7}, url = {http://dx.doi.org/10.1029/2010JC006409} } -@Article{Lu11 +@Article{Lu11, author = "P. Lu and Z. Li and B. Cheng and M. 
Lepp{\"{a}}ranta", title = "{A parametrization fo the ice-ocean drag coefficient}", journal = JGRO, @@ -776,7 +809,7 @@ @Article{Lu11 number = {C7}, url = {http://dx.doi.org/10.1029/2010JC006878} } -@Article{Elliott12 +@Article{Elliott12, author = "S. Elliott and C. Deal and G. Humphries and E. Hunke and N. Jeffery and M. Jin and M. Levasseur and J. Stefels", title = "{Pan-Arctic simulation of coupled nutrient-sulfur cycling due to sea ice biology: Preliminary results}", journal = JGRB, @@ -785,7 +818,7 @@ @Article{Elliott12 issue = {G1}, url = {http://dx.doi.org/10.1029/2011JG001649} } -@Article{Flocco12 +@Article{Flocco12, author = "D. Flocco and D. Schroeder and D.L. Feltham and E.C. Hunke", title = "{Impact of melt ponds on Arctic sea ice simulations from 1990 to 2007}", journal = JGRO, @@ -794,7 +827,7 @@ @Article{Flocco12 number = {C9}, url = {http://dx.doi.org/10.1029/2012JC008195} } -@Article{Holland12 +@Article{Holland12, author = "M.M. Holland and D.A. Bailey and B.P. Briegleb and B. Light and E. Hunke", title = "{Improved sea ice shortwave radiation physics in CCSM4: The impact of melt ponds and aerosols on Arctic sea ice}", journal = JC, @@ -803,7 +836,7 @@ @Article{Holland12 pages = {1413-1430}, url = {http://dx.doi.org/10.1175/JCLI-D-11-00078.1} } -@Article{Lemieux12 +@Article{Lemieux12, author = "J.F. Lemieux and D.A. Knoll and B. Tremblay and D.M. Holland and M. Losch", title = "{A comparison of the {J}acobian-free {N}ewton {K}rylov method and the {EVP} model for solving the sea ice momentum equation with a viscous-plastic formulation: a serial algorithm study}", @@ -822,7 +855,7 @@ @Article{Lepparanta12 pages = {83-91}, doi = {http://dx.doi.org/10.1016/j.coldregions.2011.12.005} } -@Article{Lupkes12 +@Article{Lupkes12, author = "C. Lüpkes and V.M. Gryanik and J. Hartmann and E.L. Andreas", title = "{A parametrization, based on sea ice morphology, of the neutral atmospheric drag coefficients for weather prediction and climate models}", journal = JGRA, @@ -831,7 +864,7 @@ @Article{Lupkes12 number = {D13}, url = {http://dx.doi.org/10.1029/2012JD017630} } -@Article{Mirin12 +@Article{Mirin12, author = "A.A. Mirin and P.H. Worley", title = "{Improving the Performance Scalability of the Community Atmosphere Model}", journal = IJHPCA, @@ -841,7 +874,18 @@ @Article{Mirin12 pages = {17-30}, url = {http://dx.doi.org/10.1177/1094342011412630} } -@Article{Bouillon13 + +@Article{Bouillon09, + author = "S. Bouillon and M.A Morales Maqueda and V. Legat and T. Fichefet", + title = "{An elastic-viscous-plastic sea ice model formulated on Arakawa B and C grids}", + journal = OM, + year = {2009}, + volume = {27}, + pages = {174-184}, + url = {doi:10.1016/j.ocemod.2009.01.004} +} + +@Article{Bouillon13, author = "S. Bouillon and T. Fichefet and V. Legat and G. Madec", title = "{The elastic-viscous-plastic method revisited}", journal = OM, @@ -850,7 +894,7 @@ @Article{Bouillon13 pages = {1-12}, url = {http://dx.doi.org/10.1016/j.ocemod.2013.05.013} } -@Article{Hunke13 +@Article{Hunke13, author = "E.C. Hunke and D.A. Hebert and O. Lecomte", title = "{Level-ice melt ponds in the Los Alamos Sea Ice Model, CICE}", journal = OM, @@ -859,7 +903,7 @@ @Article{Hunke13 pages = {26-42}, url = {http://dx.doi.org/10.1016/j.ocemod.2012.11.008} } -@Article{Tsamados13 +@Article{Tsamados13, author = "M. Tsamados and D.L. Feltham and A.V. 
Wilchinsky", title = "{Impact of a new anisotropic rheology on simulations of Arctic sea ice}", journal = JGRO, @@ -868,7 +912,7 @@ @Article{Tsamados13 pages = {91-107}, url = {http://dx.doi.org/10.1029/2012JC007990} } -@Article{Turner13 +@Article{Turner13, author = "A.K. Turner and E.C. Hunke and C.M. Bitz", title = "{Two modes of sea-ice gravity drainage: a parameterization for large-scale modeling}", journal = JGRO, @@ -887,7 +931,7 @@ @Article{Craig14 pages = {154-165}, url = {http://dx.doi.org/10.1177/1094342014548771} } -@Article{Tsamados14 +@Article{Tsamados14, author = "M. Tsamados and D.L. Feltham and D. Schroeder and D. Flocco and S.L. Farrell and N.T. Kurtz and S.W. Laxon and S. Bacon", title = "{Impact of variable atmospheric and oceanic form drag on simulations of Arctic sea ice}", journal = JPO, @@ -896,7 +940,7 @@ @Article{Tsamados14 pages = {1329-1353}, url = {http://dx.doi.org/10.1175/JPO-D-13-0215.1} } -@Article{Kimmritz15 +@Article{Kimmritz15, author = "M. Kimmritz and S. Danilov and M. Losch", title = "{On the convergence of the modified elastic-viscous-plastic method for solving the sea ice momentum equation}", journal = JCP, @@ -905,6 +949,17 @@ @Article{Kimmritz15 pages = {90-100}, url = {http://dx.doi.org/10.1016/j.jcp.2015.04.051} } + +@Article{Kimmritz16, + author = "M. Kimmritz and S. Danilov and M. Losch", + title = "{The adaptive EVP method for solving the sea ice momentum equation}", + journal = OM, + year = {2016}, + volume = {101}, + pages = {59-67}, + url = {http://dx.doi.org/10.1016/j.ocemod.2016.03.004} +} + @Article{Roberts15, author = "A.F. Roberts and A.P. Craig and W. Maslowski and R. Osinski and A.K. DuVivier and M. Hughes and B. Nijssen and J.J. Cassano and M. Brunke", title = "{Simulating transient ice-ocean Ekman transport in the Regional Arctic System Model and Community Earth System Model}", @@ -915,7 +970,7 @@ @Article{Roberts15 pages = {211-228}, url = {http://dx.doi.org/10.3189/2015AoG69A760} } -@Article{Lemieux16 +@Article{Lemieux16, author = "J.F. Lemieux and F. Dupont and P. Blain and F. Roy and G.C. Smith and G.M. Flato", title = "{Improving the simulation of landfast ice by combining tensile strength and a parameterization for grounded ridges}", journal = JGRO, @@ -924,7 +979,7 @@ @Article{Lemieux16 pages = {7354-7368}, url = {http://dx.doi.org/10.1002/2016JC012006} } -@Article{Notz16 +@Article{Notz16, author = "D. Notz and A. Jahn and E. Hunke and F. Massonnet and J. Stroeve and B. Tremblay and M. Vancoppenolle", title = "{The CMIP6 Sea-Ice Model Intercomparison Project (SIMIP): understanding sea ice through climate-model simulations}", journal = GMD, @@ -950,15 +1005,40 @@ @Article{Roberts18 } @article{Roach19, -author = "L.A. Roach and C. M. Bitz and C. Horvat and S. M. Dean", -title = {{Advances in modelling interactions between sea ice and ocean surface waves}}, -journal = {Journal of Advances in Modeling Earth Systems}, -url = {http://doi.wiley.com/10.1029/2019MS001836}, -year={2019} + author = "L.A. Roach and C. M. Bitz and C. Horvat and S. M. Dean", + title = {{Advances in modelling interactions between sea ice and ocean surface waves}}, + journal = {Journal of Advances in Modeling Earth Systems}, + url = {http://doi.wiley.com/10.1029/2019MS001836}, + year={2019} } +@article{Koldunov19, + author = "N.V. Koldunov and S. Danilov and D. Sidorenko and N. Hutter and M. Losch and H. Goessling and N. Rakowsky and P. Scholz and D. Sein and Q. Wang and T. 
Jung", + title = {{Fast EVP solutions in a high-resolution sea ice model}}, + journal = {Journal of Advances in Modeling Earth Systems}, + volume={11}, + number={5}, + pages={1269-1284}, + year={2019}, + url = {http://doi.wiley.com/10.1029/2018MS001485} +} + + +@incollection{Arakawa77, + author = "A. Arakawa and V.R. Lamb", + title = "Computational Design of the Basic Dynamical Processes of the UCLA General Circulation Model", + editor = "Julius Chang", + series = "Methods in Computational Physics: Advances in Research and Applications", + publisher = {Elsevier}, + volume = {17}, + pages = {173-265}, + year = {1977}, + booktitle = "General Circulation Models of the Atmosphere", + issn = {0076-6860}, + doi = {https://doi.org/10.1016/B978-0-12-460817-7.50009-4}, + url = {https://www.sciencedirect.com/science/article/pii/B9780124608177500094}, +} -======= @article{Horvat15, author = "C. Horvat and E. Tziperman", journal = {The Cryosphere}, @@ -969,7 +1049,7 @@ @article{Horvat15 volume = {9}, year = {2015} } - @article{Roach18, +@article{Roach18, author = "L. A. Roach and C. Horvat and S. M. Dean and C. M. Bitz", url = {http://dx.doi.org/10.1029/2017JC013692}, journal = JGRO, @@ -980,7 +1060,7 @@ @article{Roach18 year = {2018} } -@Article{Ringeisen21 +@Article{Ringeisen21, author = "D. Ringeisen and L.B. Tremblay and M. Losch", title = "{Non-normal flow rules affect fracture angles in sea ice viscous-plastic rheologies}", journal = TC, @@ -999,31 +1079,6 @@ @Article{Tsujino18 pages = {79-139}, url = {http://dx.doi.org/10.1016/j.ocemod.2018.07.002} } -@Article{Lemieux08, - author = "J.-F. Lemieux and B. Tremblay and S. Thomas and J. Sedláček and L. A. Mysak", - title = "{Using the preconditioned Generalized Minimum RESidual (GMRES) method to solve the sea-ice momentum equation}", - journal = JGRO, - volume = {113}, - number = {C10}, - pages = {}, - keywords = {Sea ice, GMRES, Krylov subspace}, - doi = {10.1029/2007JC004680}, - url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2007JC004680}, - eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2007JC004680}, - year = {2008} -} -@Article{Saad93, - author = "Y. Saad", - title = "{A Flexible Inner-Outer Preconditioned GMRES Algorithm}", - journal = SIAMJCP, - volume = {14}, - number = {2}, - year = {1993}, - pages = {461-469}, - doi = {10.1137/0914028}, - URL = {https://doi.org/10.1137/0914028} -} - % ********************************************** % For new entries, see example entry in BIB_TEMPLATE.txt % ********************************************** diff --git a/doc/source/science_guide/sg_dynamics.rst b/doc/source/science_guide/sg_dynamics.rst index 9c529b8ec..287de001e 100644 --- a/doc/source/science_guide/sg_dynamics.rst +++ b/doc/source/science_guide/sg_dynamics.rst @@ -5,56 +5,6 @@ Dynamics ======== -There are different approaches in the CICE code for representing sea ice -rheology and for solving the sea ice momentum equation. The viscous-plastic (VP) originally developed by :cite:`Hibler79`, -the elastic-viscous-plastic (EVP) :cite:`Hunke97` model represents a modification of the -standard viscous-plastic (VP) model for sea ice dynamics. The elastic-anisotropic-plastic (EAP) model, -on the other hand, explicitly accounts for the observed sub-continuum -anisotropy of the sea ice cover :cite:`Wilchinsky06,Weiss09`. 
If -``kdyn`` = 1 in the namelist then the EVP model is used (module -**ice\_dyn\_evp.F90**), while ``kdyn`` = 2 is associated with the EAP -model (**ice\_dyn\_eap.F90**), and ``kdyn`` = 3 is associated with the -VP model (**ice\_dyn\_vp.F90**). - -At times scales associated with the -wind forcing, the EVP model reduces to the VP model while the EAP model -reduces to the anisotropic rheology described in detail in -:cite:`Wilchinsky06,Tsamados13`. At shorter time scales the -adjustment process takes place in both models by a numerically more -efficient elastic wave mechanism. While retaining the essential physics, -this elastic wave modification leads to a fully explicit numerical -scheme which greatly improves the model’s computational efficiency. - -The EVP sea ice dynamics model is thoroughly documented in -:cite:`Hunke97`, :cite:`Hunke01`, -:cite:`Hunke02` and :cite:`Hunke03` and the EAP -dynamics in :cite:`Tsamados13`. Simulation results and -performance of the EVP and EAP models have been compared with the VP -model and with each other in realistic simulations of the Arctic -respectively in :cite:`Hunke99` and -:cite:`Tsamados13`. - -The EVP numerical -implementation in this code release is that of :cite:`Hunke02` -and :cite:`Hunke03`, with revisions to the numerical solver as -in :cite:`Bouillon13`. The implementation of the EAP sea ice -dynamics into CICE is described in detail in -:cite:`Tsamados13`. - -The VP solver implementation mostly follows :cite:`Lemieux08`, with -FGMRES :cite:`Saad93` as the linear solver and GMRES as the preconditioner. -Note that the VP solver has not yet been tested on the ``tx1`` grid or with -threading enabled. - -Here we summarize the equations and -direct the reader to the above references for details. - -.. _momentum: - -******** -Momentum -******** - The force balance per unit area in the ice pack is given by a two-dimensional momentum equation :cite:`Hibler79`, obtained by integrating the 3D equation through the thickness of the ice in the @@ -93,19 +43,73 @@ For clarity, the two components of Equation :eq:`vpmom` are -C_bv-mfu - mg{\partial H_\circ\over\partial y}. \end{aligned} :label: momsys +On the B grid, the equations above are solved at the U point for the collocated u and v components (see figure :ref:`fig-Bgrid`). On the C grid, however, the two components are not collocated: the u component is at the E point while the v component is at the N point. -A bilinear discretization is used for the stress terms +The B grid spatial discretization is based on a variational method described in :cite:`Hunke97` and :cite:`Hunke02`. A bilinear discretization is used for the stress terms :math:`\partial\sigma_{ij}/\partial x_j`, which enables the discrete equations to be derived from the continuous equations written in curvilinear coordinates. In this manner, metric terms associated with the curvature of the grid are incorporated into the discretization explicitly. Details pertaining to -the spatial discretization are found in :cite:`Hunke02`. +the spatial discretization are found in :cite:`Hunke02`. + +On the C grid, however, a finite difference approach is used for the spatial discretization. The C grid discretization is based on :cite:`Bouillon09`, :cite:`Bouillon13` and :cite:`Kimmritz16`.
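+
+As a purely schematic illustration of this staggering (a sketch, not part of the CICE code), the few lines of NumPy below build an E point value of :math:`v` from the four surrounding N point values with a plain four-point average; the averaging actually performed in the model is handled by its own grid averaging routines and may use different weights, in particular near boundaries.
+
+.. code-block:: python
+
+   import numpy as np
+
+   def v_north_to_east(v_n):
+       """Schematic: equal-weight average of the four N point values of v
+       that surround each E point (uniform grid assumed)."""
+       # E(i,j) is flanked by N(i,j-1), N(i+1,j-1), N(i,j) and N(i+1,j)
+       return 0.25 * (v_n[:-1, :-1] + v_n[1:, :-1] + v_n[:-1, 1:] + v_n[1:, 1:])
+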
+ +There are different approaches in the CICE code for representing sea ice +rheology and for solving the sea ice momentum equation: the viscous-plastic (VP) rheology :cite:`Hibler79` with an implicit method, +the elastic-viscous-plastic (EVP) :cite:`Hunke97` model which represents a modification of the +VP model, the revised EVP (rEVP) approach :cite:`Lemieux12,Bouillon13` and the elastic-anisotropic-plastic (EAP) model which explicitly accounts for the sub-continuum +anisotropy of the sea ice cover :cite:`Wilchinsky06,Weiss09`. If +``kdyn`` = 1 in the namelist then the EVP model is used (module +**ice\_dyn\_evp.F90**), while ``kdyn`` = 2 is associated with the EAP +model (**ice\_dyn\_eap.F90**), and ``kdyn`` = 3 is associated with the +VP model (**ice\_dyn\_vp.F90**). The rEVP approach can be used by setting ``kdyn`` = 1 and ``revised_evp`` = true in the namelist. + +At time scales associated with the +wind forcing, the EVP model reduces to the VP model while the EAP model +reduces to the anisotropic rheology described in detail in +:cite:`Wilchinsky06,Tsamados13`. At shorter time scales the +adjustment process takes place in both models by a numerically more +efficient elastic wave mechanism. While retaining the essential physics, +this elastic wave modification leads to a fully explicit numerical +scheme which greatly improves the model’s computational efficiency. The rEVP is also a fully explicit scheme which by construction should lead to the VP solution. + +The EVP sea ice dynamics model is thoroughly documented in +:cite:`Hunke97`, :cite:`Hunke01`, +:cite:`Hunke02` and :cite:`Hunke03` and the EAP +dynamics in :cite:`Tsamados13`. Simulation results and +performance of the EVP and EAP models have been compared with the VP +model and with each other in realistic simulations of the Arctic +respectively in :cite:`Hunke99` and +:cite:`Tsamados13`. + +The EVP numerical +implementation in this code release is that of :cite:`Hunke02` +and :cite:`Hunke03`, with revisions to the numerical solver as +in :cite:`Bouillon13`. Details about the rEVP solver can be found in :cite:`Lemieux12`, :cite:`Bouillon13`, :cite:`Kimmritz15` and :cite:`Koldunov19`. The implementation of the EAP sea ice +dynamics into CICE is described in detail in +:cite:`Tsamados13`. + +The VP solver implementation mostly follows :cite:`Lemieux08`, with +FGMRES :cite:`Saad93` as the linear solver and GMRES as the preconditioner. +Note that the VP solver has not yet been tested on the ``tx1`` grid or with +threading enabled. + +The EVP, rEVP, EAP and VP approaches are all available with the B grid. However, at the moment, only the EVP and rEVP schemes are possible with the C grid. + +Here we summarize the equations and +direct the reader to the above references for details. + +.. _momentumTS: + +********************** +Momentum time stepping +********************** .. _evp-momentum: -Elastic-Viscous-Plastic -~~~~~~~~~~~~~~~~~~~~~~~ +EVP time discretization and solution +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The momentum equation is discretized in time as follows, for the classic EVP approach. @@ -118,24 +122,23 @@ variables used in the code. ..
math:: \underbrace{\left({m\over\Delta t_e}+{\tt vrel} \cos\theta\ + C_b \right)}_{\tt cca} u^{k+1} - - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb}v^{k+1} + - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb}v^{l} = &\underbrace{{\partial\sigma_{1j}^{k+1}\over\partial x_j}}_{\tt strintx} + \underbrace{\tau_{ax} - mg{\partial H_\circ\over\partial x} }_{\tt forcex} \\ &+ {\tt vrel}\underbrace{\left(U_w\cos\theta-V_w\sin\theta\right)}_{\tt waterx} + {m\over\Delta t_e}u^k, :label: umom .. math:: - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb} u^{k+1} + \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb} u^{l} + \underbrace{\left({m\over\Delta t_e}+{\tt vrel} \cos\theta + C_b \right)}_{\tt cca}v^{k+1} = &\underbrace{{\partial\sigma_{2j}^{k+1}\over\partial x_j}}_{\tt strinty} + \underbrace{\tau_{ay} - mg{\partial H_\circ\over\partial y} }_{\tt forcey} \\ &+ {\tt vrel}\underbrace{\left(U_w\sin\theta+V_w\cos\theta\right)}_{\tt watery} + {m\over\Delta t_e}v^k, :label: vmom -and :math:`{\tt vrel}\ \cdot\ {\tt waterx(y)}= {\tt taux(y)}`. +where :math:`{\tt vrel}\ \cdot\ {\tt waterx(y)}= {\tt taux(y)}` and the definitions of :math:`u^{l}` and :math:`v^{l}` vary depending on the grid. -We solve this system of equations analytically for :math:`u^{k+1}` and -:math:`v^{k+1}`. Define +As :math:`u` and :math:`v` are collocated on the B grid, :math:`u^{l}` and :math:`v^{l}` are respectively :math:`u^{k+1}` and :math:`v^{k+1}` such that this system of equations can be solved as follows. Define .. math:: \hat{u} = F_u + \tau_{ax} - mg{\partial H_\circ\over\partial x} + {\tt vrel} \left(U_w\cos\theta - V_w\sin\theta\right) + {m\over\Delta t_e}u^k @@ -169,10 +172,67 @@ where b = mf + {\tt vrel}\sin\theta. :label: cevpb +Note that the time discretization and solution method for the EAP is exactly the same as for the B grid EVP. More details on the EAP model are given in Section :ref:`stress-eap`. + +However, on the C grid, :math:`u` and :math:`v` are not collocated. When solving the :math:`u` momentum equation for :math:`u^{k+1}` (at the E point), :math:`v^{l}=v^{k}_{int}` where :math:`v^{k}_{int}` is :math:`v^{k}` from the surrounding N points interpolated to the E point. The same approach is used for the :math:`v` momentum equation. With this explicit treatment of the off-diagonal terms :cite:`Kimmritz16`, :math:`u^{k+1}` and :math:`v^{k+1}` are obtained by solving + +.. math:: + \begin{aligned} + u^{k+1} = {\hat{u} + b v^{k}_{int} \over a} \\ + v^{k+1} = {\hat{v} - b u^{k}_{int} \over a}. \end{aligned} + +.. _revp-momentum: + +Revised EVP time discretization and solution +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The revised EVP approach is based on a pseudo-time iterative scheme :cite:`Lemieux12`, :cite:`Bouillon13`, :cite:`Kimmritz15`. By construction, the revised EVP approach should lead to the VP solution +(given the right numerical parameters and a sufficiently large number of iterations). To do so, the inertial term is formulated such that it matches the backward Euler approach of +implicit solvers and there is an additional term for the pseudo-time iteration. Hence, with the revised approach, the discretized momentum equations :eq:`umom` and :eq:`vmom` become + +.. 
math:: + {\beta^*(u^{k+1}-u^k)\over\Delta t_e} + {m(u^{k+1}-u^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)} u^{k+1} + - & {\left(mf+{\tt vrel}\sin\theta\right)} v^{l} + = {{\partial\sigma_{1j}^{k+1}\over\partial x_j}} + + {\tau_{ax}} \\ + & - {mg{\partial H_\circ\over\partial x} } + + {\tt vrel} {\left(U_w\cos\theta-V_w\sin\theta\right)}, + :label: umomr + +.. math:: + {\beta^*(v^{k+1}-v^k)\over\Delta t_e} + {m(v^{k+1}-v^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)}v^{k+1} + + & {\left(mf+{\tt vrel}\sin\theta\right)} u^{l} + = {{\partial\sigma_{2j}^{k+1}\over\partial x_j}} + + {\tau_{ay}} \\ + & - {mg{\partial H_\circ\over\partial y} } + + {\tt vrel}{\left(U_w\sin\theta+V_w\cos\theta\right)}, + :label: vmomr + +where :math:`\beta^*` is a numerical parameter and :math:`u^n, v^n` are the components of the previous time level solution. +With :math:`\beta=\beta^* \Delta t \left( m \Delta t_e \right)^{-1}` :cite:`Bouillon13`, these equations can be written as + +.. math:: + \underbrace{\left((\beta+1){m\over\Delta t}+{\tt vrel} \cos\theta\ + C_b \right)}_{\tt cca} u^{k+1} + - \underbrace{\left(mf+{\tt vrel} \sin\theta\right)}_{\tt ccb} & v^{l} + = \underbrace{{\partial\sigma_{1j}^{k+1}\over\partial x_j}}_{\tt strintx} + + \underbrace{\tau_{ax} - mg{\partial H_\circ\over\partial x} }_{\tt forcex} \\ + & + {\tt vrel}\underbrace{\left(U_w\cos\theta-V_w\sin\theta\right)}_{\tt waterx} + {m\over\Delta t}(\beta u^k + u^n), + :label: umomr2 + +.. math:: + \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb} u^{l} + + \underbrace{\left((\beta+1){m\over\Delta t}+{\tt vrel} \cos\theta + C_b \right)}_{\tt cca} & v^{k+1} + = \underbrace{{\partial\sigma_{2j}^{k+1}\over\partial x_j}}_{\tt strinty} + + \underbrace{\tau_{ay} - mg{\partial H_\circ\over\partial y} }_{\tt forcey} \\ + & + {\tt vrel}\underbrace{\left(U_w\sin\theta+V_w\cos\theta\right)}_{\tt watery} + {m\over\Delta t}(\beta v^k + v^n), + :label: vmomr2 + +At this point, the solutions :math:`u^{k+1}` and :math:`v^{k+1}` for the B or the C grids are obtained in the same manner as for the standard EVP approach (see Section :ref:`evp-momentum` for details). + .. _vp-momentum: -Viscous-Plastic -~~~~~~~~~~~~~~~ +Implicit (VP) time discretization and solution +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the VP approach, equation :eq:`momsys` is discretized implicitly using a Backward Euler approach, and stresses are not computed explicitly: @@ -218,6 +278,15 @@ The Picard iterative process stops when :math:`\left\lVert \mathbf{u}_{k} \right Parameters for the FGMRES linear solver and the preconditioner can be controlled using additional namelist flags (see :ref:`dynamics_nml`). + +.. _surfstress: + +******************** +Surface stress terms +******************** + +The formulation for the wind stress is described in `Icepack Documentation `_. Below, some details about the ice-ocean stress and the seabed stress are given. + Ice-Ocean stress ~~~~~~~~~~~~~~~~ @@ -231,9 +300,8 @@ pending further testing. .. _seabed-stress: -*************** Seabed stress -*************** +~~~~~~~~~~~~~ CICE includes two options for calculating the seabed stress, i.e. the term in the momentum equation that represents the interaction @@ -254,49 +322,64 @@ grounding schemes. It is suggested to have a bathymetry field with water depths larger than 5 m that represents well shallow water (less than 30 m) regions such as the Laptev Sea and the East Siberian Sea. 
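+
+To make the role of :math:`C_b` concrete before describing the two schemes, the following scalar sketch (illustrative only, not the CICE implementation) shows how the seabed stress coefficient enters the velocity update in the same way as the other linear drag terms, i.e. through the ``cca`` coefficient introduced above; the off-diagonal ``ccb`` coupling is left out for brevity.
+
+.. code-block:: python
+
+   def velocity_update_1pt(u_old, m, dt_e, vrel, cos_theta, Cb, rhs):
+       """One schematic point update: a positive Cb simply augments the
+       linear drag, damping the velocity where ridges are grounded."""
+       cca = m / dt_e + vrel * cos_theta + Cb   # cf. the 'cca' coefficient
+       return (rhs + (m / dt_e) * u_old) / cca  # rhs: stress, forcing, ocean terms
+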
-Seabed stress based on linear keel draft (LKD) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +**Seabed stress based on linear keel draft (LKD)** This parameterization for the seabed stress is described in :cite:`Lemieux16`. It assumes that the largest keel draft varies linearly with the mean thickness in a grid cell (i.e. sea ice volume). The :math:`C_b` coefficients are expressed as .. math:: - C_b= k_2 \max [0,(h_u - h_{cu})] e^{-\alpha_b * (1 - a_u)} (\sqrt{u^2+v^2}+u_0)^{-1}, \\ + C_b= k_2 \max [0,(h - h_{c})] e^{-\alpha_b * (1 - a)} (\sqrt{u^2+v^2}+u_0)^{-1}, \\ :label: Cb where :math:`k_2` determines the maximum seabed stress that can be sustained by the grounded parameterized ridge(s), :math:`u_0` is a small residual velocity and :math:`\alpha_b` is a parameter to ensure that the seabed stress quickly drops when -the ice concentration is smaller than 1. In the code, :math:`k_2 \max [0,(h_u - h_{cu})] e^{-\alpha_b * (1 - a_u)}` is defined as -:math:`T_b`. The quantities :math:`h_u`, :math:`a_{u}` and :math:`h_{cu}` are calculated at -the 'u' point based on local ice conditions (surrounding tracer points). They are respectively given by +the ice concentration is smaller than 1. In the code, :math:`k_2 \max [0,(h - h_{c})] e^{-\alpha_b * (1 - a)}` is defined as +:math:`T_b`. + +On the B grid, the quantities :math:`h`, :math:`a` and :math:`h_{c}` are calculated at +the U point and are referred to as :math:`h_u`, :math:`a_{u}` and :math:`h_{cu}`. They are respectively given by .. math:: h_u=\max[v_i(i,j),v_i(i+1,j),v_i(i,j+1),v_i(i+1,j+1)], \\ :label: hu .. math:: - a_u=\max[a_i(i,j),a_i(i+1,j),a_i(i,j+1),a_i(i+1,j+1)]. \\ + a_u=\max[a_i(i,j),a_i(i+1,j),a_i(i,j+1),a_i(i+1,j+1)], \\ :label: au .. math:: h_{cu}=a_u h_{wu} / k_1, \\ :label: hcu -where the :math:`a_i` and :math:`v_i` are the total ice concentrations and ice volumes around the :math:`u` point :math:`i,j` and +where the :math:`a_i` and :math:`v_i` are the total ice concentrations and ice volumes around the U point :math:`i,j` and :math:`k_1` is a parameter that defines the critical ice thickness :math:`h_{cu}` at which the parameterized ridge(s) reaches the seafloor for a water depth :math:`h_{wu}=\min[h_w(i,j),h_w(i+1,j),h_w(i,j+1),h_w(i+1,j+1)]`. Given the formulation of :math:`C_b` in equation :eq:`Cb`, the seabed stress components are non-zero only when :math:`h_u > h_{cu}`. +As :math:`u` and :math:`v` are not collocated on the C grid, :math:`T_b` is calculated at E and N points. For example, at the E point, :math:`h_e`, :math:`a_{e}` and :math:`h_{ce}` are respectively + +.. math:: + h_e=\max[v_i(i,j),v_i(i+1,j)], \\ + :label: he + +.. math:: + a_e=\max[a_i(i,j),a_i(i+1,j)], \\ + :label: ae + +.. math:: + h_{ce}=a_e h_{we} / k_1, \\ + :label: hce + +where :math:`h_{we}=\min[h_w(i,j),h_w(i+1,j)]`. Similar calculations are done at the N points. + +To prevent unrealistic grounding, :math:`T_b` is set to zero when :math:`h_{wu}` +is larger than 30 m (same idea on the C grid depending on :math:`h_{we}` and :math:`h_{wn}`). This maximum value is chosen based on observations of large keels in the Arctic Ocean :cite:`Amundrud04`. + The maximum seabed stress depends on the weight of the ridge above hydrostatic balance and the value of :math:`k_2`. It is, however, the parameter :math:`k_1` that has the most notable impact on the simulated extent of landfast ice. The value of :math:`k_1` can be changed at runtime using the namelist variable ``k1``. 
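+
+For illustration, the LKD recipe at a B grid U point condenses to a few lines; this is a sketch only (the parameters ``k1``, ``k2``, :math:`\alpha_b` and :math:`u_0` take the values set in the code or namelist, and the CICE implementation operates on full arrays):
+
+.. code-block:: python
+
+   import math
+
+   def seabed_Cb_lkd(v_i, a_i, h_w, u, v, k1, k2, alpha_b, u0):
+       """Schematic LKD seabed stress coefficient at one U point.
+       v_i, a_i, h_w: ice volumes, concentrations and water depths at
+       the four surrounding T points (length-4 sequences)."""
+       h_u, a_u, h_wu = max(v_i), max(a_i), min(h_w)
+       if h_wu > 30.0:                      # no grounding in deep water
+           return 0.0
+       h_cu = a_u * h_wu / k1               # critical thickness
+       T_b = k2 * max(0.0, h_u - h_cu) * math.exp(-alpha_b * (1.0 - a_u))
+       return T_b / (math.hypot(u, v) + u0)
+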
-To prevent unrealistic grounding, :math:`T_b` is set to zero when :math:`h_{wu}` -is larger than 30 m. This maximum value is chosen based on observations of large -keels in the Arctic Ocean :cite:`Amundrud04`. - -Seabed stress based on probabilistic approach -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +**Seabed stress based on probabilistic approach** This more sophisticated grounding parameterization computes the seabed stress based on the probability of contact between the ice thickness distribution @@ -325,7 +408,7 @@ ITD and the seabed is given by .. math:: P_c=\int_{0}^{\infty} \int_{0}^{D(x)} g(x)b(y) dy dx \label{prob_contact}. -:math:`T_b` is first calculated at the 't' point (referred to as :math:`T_{bt}`). :math:`T_{bt}` depends on the weight of the ridge in excess of hydrostatic balance. The parameterization first calculates +:math:`T_b` is first calculated at the T point (referred to as :math:`T_{bt}`). :math:`T_{bt}` depends on the weight of the ridge in excess of hydrostatic balance. The parameterization first calculates .. math:: T_{bt}^*=\mu_s g \int_{0}^{\infty} \int_{0}^{D(x)} (\rho_i x - \rho_w @@ -336,23 +419,35 @@ and then obtains :math:`T_{bt}` by multiplying :math:`T_{bt}^*` by :math:`e^{-\a To calculate :math:`T_{bt}^*` in equation :eq:`Tbt`, :math:`f(x)` and :math:`b(y)` are discretized using many small categories (100). :math:`f(x)` is discretized between 0 and 50 m while :math:`b(y)` is truncated at plus and minus three :math:`\sigma_b`. :math:`f(x)` is also modified by setting it to zero after a certain percentile of the log-normal distribution. This percentile, which is currently set to 99.7%, notably affects the simulation of landfast ice and is used as a tuning parameter. Its impact is similar to the one of the parameter :math:`k_1` for the LKD method. -:math:`T_b` at the 'u' point is calculated from the 't' point values around it according to +On the B grid, :math:`T_b` at the U point is calculated from the T point values around it according to .. math:: - T_b=\max[T_{bt}(i,j),T_{bt}(i+1,j),T_{bt}(i,j+1),T_{bt}(i+1,j+1)]. \\ + T_{bu}=\max[T_{bt}(i,j),T_{bt}(i+1,j),T_{bt}(i,j+1),T_{bt}(i+1,j+1)]. \\ :label: Tb Following again the LKD method, the seabed stress coefficients are finally expressed as .. math:: - C_b= T_b (\sqrt{u^2+v^2}+u_0)^{-1}, \\ + C_b= T_{bu} (\sqrt{u^2+v^2}+u_0)^{-1}. \\ :label: Cb2 +On the C grid, :math:`T_b` needs to be calculated at the E and N points. :math:`T_{be}` and :math:`T_{bn}` are respectively given by + +.. math:: + T_{be}=\max[T_{bt}(i,j),T_{bt}(i+1,j)], \\ + :label: Tbe + +.. math:: + T_{bn}=\max[T_{bt}(i,j),T_{bt}(i,j+1)]. \\ + :label: Tbn + +The :math:`C_{b}` are different at the E and N points and are respectively :math:`T_{be} (\sqrt{u^2+v^2_{int}}+u_0)^{-1}` and :math:`T_{bn} (\sqrt{u^2_{int} + v^2}+u_0)^{-1}` where :math:`v_{int}` (:math:`u_{int}`) is :math:`v` (:math:`u`) interpolated to the E (N) point. + .. _internal-stress: -******************************** -Internal stress -******************************** +******** +Rheology +******** For convenience we formulate the stress tensor :math:`\bf \sigma` in terms of :math:`\sigma_1=\sigma_{11}+\sigma_{22}`, @@ -398,19 +493,24 @@ An elliptical yield curve is used, with the viscosities given by .. math:: \zeta = {P(1+k_t)\over 2\Delta}, + :label: zeta .. math:: \eta = e_g^{-2} \zeta, + :label: eta where .. math:: \Delta = \left[D_D^2 + {e_f^2\over e_g^4}\left(D_T^2 + D_S^2\right)\right]^{1/2}.
+ :label: Delta + +When the deformation :math:`\Delta` tends toward zero, the viscosities tend toward infinity. To avoid this issue, :math:`\Delta` needs to be limited and is replaced by :math:`\Delta^*` in equation :eq:`zeta`. Two methods for limiting :math:`\Delta` (or for capping the viscosities) are available in the code. If the namelist parameter ``capping`` is set to 1., :math:`\Delta^*=max(\Delta, \Delta_{min})` :cite:`Hibler79` while with ``capping`` set to 0., the smoother formulation :math:`\Delta^*=(\Delta + \Delta_{min})` of :cite:`Kreyscher00` is used. The ice strength :math:`P` is a function of the ice thickness distribution as described in the `Icepack Documentation `_. - -Two modifications to the standard VP rheology of :cite:`Hibler79` are available. + +Two other modifications to the standard VP rheology of :cite:`Hibler79` are available. First, following the approach of :cite:`Konig10` (see also :cite:`Lemieux16`), the elliptical yield curve can be modified such that the ice has isotropic tensile strength. The tensile strength is expressed as a fraction of :math:`P`, that is :math:`k_t P` @@ -424,7 +524,7 @@ can be set in the namelist. The plastic potential can lead to more realistic fra By default, the namelist parameters are set to :math:`e_f=e_g=2` and :math:`k_t=0` which correspond to the standard VP rheology. -There are four options in the code for solving the sea ice momentum equation with a VP formulation: the standard EVP approach, a 1d EVP solver, the revised EVP approach and an implicit Picard solver. The modifications to the yield curve and to the flow rule described above are available for these four different solution methods. +There are four options in the code for solving the sea ice momentum equation with a VP formulation: the standard EVP approach, a 1d EVP solver, the revised EVP approach and an implicit Picard solver. The choice of the capping method for the viscosities and the modifications to the yield curve and to the flow rule described above are available for these four different solution methods. Note that only the EVP and revised EVP methods are currently available if one chooses the C grid. .. _stress-evp: @@ -457,7 +557,7 @@ for elastic waves, :math:`\Delta t_e < T < \Delta t`, as .. math:: E = {\zeta\over T}, -where :math:`T=E_\circ\Delta t` and :math:`E_\circ` (eyc) is a tunable +where :math:`T=E_\circ\Delta t` and :math:`E_\circ` (elasticDamp) is a tunable parameter less than one. Including the modification proposed by :cite:`Bouillon13` for equations :eq:`sig2` and :eq:`sig12` in order to improve numerical convergence, the stress equations become .. math:: @@ -488,6 +588,10 @@ the viscosity terms in the subcycling. Choices of the parameters used to define :math:`T` and :math:`\Delta t_e` are discussed in Sections :ref:`revp` and :ref:`parameters`. +On the B grid, the stresses :math:`\sigma_{1}`, :math:`\sigma_{2}` and :math:`\sigma_{12}` are collocated at the U point. To calculate these stresses, the viscosities :math:`\zeta` and :math:`\eta` and the replacement pressure :math:`P_R` are also defined at the U point. + +However, on the C grid, :math:`\sigma_{1}` and :math:`\sigma_{2}` are collocated at the T point while :math:`\sigma_{12}` is defined at the U point. During a subcycling step, :math:`\zeta`, :math:`\eta` and :math:`P_R` are first calculated at the T point. To do so, :math:`\Delta` given by equation :eq:`Delta` is calculated following the approach of :cite:`Bouillon13` (see also :cite:`Kimmritz16` for details). 
With this approach, :math:`D_S^2` at the T point is obtained by calculating :math:`D_S^2` at the U points and interpolating these values to the T point. As :math:`\sigma_{12}` is calculated at the U point, :math:`\eta` also needs to be computed at these locations. If ``visc_method`` in the namelist is set to ``avg_zeta`` (the default value), :math:`\eta` at the U point is obtained by interpolating T point values to this location. This corresponds to the approach used by :cite:`Bouillon13` and the one associated with the C1 configuration of :cite:`Kimmritz16`. On the other hand, if ``visc_method = avg_strength``, the strength :math:`P` calculated at T points is interpolated to the U point and :math:`\Delta` is calculated at the U point in order to obtain :math:`\eta` following equations :eq:`zeta` and :eq:`eta`. This latter approach is the one used in the C2 configuration of :cite:`Kimmritz16`. + .. _evp1d: 1d EVP solver ~~~~~~~~~~~~~ @@ -502,48 +606,7 @@ The scalability of geophysical models is in general terms limited by the memory Revised EVP approach ~~~~~~~~~~~~~~~~~~~~ -The revised EVP approach is based on a pseudo-time iterative scheme :cite:`Lemieux12`, :cite:`Bouillon13`, :cite:`Kimmritz15`. By construction, the revised EVP approach should lead to the VP solution -(given the right numerical parameters and a sufficiently large number of iterations). To do so, the inertial term is formulated such that it matches the backward Euler approach of -implicit solvers and there is an additional term for the pseudo-time iteration. Hence, with the revised approach, the discretized momentum equations :eq:`umom` and :eq:`vmom` become - -.. math:: - {\beta^*(u^{k+1}-u^k)\over\Delta t_e} + {m(u^{k+1}-u^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)} u^{k+1} - - {\left(mf+{\tt vrel}\sin\theta\right)} v^{k+1} - = {{\partial\sigma_{1j}^{k+1}\over\partial x_j}} - + {\tau_{ax} - mg{\partial H_\circ\over\partial x} } - + {\tt vrel} {\left(U_w\cos\theta-V_w\sin\theta\right)}, - :label: umomr - -.. math:: - {\beta^*(v^{k+1}-v^k)\over\Delta t_e} + {m(v^{k+1}-v^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)}v^{k+1} - + {\left(mf+{\tt vrel}\sin\theta\right)} u^{k+1} - = {{\partial\sigma_{2j}^{k+1}\over\partial x_j}} - + {\tau_{ay} - mg{\partial H_\circ\over\partial y} } - + {\tt vrel}{\left(U_w\sin\theta+V_w\cos\theta\right)}, - :label: vmomr - -where :math:`\beta^*` is a numerical parameter and :math:`u^n, v^n` are the components of the previous time level solution. -With :math:`\beta=\beta^* \Delta t \left( m \Delta t_e \right)^{-1}` :cite:`Bouillon13`, these equations can be written as - -.. math:: - \underbrace{\left((\beta+1){m\over\Delta t}+{\tt vrel} \cos\theta\ + C_b \right)}_{\tt cca} u^{k+1} - - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb}v^{k+1} - = \underbrace{{\partial\sigma_{1j}^{k+1}\over\partial x_j}}_{\tt strintx} - + \underbrace{\tau_{ax} - mg{\partial H_\circ\over\partial x} }_{\tt forcex} - + {\tt vrel}\underbrace{\left(U_w\cos\theta-V_w\sin\theta\right)}_{\tt waterx} + {m\over\Delta t}(\beta u^k + u^n), - :label: umomr2 - -..
math:: - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb} u^{k+1} - + \underbrace{\left((\beta+1){m\over\Delta t}+{\tt vrel} \cos\theta + C_b \right)}_{\tt cca}v^{k+1} - = \underbrace{{\partial\sigma_{2j}^{k+1}\over\partial x_j}}_{\tt strinty} - + \underbrace{\tau_{ay} - mg{\partial H_\circ\over\partial y} }_{\tt forcey} - + {\tt vrel}\underbrace{\left(U_w\sin\theta+V_w\cos\theta\right)}_{\tt watery} + {m\over\Delta t}(\beta v^k + v^n), - :label: vmomr2 - -At this point, the solutions :math:`u^{k+1}` and :math:`v^{k+1}` are obtained in the same manner as for the standard EVP approach (see equations :eq:`cevpuhat` to :eq:`cevpb`). - -Introducing another numerical parameter :math:`\alpha=2T \Delta t_e ^{-1}` :cite:`Bouillon13`, the stress equations in :eq:`sigdisc` become +Introducing the numerical parameter :math:`\alpha=2T \Delta t_e ^{-1}` :cite:`Bouillon13`, the stress equations in :eq:`sigdisc` become .. math:: \begin{aligned} @@ -553,11 +616,11 @@ Introducing another numerical parameter :math:`\alpha=2T \Delta t_e ^{-1}` :cite {\alpha (\sigma_{12}^{k+1}-\sigma_{12}^{k})} + {\sigma_{12}^{k}} &=& \eta^k D_S^k,\end{aligned} -where as opposed to the classic EVP, the second term in each equation is at iteration :math:`k` :cite:`Bouillon13`. Also, as opposed to the classic EVP, +where as opposed to the classic EVP, the second term in each equation is at iteration :math:`k` :cite:`Bouillon13`. Also, contrary to the classic EVP, :math:`\Delta t_e` times the number of subcycles (or iterations) does not need to be equal to the advective time step :math:`\Delta t`. Finally, as with the classic EVP approach, the stresses are initialized using the previous time level values. The revised EVP is activated by setting the namelist parameter ``revised_evp = true``. -In the code :math:`\alpha` is ``arlx`` and :math:`\beta` is ``brlx``. The values of ``arlx`` and ``brlx`` can be set in the namelist. +In the code :math:`\alpha` is ``arlx`` and :math:`\beta` is ``brlx`` (introduced in Section :ref:`revp-momentum`). The values of ``arlx`` and ``brlx`` can be set in the namelist. It is recommended to use large values of these parameters and to set :math:`\alpha=\beta` :cite:`Kimmritz15`. .. _stress-eap: diff --git a/doc/source/science_guide/sg_horiztrans.rst b/doc/source/science_guide/sg_horiztrans.rst index bafb4c72f..f85f13ee5 100644 --- a/doc/source/science_guide/sg_horiztrans.rst +++ b/doc/source/science_guide/sg_horiztrans.rst @@ -35,7 +35,10 @@ versions but have not yet been implemented. Two transport schemes are available: upwind and the incremental remapping scheme of :cite:`Dukowicz00` as modified for sea ice by -:cite:`Lipscomb04`. The remapping scheme has several desirable features: +:cite:`Lipscomb04`. The upwind scheme is naturally suited for a C grid discretization. As such, the C grid velocity components (i.e. :math:`uvelE=u` at the E point and :math:`vvelN=v` at the N point) are directly passed to the upwind transport scheme. On the other hand, if the B grid is used, :math:`uvel` and :math:`vvel` (respectively :math:`u` and :math:`v` at the U point) are interpolated to the E and N points such that the upwind advection can be performed. Conversely, as the remapping scheme was originally developed for B grid applications, :math:`uvel` and :math:`vvel` are directly used for the advection. If the remapping scheme is used for the C grid, :math:`uvelE` and :math:`vvelN` are first interpolated to the U points before performing the advection. 
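+
+The core of the upwind option can be sketched in a few lines (schematic only, not the CICE code); here ``u_edge[i]`` plays the role of the C grid edge velocity on the right face of cell ``i``, and a periodic 1D domain is assumed:
+
+.. code-block:: python
+
+   import numpy as np
+
+   def upwind_1d(q, u_edge, dx, dt):
+       """One first-order upwind step for a cell-centered tracer q."""
+       # each edge carries the tracer value of its upstream cell
+       q_up = np.where(u_edge > 0.0, q, np.roll(q, -1))
+       flux = u_edge * q_up
+       return q - (dt / dx) * (flux - np.roll(flux, 1))
+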
+ + +The remapping scheme has several desirable features: - It conserves the quantity being transported (area, volume, or energy). diff --git a/doc/source/user_guide/figures/CICE_Cgrid.png b/doc/source/user_guide/figures/CICE_Cgrid.png new file mode 100644 index 0000000000000000000000000000000000000000..db665ff7261d24e6992be012254cf19e57a8c605 GIT binary patch literal 51536 [base85-encoded binary payload of the new user guide figure CICE_Cgrid.png (51536 bytes) omitted]
z&JS=S-jk6!U6(PE!W`|-oReQGW6B)`%3qf{i6H!!QO_H=Iygc#W+b5sM&fOqhu?9g zfZF;sKQv_6s{Ta-V|pQ$n@U|))Z_@hak2zsbf4CV{Wo8$3_$k-4?BIOc_a@W<1^$w zZ$z>Tuan_1F5#KkQ^uDF9dhEGHsLRucY%xsN?tK)GXSk2SA!X810(DutnCUAWU$im?cpXM%Sph|O z>pYM3H6gdvv$HZzfZ&yW`?$9=w1@Ut<)2;D-CZJ6hbV!I5X6h+s&EOJPM z_4W0!2B3Hy?;P@ZLHas}{hb0f`#aJJ*Zm=Z3WO2yeZ8YLwFQu{`dlQ6pmA=#q5O=7 zBcV8^DcZgb=W@~2Xj~D9=AVssoY|O2bZ$}lUVQ3jcj~~q?Ffxx!2-pOTlEkeBds<^0Skpx1g}lqjByo zb;VpC|CyP46x}CRh3tg(=Yi+}@w-icGowOCH94Q~O*OZBZS`zJdGHtjEzNQ!a}!f; zYvgZdyiFh)nNXl=F$9=evkOQXuRJ*bbzur)A$T#H20_=H{%l_nF(DhoCPt2IfW;b( zwW+eL>?Vvak~5)cXNqx@KS!my3?D$QppvkGr5{y_X>TXfAQ|z-fUYBBVYiN(yu+ul zPodh8+D=9re1V5_5lRcD?XauVKqqpAyZx6GsUYs@a)e_@?r+Jt!Y{FW^R$p5?!q)L zdA#HX61_d4tK+q^a*D&vbqr8HF$X7(&smKL-0Dq>k({D!OVl2}?%4FEmX@kHSiZo$ zKWE@lnb071&^bq7w+uvZ06#x#>e2d~lEEu3E-s7<(df1};#=lO>Pd%DnPtDPO z-NL&yU7n(F&*wNbYW99QGoRCR0830STzp>G)$FyqOw$XfY$I}sd~7#RcCo5E#z7qU zWDOl2tKnaOnAj|?<%G<|t!5wZZ)bngIc-_+l_7BNYE>9)3r;T&#N zL8r+^#jp-^O^=R`%iO=8+?X;n=sL|@h|0hfcWOH#8#Z7iU*g!sDgWP8ltSPyyfKd! zHwFs59;JUOp? za~STizi!bf7sJ^&(%UCA3yT_ufi35IFqtelnkA7S!IuV4H03}8MiZSY%K>DZomhDN zquzfGyHfEgkdav=PAordj#_!3fu%usWrNl~WE?nIbOFhdBg0(HEv>~6m_0?r@{r2W zaDCxB%f{cbTMd}gFINO#$rED#56Z@U@<7$p9V4jtLz4dS z$I@cJEZx}JP!u>Eeee*V`)Nmx`fC6E->5#M_#O9GR9HoHhun72X==gCtB+E+t~}eP z$KbiA;hWToJ-(oWu3F?{z3r@(GZrdoLvY!;8v6jpM~ z_@AYp9dMiF!FC=i4>Le*9A^8J{r&yrNVT6!9ZTY~AAfjA7Xy$T0CENM5A}_p;;Dfs zQkukb(j^e$7ix{EW~*Hg+1oBO8=LCMPvJ`l4W(;rY-}sDpRn7QrGn^U8w_vag%2Mn zH;U{aeMHQASVIf<@76)q<8^s&oa;N1(%?e&Lvk3WW@I#hf3HdT;nHZMaU#`|$ylAY zu8k0A{WRt{_|m5t^zU)bXdEf7b1^W*x7VuoBI*JtDkvui5Aa3+P_WQ3leR2aVx-Y9 zfSTj@?mDpTLI#d22*a0x+ZSK~0U&nMue2LYy1PF>G3eR?-gyRQ&Ejz7EETi?qWFaq z@>k0t$8PPmS9j=<8*E)f%sVbGzaHXTa2XAeYA!-D5Y>CG1%Mm<9*C1ifzPw2Y_mh; z(r6*KVuS+CPdrR1aA3pTu^(MrSa^TB38H4kxhfe7R2%SH8f=CFv!<&cih2hF3k@Mq z<$(r(zU6IiPUj^bTkzT#BP;ZRwIW$J1|fB%Wn@CfnxbUix~`;ZS2!70d$uP{K{PW0 zi0FSxL?xF^$y=L2 zGknv!7vv-C5(R@zx_g_d_OeLEiIFa3M_lLudg$m}-9GbVX-&)X+?J-F+SXtN-@I>3 z!$IM>E=fDGk7gs&GYFYM>OKF6nj#03kM|}aw&I)_o!kB+4#^)u91ZsnlO^YH#|ZDP z8?>T7e5?P=o0!qR>g^fd+N>*GKWYkru&8SPRLX;CT?5q5!eejIb9koC*X5loWF(Zk zNJ**m5)%_yA;-->L2Nl0|ZcAVZC7#L)^vs^mlZ3kiU zR8BoO>4PGAqsa~Xx}Qq)v)1DY4l~agXLZtZuE(&z)_M8oc20J7^G1$=>*#EMNySWF zV=R?`Q*=ApwT#jKk3bdXO9CPs%6xO$2Sq#y8|}~<*|cWGHJj&V zd6+T4Kgm5J5a{@8pjrzo~9%C&A}okv2M?T(*! 
zqLU3bBd~%8x+ieYm?h2H z361Ihz6t_acoeC8QoPXjCM;XTkllpoS^;D^xaI0vZg!^?n0PZXj){+~j)}N@ee5YY zyyd}ZZf!X1HdPzOjSUUKpl*?3Z^?T2VcrcHmOjVXNPJXk-*FBr3Sam@#u zdAZsKm*VwmTznL?w~SkpCDNN>2Dn;UnttDd{JRo&5vxUDCk$5WfelW6eI>8zO-&7q zMQ`HXwu!B^v~5euyXRCjJzHFpt^eR4=aWJ;W5{X2m&$y@sm+&cd?3p}tk1dz&6>4QMCTn%ZR@fzQY5JZ z98@BkatqVVrkf$9(1r3rxjT14%zVj(TrrJqkSu|>1vo^_rwH(0FU);)VqhuLdHE?f|!1`M+GItLQBmE|c0 z$fO4Ijn|pysw(Ya;tL;h;EnDP2ezZ|l#f!eF2lT5t#IuN0LcQ&@`gL1nqV3cz2w8b z&%%cjmld`T%O~i452qij%A#zrY(L8UTy$7f_<9X1SPf78RMf%0!uA-2mkv}tTTvP5 z!CLZriIRZf$ok^YqWxay-yw3Xbnfo|M*&t1W@Ftqfdc{@xx8BO;e- zHYL2j%fW_W|M#iu_fxh3_8TFSB=`;G|Jj8c@zH~B!M+>&?*S5??X3%@VY~L$g}=)| z$-$t~Gq-2{yO9geK~$!DjtTo<1B5*G5fhkcIjdaX!<`s3-4g(+#s6JrZi5E!{a(y2 zNgC&Vna?z@rNoHEbrzJ>5n!UuN7haq zv_f+&hlCv9IJ*40uIw@FD78Gf9Xg z=}k)Ri8_42^H2by`4i1SI8g!tvVBdT4;u(O@l*UHlv9wLKduQ}&Ih`{PGX?a5-3bA zpcJ9n-b+fn`^AWRL*=jq#jP(a!Kj zA8)?~X7L0VMv&?!#M$5*(P2@D+F3+Q=_J-JRPXodu|JYS5DDf#w{g zhUyV9K#lGMg~k45k7)U!{>dYJ_E87VgJy^$iDkrv6s&RowK6`F$SVw-Xx83u>F>V2 zgH&~ew~YKR8aF!cUUCI8ct{~42iOvL|o;(UWt`XUhQ52$J*tCowh#nH!;6v$2g1K6$=St)*N2Zq z*#Q9c{o&Bdo5%S8x1^l!-QSeJ!lLe-Yt*{^t86X!OerD+wdl?z>6!w~S;YGKIzf;< zvcvlQ?PrW&T1;L0zFFKl0Z8#D`XIp{`GJUsC@Zh3Z{#{;QZ)5x47()zoRnFs3USUy ztpUkzos=cE@yEs>IU`CREa3X|k(7ci3JGA0U9s)fc9y<>|9;udj416nEPBZo!63%+&7jUb#>`_2H_3NL$YwkKW@Y}s!H7Lu+4Px+I05-t`b{0j$N5U)(>&T2mc6I0`2 z0wK2fP0{)Xpw@02NuvS%bkNqmpG%Y$_M=#tvb}isZYIH5DNqHC=(`r{D6;MlWY7-l z`uJ-YnFcK8aiskY5v;So9`L z3>T%v^vycYHUx;`;m`EMGoOgEiY!g#wqwh?_QYIL8HEC`BuEi&e$V7e$6(}u(0fP8 z8h+Mg+)KAnbto^gHu=o@pLe^EB^v);Wy#{7BE_Dn87*>=4HNX@tR2%eVLO4(NMur3#SZG%*9gt}(t|Kl?*1K_#PsraLr1@R zZA$~(G;Mdv28Ac#bLJ|oZj~#f^$KNiagk_Cwe^_l&&qMI?B_h2B^!^rd`VGB=}P-V zc4*Yly7=k@scck>SsQbXZ=A)BmEbFzdb^GHj#53x*{^obg}Nog(Uq0?hQ{ZM*%cI{ zy*TeGRmeOJFK)ZSrJmsS5QDL9<<&|gS_ypM^W%q-%a1g2_KA!lrF#!VoosY2mu)%w zt0i9Vu<4M|aH72EIL#9=vgH^qHNPHqu~YWh10rkI4je$;%Nxr?FB zufEQ_Hjrnq&Zyl!c7`nKGQQ(VPkKaQH+EHv{2*&5VU3+Xu)4-})A}plQ zl+zu@q%DvZDNuI`qW*f_gF9VAb%_nd@x4k~4;!}SiHIp!8|C|C|vI`m9enInsHVn02}@4iKu^E_nb80v!a zvR7V9Bx{up3y$Av<7&}R<8CvdlF~g|$1tT-bZ2c(@wA@wty|$z)_Y+mg{x$reB&x1 zlE&imBN&}`4vZR5VJp<{HRTjifrFR_rYcGO@3 zy;})OBmjD`D$z?Nu4<c1XUW9AmBC<=ATQ8Lde*CGQy*F4a%!rSg4 zGXupRHu*fY?FvfN#pDu5yvfjAOFL833?5sc;wZ@2#yI%JvA=pgWjCJB=0v9`i!~w~ zb4i4CfG*1`L2svW_lMH+_g(X*JHbpZtqTlCJ0gSu0w`TmLT;9*>0Co;`NJ;>nT)?s*T(~HP^3_x#HuYY1AK>EwdbQ)CVM0 zGlpAEd0dH`%1&0Zkzx7vzS#wJz9edU&q5K0ND;-F+CYQ9xCrm-%Vuna0H9tS&ZH+- z=OZyP5CKR2!pb);B`q!E9UqK3MHwL=WwpEnPeJs=5vyL+-x&z)&ah@R~vuHMm!wpyYg>uk?BovVj5R^&2 zHX}tliRE~oI90oP`l1NW`w5F_k)*!%qT54aN=grFCYBqX!Q!E@Qm#dCVyXF(`(SR$ zfCm>9OX2Rz&n>Hw5e9hJ%EM8*Jh9ic`$*cNFgD#XX6;6AwksdT$9dj2(N(x`8tW)o zPs#i(J#7_@4TrVO@1}Qb$2PEZrq1ioS{@^hFJme)JI2?M9DD0yj)}3;Q%QLN8F#q^ zhz}G!)rt%B-orj6OT|>+-;Ne{mv7fxs$m#@iN>`%O;cI5G;$^9;HA&1mgC&dUfn?c+@_d3^0L>%+G5 zVQP(>Yh4#_G^!-5TGe`0TJ_+vZ-zaXquFW@*s@D=sn`w8_sFcL30xica#s7|@;gn| z&2DPH8vHY3_`LZ8cWPmLnJXR=k1=R2J$zS zgn=&$(^*cLwXV*s>g$!*Z121akJe_Oz zs~F1NRWff&uILPSNg?(^;MBZ@@^B@GTa#ZRb2!T}7O+T_KWvj6%qxzzmAa6TyC{g1 zeb(@M{6zFJe&xn@Ei{{vPe-9~SJM~iJdAm{N__g(e3f*$L#kt?5*s_Vyxv$$yxqt# z8kzaKxgilEZy8*UYuCSYo7OL1L6J6%8 zQRDa=CXpz#nef2!q+9&fPS3k;nYWvb#ygKjaUF$>3}|3p!3~ivkl!ncWcqaUPzC== z0n6O$sl5_s)(a|;w&!a0BGxox()o-H$s|rhV4-K|{F5Y#-*7(?zmr9)fh{Hxj0x4i z;G2_SPCUA-`GkQ~JocHKxZFiC!sQ$KNzJL9TU53e8c^&Uyr-)RWBb?QC)Q|AO>DIA z&G)~JxBMRJ=s4kj$y|RZyRU0?Xp*{O08dL6i|2D(pW8L+AssWmPExovqDzRHb$Upn zWQe;YVQA^AV)aiGY)Ngsxj6E-0W4m9UdfxJLSOsU+?FuCdQ}PQM~>j=NQ;Zz>x`I} zSQFMLVbQsJ!WykSU%7aXK``oEJH>sS&G|_u$)UxS!M@H&HDUK(_W~?V9Qjscs$*p- 
zU)cW^qsdSoP*ZeeVc@ji&z0H4uHiC{m8PIXjB@j;wv(z!@$tluvodR`nf5)3R*Xvsc$D`YgSBe6%th1TI>bkG8TD~gEiaNR1NEK4UMi9rlqv}!Src1^9%T1GU z+x>GQPYSiQKvJH@6#ft!yFdpX-bswntgh?wNT1ij!+62+j6lksPT}_tvz^oDW`7n= zuv)2NXtEmHt#c=e1=Vhg^on4u)n33qnJ4O;U#%JGa>I?G(rxVe?2jrsj>%SH{)@5B~OK4u6=C>#cUH=VGs8k^bj`3qd1(*my4jz3DjkTUu^- zAAy8scrS>24C?o?2Zm@~J+4m3E}X!1RV`3u(NqiiH8h|S7{?~5R2qxDgmV_XO)h)u z&;LNFv` POP tripole grid and land mask. These are binary unformatted, direct access, Big Endian files. +The input grid file for the B-grid and CD-grid is identical. That file +contains each cells' HTN, HTE, ULON, ULAT, and kmt value. From those +variables, the longitude, latitude, grid lengths (dx and dy), areas, +and masks can be derived for all grids. Table :ref:`tab-gridvars` lists +the primary prognostic grid variable names on the different grids. + +.. _tab-gridvars: + +.. table:: Primary CICE Prognostic Grid Variable Names + + +----------------+-------+-------+-------+-------+ + | variable | T | U | N | E | + +================+=======+=======+=======+=======+ + | longitude | TLON | ULON | NLON | ELON | + +----------------+-------+-------+-------+-------+ + | latitude | TLAT | ULAT | NLAT | ELAT | + +----------------+-------+-------+-------+-------+ + | dx | dxt | dxu | dxn | dxe | + +----------------+-------+-------+-------+-------+ + | dy | dyt | dyu | dyn | dye | + +----------------+-------+-------+-------+-------+ + | area | tarea | uarea | narea | earea | + +----------------+-------+-------+-------+-------+ + | mask (logical) | tmask | umask | nmask | emask | + +----------------+-------+-------+-------+-------+ + | mask (real) | hm | uvm | npm | epm | + +----------------+-------+-------+-------+-------+ + + In CESM, the sea ice model may exchange coupling fluxes using a different grid than the computational grid. This functionality is activated using the namelist variable ``gridcpl_file``. @@ -331,27 +376,36 @@ testing. Masks ***** -A land mask hm (:math:`M_h`) is specified in the cell centers, with 0 -representing land and 1 representing ocean cells. A corresponding mask -uvm (:math:`M_u`) for velocity and other corner quantities is given by +A land mask hm (:math:`M_h`) is specified in the cell centers (on the +T-grid), with 0 +representing land and 1 representing ocean cells. Corresponding masks +for the U, N, and E grids are given by .. math:: M_u(i,j)=\min\{M_h(l),\,l=(i,j),\,(i+1,j),\,(i,j+1),\,(i+1,j+1)\}. -The logical masks ``tmask`` and ``umask`` (which correspond to the real masks -``hm`` and ``uvm``, respectively) are useful in conditional statements. +.. math:: + M_n(i,j)=\min\{M_h(l),\,l=(i,j),\,(i,j+1)\}. + +.. math:: + M_e(i,j)=\min\{M_h(l),\,l=(i,j),\,(i+1,j)\}. + +The logical masks ``tmask``, ``umask``, ``nmask``, and ``emask`` +(which correspond to the real masks ``hm``, ``uvm``, ``npm``, and ``epm`` +respectively) are useful in conditional statements. In addition to the land masks, two other masks are implemented in *dyn\_prep* in order to reduce the dynamics component’s work on a global -grid. At each time step the logical masks ``ice_tmask`` and ``ice_umask`` are +grid. At each time step the logical masks ``icetmask`` and ``iceumask`` are determined from the current ice extent, such that they have the value “true” wherever ice exists. They also include a border of cells around the ice pack for numerical purposes. These masks are used in the dynamics component to prevent unnecessary calculations on grid points where there is no ice. 
 Improved parallel performance may result from utilizing halo masks for
 boundary updates of the full ice state, incremental remapping transport,
@@ -367,6 +421,122 @@ or southern hemispheres, respectively. Special constants (``spval`` and
 points in the history files and diagnostics.
 
+
+.. _interpolation:
+
+****************************
+Interpolating between grids
+****************************
+
+Fields in CICE are generally defined at particular grid locations, such as T cell centers,
+U corners, or N or E edges. These locations are assigned internally in CICE based on the ``grid_ice``
+namelist variable. Forcing/coupling fields are also associated with a
+specific set of grid locations that may or may not be the same as on the internal CICE model grid.
+The namelist variables ``grid_atm`` and ``grid_ocn`` define the forcing/coupling grids.
+The ``grid_ice``, ``grid_atm``, and ``grid_ocn`` variables are independent and take
+values such as ``A``, ``B``, ``C``, or ``CD``, consistent with the Arakawa grid convention :cite:`Arakawa77`.
+The relationship between the grid system and the internal grids is shown in :ref:`tab-gridsys`.
+
+.. _tab-gridsys:
+
+.. table:: Grid System and Type Definitions
+   :align: center
+
+   +--------------+----------------+----------------+----------------+
+   | grid system  | thermo grid    | u dynamic grid | v dynamic grid |
+   +==============+================+================+================+
+   | A            | T              | T              | T              |
+   +--------------+----------------+----------------+----------------+
+   | B            | T              | U              | U              |
+   +--------------+----------------+----------------+----------------+
+   | C            | T              | E              | N              |
+   +--------------+----------------+----------------+----------------+
+   | CD           | T              | N+E            | N+E            |
+   +--------------+----------------+----------------+----------------+
+
+For all grid systems, thermodynamic variables are always defined on the ``T`` grid,
+for both the model and the model forcing/coupling fields. However, the dynamics u and v fields vary.
+On the ``CD`` grid, there are twice as many u and v fields as on the other grids. Within the CICE model,
+the variables ``grid_ice_thrm``, ``grid_ice_dynu``, ``grid_ice_dynv``, ``grid_atm_thrm``,
+``grid_atm_dynu``, ``grid_atm_dynv``, ``grid_ocn_thrm``, ``grid_ocn_dynu``, and ``grid_ocn_dynv`` are
+character strings (``T``, ``U``, ``N``, ``E``, ``NE``) derived from the ``grid_ice``, ``grid_atm``,
+and ``grid_ocn`` namelist values.
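+As a hypothetical example (the namelist group name ``grid_nml`` and the
+specific values shown here are assumptions for illustration, not a
+prescribed configuration), a C-grid model run with A-grid forcing/coupling
+fields might be selected in **ice_in** with:
+
+.. code-block:: fortran
+
+   &grid_nml
+     grid_ice = 'C'   ! internal CICE grid system
+     grid_atm = 'A'   ! atmosphere forcing/coupling grid
+     grid_ocn = 'A'   ! ocean forcing/coupling grid
+   /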
+
+The CICE model has several internal methods that will interpolate (i.e., map or average) fields on the
+(``T``, ``U``, ``N``, ``E``, ``NE``) grids to the (``T``, ``U``, ``N``, ``E``) grids. An interpolation
+to an identical grid results in a field copy. The generic interface to this method is ``grid_average_X2Y``,
+and there are several forms.
+
+.. code-block:: fortran
+
+   subroutine grid_average_X2Y(type,work1,grid1,work2,grid2)
+      character(len=*)    , intent(in)  :: type          ! mapping type (S, A, F)
+      real (kind=dbl_kind), intent(in)  :: work1(:,:,:)  ! input field(nx_block, ny_block, max_blocks)
+      character(len=*)    , intent(in)  :: grid1         ! work1 grid (T, U, N, E)
+      real (kind=dbl_kind), intent(out) :: work2(:,:,:)  ! output field(nx_block, ny_block, max_blocks)
+      character(len=*)    , intent(in)  :: grid2         ! work2 grid (T, U, N, E)
+
+where ``type`` is the interpolation type, with the following valid values:
+
+type = ``S`` is a normalized, masked, area-weighted interpolation
+
+.. math::
+   work2 = \frac{\sum_{i=1}^{n} (M_{1i}A_{1i}work1_{i})} {\sum_{i=1}^{n} (M_{1i}A_{1i})}
+
+type = ``A`` is a normalized, unmasked, area-weighted interpolation
+
+.. math::
+   work2 = \frac{\sum_{i=1}^{n} (A_{1i}work1_{i})} {\sum_{i=1}^{n} (A_{1i})}
+
+type = ``F`` is a normalized, unmasked, conservative flux interpolation
+
+.. math::
+   work2 = \frac{\sum_{i=1}^{n} (A_{1i}work1_{i})} {n*A_{2}}
+
+where :math:`A` is the appropriate gridcell area and :math:`M` is the gridcell mask.
+Another form of the ``grid_average_X2Y`` interface is
+
+.. code-block:: fortran
+
+   subroutine grid_average_X2Y(type,work1,grid1,wght1,mask1,work2,grid2)
+      character(len=*)    , intent(in)  :: type          ! mapping type (S, A, F)
+      real (kind=dbl_kind), intent(in)  :: work1(:,:,:)  ! input field(nx_block, ny_block, max_blocks)
+      real (kind=dbl_kind), intent(in)  :: wght1(:,:,:)  ! input weight(nx_block, ny_block, max_blocks)
+      real (kind=dbl_kind), intent(in)  :: mask1(:,:,:)  ! input mask(nx_block, ny_block, max_blocks)
+      character(len=*)    , intent(in)  :: grid1         ! work1 grid (T, U, N, E)
+      real (kind=dbl_kind), intent(out) :: work2(:,:,:)  ! output field(nx_block, ny_block, max_blocks)
+      character(len=*)    , intent(in)  :: grid2         ! work2 grid (T, U, N, E)
+
+In this case, the input arrays ``wght1`` and ``mask1`` are used in the interpolation equations instead of gridcell
+area and mask. This version allows the user to define the weights and mask
+explicitly. This implementation is supported only for type = ``S`` or ``A`` interpolations.
+
+A final form of the ``grid_average_X2Y`` interface is
+
+.. code-block:: fortran
+
+   subroutine grid_average_X2Y(type,work1a,grid1a,work1b,grid1b,work2,grid2)
+      character(len=*)    , intent(in)  :: type          ! mapping type (S, A, F)
+      real (kind=dbl_kind), intent(in)  :: work1a(:,:,:) ! input field(nx_block, ny_block, max_blocks)
+      character(len=*)    , intent(in)  :: grid1a        ! work1a grid (N, E)
+      real (kind=dbl_kind), intent(in)  :: work1b(:,:,:) ! input field(nx_block, ny_block, max_blocks)
+      character(len=*)    , intent(in)  :: grid1b        ! work1b grid (N, E)
+      real (kind=dbl_kind), intent(out) :: work2(:,:,:)  ! output field(nx_block, ny_block, max_blocks)
+      character(len=*)    , intent(in)  :: grid2         ! work2 grid (T, U)
+
+This version supports mapping from an ``NE`` grid to a ``T`` or ``U`` grid. In this case, the ``1a`` arguments
+are for either the ``N`` or ``E`` field and the ``1b`` arguments are for the complementary field (``E`` or ``N``, respectively).
+At present, only ``S`` type mappings are supported with this interface.
+
+In all cases, the ``work1``, ``wght1``, and ``mask1`` input arrays should have correct halo values when the
+interface is called. Examples of usage
+can be found in the source code, but the following example maps the ``uocn`` and ``vocn`` fields from their native
+forcing/coupling grid to the ``U`` grid using a masked, area-weighted average method.
+
+.. code-block:: fortran
+
+   call grid_average_X2Y('S', uocn, grid_ocn_dynu, uocnU, 'U')
+   call grid_average_X2Y('S', vocn, grid_ocn_dynv, vocnU, 'U')
+
+
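+Similarly, a hypothetical call to the weighted form of the interface (the
+array names ``workT``, ``wghtT``, ``maskT``, and ``workU`` are illustrative
+only, not names from the CICE sources) would average a T-cell field to the
+``U`` grid using user-supplied weights and mask:
+
+.. code-block:: fortran
+
+   ! masked, weighted average from the T grid to the U grid using
+   ! explicit weights (wghtT) and mask (maskT) instead of the
+   ! gridcell area and land mask
+   call grid_average_X2Y('S', workT, 'T', wghtT, maskT, workU, 'U')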
 .. _performance:
 
 ***************
 
@@ -466,7 +636,9 @@ block equally.
 This is useful in POP which always has work in each block and is
 written with a lot of array syntax requiring calculations over entire
 blocks (whether or not land is present). This option is provided in CICE as well for
-direct-communication compatibility with POP. The ‘latitude’ option
+direct-communication compatibility with POP. Blocks that contain 100%
+land grid cells are eliminated with ‘block’. The ‘blockall’ option is identical
+to ‘block’ but does not do land block elimination. The ‘latitude’ option
 weights the blocks based on latitude and the number of ocean grid
 cells they contain. Many of the non-cartesian decompositions support
 automatic land block elimination and provide alternative ways to
@@ -855,9 +1027,9 @@ t_e`) is thus
 
 .. math::
    dte = dt\_dyn/ndte.
 
-A second parameter, :math:`E_\circ` (``eyc``), defines the elastic wave
+A second parameter, :math:`E_\circ` (``elasticDamp``), defines the elastic wave
 damping timescale :math:`T`, described in Section :ref:`dynam`, as
-``eyc * dt_dyn``. The forcing terms are not updated during the subcycling.
+``elasticDamp * dt_dyn``. The forcing terms are not updated during the subcycling.
 Given the small step (``dte``) at which the EVP dynamics model is subcycled,
 the elastic parameter :math:`E` is also limited by stability
 constraints, as discussed in :cite:`Hunke97`. Linear stability
@@ -1066,6 +1238,11 @@ namelist, the point associated with ``lonpnt(1)`` and ``latpnt(1)`` is used.
 ``debug_model`` is normally used when the model aborts and needs to be
 debugged in detail at a particular (usually failing) grid point.
 
+Memory use diagnostics are controlled by the logical namelist variable ``memory_stats``.
+This feature uses an intrinsic query in C defined in **ice\_memusage\_gptl.c**.
+Memory diagnostics will be written at the frequency defined by
+``diagfreq``.
+
 Timers are declared and initialized in **ice\_timers.F90**, and the
 code to be timed is wrapped with calls to *ice\_timer\_start* and
 *ice\_timer\_stop*. Finally, *ice\_timer\_print* writes the results to
@@ -1123,15 +1300,17 @@ The timers use *MPI\_WTIME* for parallel runs and the F90 intrinsic
    +--------------+-------------+----------------------------------------------------+
    | 14           | Bound       | boundary conditions and subdomain communications   |
    +--------------+-------------+----------------------------------------------------+
-   | 15           | BGC         | biogeochemistry, part of Thermo timer              |
+   | 15           | BundBound   | halo update bundle copy                            |
+   +--------------+-------------+----------------------------------------------------+
+   | 16           | BGC         | biogeochemistry, part of Thermo timer              |
    +--------------+-------------+----------------------------------------------------+
-   | 16           | Forcing     | forcing                                            |
+   | 17           | Forcing     | forcing                                            |
    +--------------+-------------+----------------------------------------------------+
-   | 17           | 1d-evp      | 1d evp, part of Dynamics timer                     |
+   | 18           | 1d-evp      | 1d evp, part of Dynamics timer                     |
    +--------------+-------------+----------------------------------------------------+
-   | 18           | 2d-evp      | 2d evp, part of Dynamics timer                     |
+   | 19           | 2d-evp      | 2d evp, part of Dynamics timer                     |
    +--------------+-------------+----------------------------------------------------+
-   | 19           | UpdState    | update state                                       |
+   | 20           | UpdState    | update state                                       |
    +--------------+-------------+----------------------------------------------------+
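+As an illustration of the timer calls described above (the timer index
+``timer_tmp1`` is hypothetical; the available indices are defined in
+**ice\_timers.F90**), a section of code is timed by wrapping it as follows:
+
+.. code-block:: fortran
+
+   call ice_timer_start(timer_tmp1)  ! start the timer
+   ! ... code section to be timed ...
+   call ice_timer_stop(timer_tmp1)   ! stop the timer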
 .. _restartfiles:
 
diff --git a/doc/source/user_guide/ug_testing.rst b/doc/source/user_guide/ug_testing.rst
index 005f4f851..b8d42ad6d 100644
--- a/doc/source/user_guide/ug_testing.rst
+++ b/doc/source/user_guide/ug_testing.rst
@@ -229,7 +229,9 @@ boundary around the entire domain. It includes the following namelist modificat
 
 - ``dyrect``: ``16.e5`` cm
 - ``ktherm``: ``-1`` (disables thermodynamics)
 - ``coriolis``: ``constant`` (``f=1.46e-4`` s\ :math:`^{-1}`)
-- ``ice_data_type`` : ``box2001`` (special ice concentration initialization)
+- ``ice_data_type`` : ``box2001`` (special initial ice mask)
+- ``ice_data_conc`` : ``p5``
+- ``ice_data_dist`` : ``box2001`` (special ice concentration initialization)
 - ``atm_data_type`` : ``box2001`` (special atmospheric and ocean forcing)
 
 Ocean stresses are computed as in :cite:`Hunke01` where they are circular and centered
@@ -257,7 +259,9 @@ boundary around the entire domain. It includes the following namelist modificat
 
 - ``ktherm``: ``-1`` (disables thermodynamics)
 - ``kridge``: ``-1`` (disables ridging)
 - ``kdyn``: ``-1`` (disables dynamics)
-- ``ice_data_type`` : ``boxslotcyl`` (special ice concentration and velocity initialization)
+- ``ice_data_type`` : ``boxslotcyl`` (special initial ice mask)
+- ``ice_data_conc`` : ``c1``
+- ``ice_data_dist`` : ``uniform``
 
 Dynamics is disabled because we directly impose a constant ice velocity.
 The ice velocity field is circular and centered in the square domain,
 and such that the slotted cylinder makes a complete revolution with a period :math:`T=` 12 days :