Commit
Update CICE to consortium master (NOAA-EMC#23)
updates include:

* deprecate upwind advection (CICE-Consortium#508)
* add implicit VP solver (CICE-Consortium#491)
DeniseWorthen committed Sep 22, 2023
1 parent 334c16b commit c075839
Showing 33 changed files with 8,237 additions and 61 deletions.
489 changes: 489 additions & 0 deletions cicecore/cicedyn/dynamics/ice_dyn_eap.F90

Large diffs are not rendered by default.

24 changes: 24 additions & 0 deletions cicecore/cicedyn/general/ice_forcing.F90
@@ -5122,6 +5122,7 @@ end subroutine ocn_data_ispol_init
!
subroutine box2001_data_atm

<<<<<<< HEAD:cicecore/cicedyn/general/ice_forcing.F90
! wind fields as in Hunke, JCP 2001
! these are defined at the u point
! authors: Elizabeth Hunke, LANL
@@ -5130,19 +5131,35 @@ subroutine box2001_data_atm
use ice_calendar, only: timesecs
use ice_blocks, only: block, get_block, nx_block, ny_block, nghost
use ice_flux, only: uatm, vatm, wind, rhoa, strax, stray
=======
! wind and current fields as in Hunke, JCP 2001
! these are defined at the u point
! authors: Elizabeth Hunke, LANL

use ice_domain, only: nblocks
use ice_domain_size, only: max_blocks
use ice_blocks, only: nx_block, ny_block, nghost
use ice_flux, only: uocn, vocn, uatm, vatm, wind, rhoa, strax, stray
use ice_grid, only: uvm, to_ugrid
>>>>>>> f773ef38 (Update CICE to consortium master (#23)):cicecore/cicedynB/general/ice_forcing.F90
use ice_state, only: aice

! local parameters

integer (kind=int_kind) :: &
iblk, i,j ! loop indices

<<<<<<< HEAD:cicecore/cicedyn/general/ice_forcing.F90
integer (kind=int_kind) :: &
iglob(nx_block), & ! global indices
jglob(ny_block) ! global indices

type (block) :: &
this_block ! block information for current block
=======
real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: &
aiu ! ice fraction on u-grid
>>>>>>> f773ef38 (Update CICE to consortium master (#23)):cicecore/cicedynB/general/ice_forcing.F90

real (kind=dbl_kind) :: &
secday, pi , puny, period, pi2, tau
@@ -5154,6 +5171,8 @@ subroutine box2001_data_atm
call icepack_query_parameters(pi_out=pi, pi2_out=pi2, puny_out=puny)
call icepack_query_parameters(secday_out=secday)

call to_ugrid(aice, aiu)

period = c4*secday

do iblk = 1, nblocks
@@ -5178,9 +5197,14 @@
! wind stress
wind(i,j,iblk) = sqrt(uatm(i,j,iblk)**2 + vatm(i,j,iblk)**2)
tau = rhoa(i,j,iblk) * 0.0012_dbl_kind * wind(i,j,iblk)
<<<<<<< HEAD:cicecore/cicedyn/general/ice_forcing.F90

strax(i,j,iblk) = aice(i,j,iblk) * tau * uatm(i,j,iblk)
stray(i,j,iblk) = aice(i,j,iblk) * tau * vatm(i,j,iblk)
=======
strax(i,j,iblk) = aiu(i,j,iblk) * tau * uatm(i,j,iblk)
stray(i,j,iblk) = aiu(i,j,iblk) * tau * vatm(i,j,iblk)
>>>>>>> f773ef38 (Update CICE to consortium master (#23)):cicecore/cicedynB/general/ice_forcing.F90

! initialization test
! Diagonal wind vectors 1
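The wind-stress lines above apply a quadratic bulk drag law, with the stress weighted by the ice fraction interpolated to the u point (aiu, via to_ugrid). A minimal, self-contained sketch of that calculation, using hypothetical scalar inputs in place of the model's block arrays and the drag coefficient 0.0012 from the hunk above:

program drag_law_sketch
   implicit none
   integer, parameter :: dbl_kind = selected_real_kind(13)
   real (kind=dbl_kind), parameter :: cd = 0.0012_dbl_kind ! drag-law constant from the hunk above
   real (kind=dbl_kind) :: uatm, vatm, rhoa, aiu, wind, tau, strax, stray

   uatm = 5.0_dbl_kind    ! zonal wind (m/s), hypothetical value
   vatm = -3.0_dbl_kind   ! meridional wind (m/s), hypothetical value
   rhoa = 1.3_dbl_kind    ! air density (kg/m^3), hypothetical value
   aiu  = 0.9_dbl_kind    ! ice fraction at the u point, hypothetical value

   wind  = sqrt(uatm**2 + vatm**2)   ! wind speed
   tau   = rhoa * cd * wind          ! quadratic drag factor
   strax = aiu * tau * uatm          ! x-component of wind stress
   stray = aiu * tau * vatm          ! y-component of wind stress

   print *, 'strax, stray =', strax, stray
end program drag_law_sketch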
4 changes: 3 additions & 1 deletion cicecore/cicedyn/general/ice_step_mod.F90
@@ -848,6 +848,7 @@ subroutine step_dyn_horiz (dt)

use ice_dyn_evp, only: evp
use ice_dyn_eap, only: eap
use ice_dyn_vp, only: implicit_solver
use ice_dyn_shared, only: kdyn, ktransport
use ice_flux, only: init_history_dyn
!deprecate upwind use ice_transport_driver, only: advection, transport_upwind, transport_remap
@@ -861,11 +862,12 @@
call init_history_dyn ! initialize dynamic history variables

!-----------------------------------------------------------------
! Elastic-viscous-plastic ice dynamics
! Ice dynamics (momentum equation)
!-----------------------------------------------------------------

if (kdyn == 1) call evp (dt)
if (kdyn == 2) call eap (dt)
if (kdyn == 3) call implicit_solver (dt)

!-----------------------------------------------------------------
! Horizontal ice transport
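With this change, step_dyn_horiz dispatches on kdyn to three momentum solvers: EVP (kdyn = 1), EAP (kdyn = 2), and the new implicit VP solver (kdyn = 3). A sketch of the ice_in namelist fragment that would select the VP solver (the dynamics_nml group name follows common CICE convention and is an assumption here; only the kdyn mapping is taken from the code above):

! ice_in fragment, a sketch; group name assumed from CICE convention
&dynamics_nml
  kdyn = 3   ! 1 = EVP, 2 = EAP, 3 = implicit VP solver (added in this update)
/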
70 changes: 70 additions & 0 deletions cicecore/cicedyn/infrastructure/comm/mpi/ice_global_reductions.F90
@@ -36,6 +36,7 @@ module ice_global_reductions
private

public :: global_sum, &
global_allreduce_sum, &
global_sum_prod, &
global_maxval, &
global_minval
@@ -55,6 +56,12 @@ module ice_global_reductions
global_sum_scalar_int
end interface

interface global_allreduce_sum
module procedure global_allreduce_sum_vector_dbl!, &
! module procedure global_allreduce_sum_vector_real, & ! not yet implemented
! module procedure global_allreduce_sum_vector_int ! not yet implemented
end interface

interface global_sum_prod
module procedure global_sum_prod_dbl, &
global_sum_prod_real, &
@@ -740,6 +747,69 @@ function global_sum_scalar_int(scalar, dist) &

end function global_sum_scalar_int

!***********************************************************************

function global_allreduce_sum_vector_dbl(vector, dist) &
result(globalSums)

! Computes the global sums of sets of scalars (elements of 'vector')
! distributed across a parallel machine.
!
! This is actually the specific interface for the generic global_allreduce_sum
! function corresponding to double precision vectors. The generic
! interface is identical but will handle real and integer vectors.

real (dbl_kind), dimension(:), intent(in) :: &
vector ! vector whose components are to be summed

type (distrb), intent(in) :: &
dist ! block distribution

real (dbl_kind), dimension(size(vector)) :: &
globalSums ! resulting array of global sums

!-----------------------------------------------------------------------
!
! local variables
!
!-----------------------------------------------------------------------

integer (int_kind) :: &
ierr, &! mpi error flag
numProcs, &! number of processors participating
numBlocks, &! number of local blocks
communicator, &! communicator for this distribution
numElem ! number of elements in vector

real (dbl_kind), dimension(:,:), allocatable :: &
work ! temporary local array

character(len=*), parameter :: subname = '(global_allreduce_sum_vector_dbl)'

!-----------------------------------------------------------------------
!
! get communicator for MPI calls
!
!-----------------------------------------------------------------------

call ice_distributionGet(dist, &
numLocalBlocks = numBlocks, &
nprocs = numProcs, &
communicator = communicator)

numElem = size(vector)
allocate(work(1,numElem))
work(1,:) = vector
globalSums = c0

call compute_sums_dbl(work,globalSums,communicator,numProcs)

deallocate(work)

!-----------------------------------------------------------------------

end function global_allreduce_sum_vector_dbl

!***********************************************************************

function global_sum_prod_dbl (array1, array2, dist, field_loc, &
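The new global_allreduce_sum_vector_dbl packs the 1-D input into a work array and delegates the reduction to compute_sums_dbl, so every task receives the element-wise sum of the vector across all tasks. A self-contained sketch of the same semantics using a plain MPI_Allreduce (illustration only: the CICE routine routes through compute_sums_dbl, and all values here are hypothetical):

program allreduce_sum_sketch
   use mpi
   implicit none
   integer, parameter :: dbl_kind = selected_real_kind(13)
   integer :: ierr, myTask
   real (kind=dbl_kind) :: vector(3), globalSums(3)

   call MPI_Init(ierr)
   call MPI_Comm_rank(MPI_COMM_WORLD, myTask, ierr)

   ! hypothetical per-task partial sums
   vector = real(myTask + 1, dbl_kind) * (/ 1.0_dbl_kind, 2.0_dbl_kind, 3.0_dbl_kind /)

   ! element-wise sum over all tasks; every task receives the result,
   ! matching the semantics of global_allreduce_sum(vector, dist)
   call MPI_Allreduce(vector, globalSums, size(vector), MPI_DOUBLE_PRECISION, &
                      MPI_SUM, MPI_COMM_WORLD, ierr)

   if (myTask == 0) print *, 'globalSums =', globalSums
   call MPI_Finalize(ierr)
end program allreduce_sum_sketch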
cicecore/cicedyn/infrastructure/comm/serial/ice_global_reductions.F90
@@ -37,6 +37,7 @@ module ice_global_reductions
private

public :: global_sum, &
global_allreduce_sum, &
global_sum_prod, &
global_maxval, &
global_minval
@@ -56,6 +57,12 @@ module ice_global_reductions
global_sum_scalar_int
end interface

interface global_allreduce_sum
module procedure global_allreduce_sum_vector_dbl!, &
! module procedure global_allreduce_sum_vector_real, & ! not yet implemented
! module procedure global_allreduce_sum_vector_int ! not yet implemented
end interface

interface global_sum_prod
module procedure global_sum_prod_dbl, &
global_sum_prod_real, &
@@ -741,6 +748,69 @@ function global_sum_scalar_int(scalar, dist) &

end function global_sum_scalar_int

!***********************************************************************

function global_allreduce_sum_vector_dbl(vector, dist) &
result(globalSums)

! Computes the global sums of sets of scalars (elements of 'vector')
! distributed across a parallel machine.
!
! This is actually the specific interface for the generic global_allreduce_sum
! function corresponding to double precision vectors. The generic
! interface is identical but will handle real and integer vectors.

real (dbl_kind), dimension(:), intent(in) :: &
vector ! vector whose components are to be summed

type (distrb), intent(in) :: &
dist ! block distribution

real (dbl_kind), dimension(size(vector)) :: &
globalSums ! resulting array of global sums

!-----------------------------------------------------------------------
!
! local variables
!
!-----------------------------------------------------------------------

integer (int_kind) :: &
ierr, &! mpi error flag
numProcs, &! number of processors participating
numBlocks, &! number of local blocks
communicator, &! communicator for this distribution
numElem ! number of elements in vector

real (dbl_kind), dimension(:,:), allocatable :: &
work ! temporary local array

character(len=*), parameter :: subname = '(global_allreduce_sum_vector_dbl)'

!-----------------------------------------------------------------------
!
! get communicator for MPI calls
!
!-----------------------------------------------------------------------

call ice_distributionGet(dist, &
numLocalBlocks = numBlocks, &
nprocs = numProcs, &
communicator = communicator)

numElem = size(vector)
allocate(work(1,numElem))
work(1,:) = vector
globalSums = c0

call compute_sums_dbl(work,globalSums,communicator,numProcs)

deallocate(work)

!-----------------------------------------------------------------------

end function global_allreduce_sum_vector_dbl

!***********************************************************************

function global_sum_prod_dbl (array1, array2, dist, field_loc, &
Diffs for the remaining changed files are not rendered.
