Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

merge 2-way update bugfix from master #23

Merged
Merged 16 commits on Apr 21, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion RELEASE.md
Original file line number Diff line number Diff line change
Expand Up @@ -28,4 +28,4 @@ The non-functional gfdl_cloud_microphys.F90 has been removed and replaced with t

The namelist nggps_diag_nml has been eliminated. 'fdiag' is no longer handled by the dynamical core, and should be handled by the physics driver.

For a complete technical description see the [forthcoming] GFDL Technical Memorandum.
For a complete technical description see the NOAA Technical Memorandum OAR GFDL: https://repository.library.noaa.gov/view/noaa/23432
36 changes: 19 additions & 17 deletions model/boundary.F90
Original file line number Diff line number Diff line change
Expand Up @@ -2306,6 +2306,8 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, dx, dy, are
position = CENTER
end if

!Note that *_c does not have values on the parent_proc.
!Must use isu, etc. to get bounds of update region on parent.
call mpp_get_F2C_index(nest_domain, is_c, ie_c, js_c, je_c, is_f, ie_f, js_f, je_f, nest_level=nest_level, position=position)
if (child_proc) then
allocate(coarse_dat_send(is_c:ie_c, js_c:je_c,npz))
Expand All @@ -2332,9 +2334,9 @@ subroutine update_coarse_grid_mpp(var_coarse, var_nest, nest_domain, dx, dy, are
s = r/2 !rounds down (since r > 0)
qr = r*upoff + nsponge - s

if (parent_proc .and. .not. (ie_c < is_c .or. je_c < js_c)) then
if (parent_proc .and. .not. (ieu < isu .or. jeu < jsu)) then
call fill_var_coarse(var_coarse, coarse_dat_recv, isd_p, ied_p, jsd_p, jed_p, &
is_c, ie_c, js_c, je_c, npx, npy, npz, istag, jstag, nestupdate, parent_grid)
isu, ieu, jsu, jeu, npx, npy, npz, istag, jstag, nestupdate, parent_grid)
endif

if (allocated(coarse_dat_recv)) deallocate(coarse_dat_recv)
Expand Down Expand Up @@ -2454,14 +2456,14 @@ subroutine fill_coarse_data_send(coarse_dat_send, var_nest, dx, dy, area, &
end subroutine fill_coarse_data_send

subroutine fill_var_coarse(var_coarse, coarse_dat_recv, isd_p, ied_p, jsd_p, jed_p, &
is_c, ie_c, js_c, je_c, npx, npy, npz, istag, jstag, nestupdate, parent_grid)
isu, ieu, jsu, jeu, npx, npy, npz, istag, jstag, nestupdate, parent_grid)

!This routine assumes the coarse and nested grids are properly
! aligned, and that in particular for odd refinement ratios all
! coarse-grid cells (faces) coincide with nested-grid cells (faces)

integer, intent(IN) :: isd_p, ied_p, jsd_p, jed_p
integer, intent(IN) :: is_c, ie_c, js_c, je_c
integer, intent(IN) :: isu, ieu, jsu, jeu
integer, intent(IN) :: istag, jstag
integer, intent(IN) :: npx, npy, npz, nestupdate
real, intent(INOUT) :: var_coarse(isd_p:ied_p+istag,jsd_p:jed_p+jstag,npz)
Expand All @@ -2475,10 +2477,10 @@ subroutine fill_var_coarse(var_coarse, coarse_dat_recv, isd_p, ied_p, jsd_p, jed
select case (nestupdate)
case (1,2,6,7,8) ! 1 = Conserving update on all variables; 2 = conserving update for cell-centered values; 6 = conserving remap-update

!$OMP parallel do default(none) shared(npz,js_c,je_c,is_c,ie_c,coarse_dat_recv,parent_grid,var_coarse)
!$OMP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,coarse_dat_recv,parent_grid,var_coarse)
do k=1,npz
do j=js_c,je_c
do i=is_c,ie_c
do j=jsu,jeu
do i=isu,ieu
var_coarse(i,j,k) = coarse_dat_recv(i,j,k)*parent_grid%gridstruct%rarea(i,j)
end do
end do
Expand All @@ -2498,10 +2500,10 @@ subroutine fill_var_coarse(var_coarse, coarse_dat_recv, isd_p, ied_p, jsd_p, jed
select case (nestupdate)
case (1,6,7,8)

!$OMP parallel do default(none) shared(npz,js_c,je_c,is_c,ie_c,coarse_dat_recv,parent_grid,var_coarse)
!$OMP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,coarse_dat_recv,parent_grid,var_coarse)
do k=1,npz
do j=js_c,je_c+1
do i=is_c,ie_c
do j=jsu,jeu+1
do i=isu,ieu
var_coarse(i,j,k) = coarse_dat_recv(i,j,k)*parent_grid%gridstruct%rdx(i,j)
end do
end do
Expand All @@ -2518,10 +2520,10 @@ subroutine fill_var_coarse(var_coarse, coarse_dat_recv, isd_p, ied_p, jsd_p, jed
select case (nestupdate)
case (1,6,7,8) !averaging update; in-line average for face-averaged values instead of areal average

!$OMP parallel do default(none) shared(npz,js_c,je_c,is_c,ie_c,coarse_dat_recv,parent_grid,var_coarse)
!$OMP parallel do default(none) shared(npz,jsu,jeu,isu,ieu,coarse_dat_recv,parent_grid,var_coarse)
do k=1,npz
do j=js_c,je_c
do i=is_c,ie_c+1
do j=jsu,jeu
do i=isu,ieu+1
var_coarse(i,j,k) = coarse_dat_recv(i,j,k)*parent_grid%gridstruct%rdy(i,j)
end do
end do
Expand Down Expand Up @@ -2611,13 +2613,13 @@ subroutine update_coarse_grid_mpp_vector(u_coarse, v_coarse, u_nest, v_nest, nes
s = r/2 !rounds down (since r > 0)
qr = r*upoff + nsponge - s

if (parent_proc .and. .not. (ie_cx < is_cx .or. je_cx < js_cx)) then
if (parent_proc .and. .not. (ieu < isu .or. jeu < jsu)) then
call fill_var_coarse(u_coarse, coarse_dat_recv_u, isd_p, ied_p, jsd_p, jed_p, &
is_cx, ie_cx, js_cx, je_cx, npx, npy, npz, istag_u, jstag_u, nestupdate, parent_grid)
isu, ieu, jsu, jeu, npx, npy, npz, istag_u, jstag_u, nestupdate, parent_grid)
endif
if (parent_proc .and. .not. (ie_cy < is_cy .or. je_cy < js_cy)) then
if (parent_proc .and. .not. (ieu < isu .or. jeu < jsu)) then
call fill_var_coarse(v_coarse, coarse_dat_recv_v, isd_p, ied_p, jsd_p, jed_p, &
is_cy, ie_cy, js_cy, je_cy, npx, npy, npz, istag_v, jstag_v, nestupdate, parent_grid)
isu, ieu, jsu, jeu, npx, npy, npz, istag_v, jstag_v, nestupdate, parent_grid)
endif

if (allocated(coarse_dat_recv_u)) deallocate(coarse_dat_recv_u)
Expand Down
2 changes: 1 addition & 1 deletion model/fv_arrays.F90
Original file line number Diff line number Diff line change
Expand Up @@ -584,7 +584,7 @@ module fv_arrays_mod
logical :: nested = .false.
integer :: nestbctype = 1
integer :: nsponge = 0
integer :: nestupdate = 0
integer :: nestupdate = 7
logical :: twowaynest = .false.
integer :: ioffset, joffset !Position of nest within parent grid
integer :: nlevel = 0 ! levels down from top-most domain
Expand Down
83 changes: 69 additions & 14 deletions model/fv_control.F90
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,7 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split)
integer, dimension(MAX_NNEST) :: all_npy = 0
integer, dimension(MAX_NNEST) :: all_npz = 0
integer, dimension(MAX_NNEST) :: all_ntiles = 0
integer, dimension(MAX_NNEST) :: all_twowaynest = 0 ! > 0 implies two-way
!integer, dimension(MAX_NNEST) :: tile_fine = 0
integer, dimension(MAX_NNEST) :: icount_coarse = 1
integer, dimension(MAX_NNEST) :: jcount_coarse = 1
Expand Down Expand Up @@ -468,13 +469,16 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split)
all_npz(this_grid) = npz
call mpp_max(all_npz, ngrids, global_pelist)

if (Atm(this_grid)%neststruct%twowaynest) all_twowaynest(this_grid) = 1
call mpp_max(all_twowaynest, ngrids, global_pelist)
ntiles_nest_all = 0
do n=1,ngrids
if (n/=this_grid) then
Atm(n)%flagstruct%npx = all_npx(n)
Atm(n)%flagstruct%npy = all_npy(n)
Atm(n)%flagstruct%npz = all_npz(n)
Atm(n)%flagstruct%ntiles = all_ntiles(n)
Atm(n)%neststruct%twowaynest = (all_twowaynest(n) > 0) ! disabled
endif
npes_nest_tile(ntiles_nest_all+1:ntiles_nest_all+all_ntiles(n)) = &
Atm(n)%npes_this_grid / all_ntiles(n)
Expand All @@ -494,7 +498,7 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split)
endif
enddo

if (mpp_pe() == 0) then
if (mpp_pe() == 0 .and. ngrids > 1) then
print*, ' NESTING TREE'
do n=1,ngrids
write(*,'(12i4)') n, nest_level(n), nest_ioffsets(n), nest_joffsets(n), icount_coarse(n), jcount_coarse(n), tile_fine(n), tile_coarse(n), nest_refine(n), all_ntiles(n), all_npx(n), all_npy(n)
Expand Down Expand Up @@ -564,24 +568,20 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split)

endif

allocate(Atm(this_grid)%neststruct%child_grids(ngrids)) !only temporary?
allocate(Atm(this_grid)%neststruct%child_grids(ngrids))
do n=1,ngrids
Atm(this_grid)%neststruct%child_grids(n) = (grid_coarse(n) == this_grid)
allocate(Atm(n)%neststruct%do_remap_bc(ngrids))
Atm(n)%neststruct%do_remap_bc(:) = .false.
enddo
Atm(this_grid)%neststruct%parent_proc = ANY(tile_coarse == Atm(this_grid)%global_tile)
!Atm(this_grid)%neststruct%child_proc = ANY(Atm(this_grid)%pelist == gid) !this means a nested grid
!!$ if (Atm(this_grid)%neststruct%nestbctype > 1) then
!!$ call mpp_error(FATAL, 'nestbctype > 1 not yet implemented')
!!$ Atm(this_grid)%neststruct%upoff = 0
!!$ endif
!!$ end if
!!$
!!$ do nn=1,size(Atm)
!!$ if (n == 1) allocate(Atm(nn)%neststruct%nest_domain_all(size(Atm)))
!!$ Atm(nn)%neststruct%nest_domain_all(n) = Atm(this_grid)%neststruct%nest_domain
!!$ enddo
Atm(this_grid)%neststruct%parent_proc = ANY(Atm(this_grid)%neststruct%child_grids) !ANY(tile_coarse == Atm(this_grid)%global_tile)
Atm(this_grid)%neststruct%child_proc = ASSOCIATED(Atm(this_grid)%parent_grid) !this means a nested grid

if (ngrids > 1) call setup_update_regions
if (Atm(this_grid)%neststruct%nestbctype > 1) then
call mpp_error(FATAL, 'nestbctype > 1 not yet implemented')
Atm(this_grid)%neststruct%upoff = 0
endif

if (Atm(this_grid)%gridstruct%bounded_domain .and. is_master()) print*, &
' Bounded domain: nested = ', Atm(this_grid)%neststruct%nested, ', regional = ', Atm(this_grid)%flagstruct%regional
Expand Down Expand Up @@ -1045,6 +1045,61 @@ subroutine read_namelist_fv_core_nml(Atm)

end subroutine read_namelist_fv_core_nml

subroutine setup_update_regions

integer :: isu, ieu, jsu, jeu ! update regions
integer :: isc, jsc, iec, jec
integer :: upoff

isc = Atm(this_grid)%bd%isc
jsc = Atm(this_grid)%bd%jsc
iec = Atm(this_grid)%bd%iec
jec = Atm(this_grid)%bd%jec

upoff = Atm(this_grid)%neststruct%upoff

do n=2,ngrids
write(*,'(I, A, 4I)') mpp_pe(), 'SETUP_UPDATE_REGIONS 0: ', mpp_pe(), tile_coarse(n), Atm(this_grid)%global_tile
if (tile_coarse(n) == Atm(this_grid)%global_tile) then

isu = nest_ioffsets(n)
ieu = isu + icount_coarse(n) - 1
jsu = nest_joffsets(n)
jeu = jsu + jcount_coarse(n) - 1

!update offset adjustment
isu = isu + upoff
ieu = ieu - upoff
jsu = jsu + upoff
jeu = jeu - upoff

!restriction to current domain
!!$ !!! DEBUG CODE
!!$ if (Atm(this_grid)%flagstruct%fv_debug) then
!!$ write(*,'(I, A, 4I)') mpp_pe(), 'SETUP_UPDATE_REGIONS : ', isu, jsu, ieu, jeu
!!$ write(*,'(I, A, 4I)') mpp_pe(), 'SETUP_UPDATE_REGIONS 2: ', isc, jsc, iec, jsc
!!$ endif
!!$ !!! END DEBUG CODE
if (isu > iec .or. ieu < isc .or. &
jsu > jec .or. jeu < jsc ) then
isu = -999 ; jsu = -999 ; ieu = -1000 ; jeu = -1000
else
isu = max(isu,isc) ; jsu = max(jsu,jsc)
ieu = min(ieu,iec) ; jeu = min(jeu,jec)
endif
!!$ !!! DEBUG CODE
!!$ if (Atm(this_grid)%flagstruct%fv_debug) &
!!$ write(*,'(I, A, 4I)') mpp_pe(), 'SETUP_UPDATE_REGIONS 3: ', isu, jsu, ieu, jeu
!!$ !!! END DEBUG CODE

Atm(n)%neststruct%isu = isu
Atm(n)%neststruct%ieu = ieu
Atm(n)%neststruct%jsu = jsu
Atm(n)%neststruct%jeu = jeu
endif
enddo

end subroutine setup_update_regions

end subroutine fv_control_init

Expand Down
Loading